Dataset fields:
    query            string  (length 9 to 3.4k)
    document         string  (length 9 to 87.4k)
    metadata         dict
    negatives        list    (4 to 101 items)
    negative_scores  list    (4 to 101 items)
    document_score   string  (length 3 to 10)
    document_rank    string  (102 distinct values)
this function serves to initialize the ROS node
def startNode():
    # init node
    rospy.init_node("resize_and_repub")
    rospy.loginfo("resize_and_repub node started")
    # setup subscribers
    rospy.Subscriber(leftArmCamTopic, Image, leftArmImageCallback)
    rospy.Subscriber(headCamTopic, Image, headImageCallback)
    rospy.Subscriber(primaryCamTopic, String, primaryCamCallback)
    rospy.Subscriber(secondaryCamTopic, String, secondayCamCallback)
    rospy.loginfo("all subscribers initialized, entering publishing loop...")
    # start repub thread
    thread = threading.Thread(target=resizeAndRepubThread)
    thread.start()
    rospy.spin()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_ros_node(self): #pylint: disable=no-self-use\n print(\"rospy init node\")\n rospy.init_node('ispy_ROS_receiver', anonymous = True)", "def initNode():\n\n # 0) General Setup\n #initialize listener node!\n rospy.init_node('main', anonymous=True)\n\n #Create instances of subscriber objects\n joint_state_sub = rospy.Subscriber(\"joint_states\", JointState, joint_state_callback)", "def run(self):\n # Environment should be setup here if needed ( we re in another process ).\n sys.modules[\"pyros_setup\"] = pyros_setup.delayed_import_auto(distro='indigo', base_path=self.base_path)\n\n # master has to be running here or we just wait for ever\n m, _ = pyros_setup.get_master(spawn=False)\n while not m.is_online():\n time.sleep(0.5)\n\n enable_rocon = rospy.get_param('~enable_rocon', False)\n self.enable_rocon = self.enable_rocon and enable_rocon\n\n self.enable_cache = rospy.get_param('~enable_cache', False)\n self.ros_if = RosInterface(enable_cache=self.enable_cache)\n\n if self.ros_if_params:\n self.ros_if.reinit(*self.ros_if_params)\n\n if self.enable_rocon:\n\n rospy.logerr(\"ENABLE_ROCON IS TRUE !!\")\n self.rocon_if = RoconInterface(self.ros_if)\n pass\n\n # we initialize the node here, in subprocess, passing ros parameters.\n # disabling signal to avoid overriding callers behavior\n rospy.init_node(self.name, argv=self.str_argv, disable_signals=True)\n rospy.logwarn('PyrosROS {name} node started with args : {argv}'.format(name=self.name, argv=self.str_argv))\n\n if self.dynamic_reconfigure:\n # Create a dynamic reconfigure server ( needs to be done after node_init )\n self.server = Server(pyros_cfg, self.reconfigure)\n\n # TODO : install shutdown hook to shutdown if detected\n\n try:\n logging.debug(\"zmp[{name}] running, pid[{pid}]\".format(name=__name__, pid=os.getpid()))\n\n super(PyrosROS, self).run()\n\n logging.debug(\"zmp[{name}] shutdown, pid[{pid}]\".format(name=__name__, pid=os.getpid()))\n\n except KeyboardInterrupt:\n rospy.logwarn('PyrosROS node stopped by keyboad interrupt')", "def init_node(self):\n # publishers\n self.pub_poly_traj_points = rospy.Publisher('/espeleo/traj_points_polygon', Polygon, latch=True, queue_size=1)\n self.pub_path_short = rospy.Publisher('/robot_path_shortest', Path, latch=True, queue_size=1)\n self.pub_path_energy = rospy.Publisher('/robot_path_energy', Path, latch=True, queue_size=1)\n self.pub_path_traver = rospy.Publisher('/robot_path_traversal', Path, latch=True, queue_size=1)\n self.pub_path_traver_pybullet = rospy.Publisher('/robot_path_traversal_pybullet', Path, latch=True,\n queue_size=1)\n self.pub_path_traver_op = rospy.Publisher('/robot_path_traversal_optimization', Path, latch=True, queue_size=1)\n self.pub_path_traver_pybullet_normal = rospy.Publisher('/robot_path_traversal_pybullet_normal', Path,\n latch=True,\n queue_size=1)\n self.pub_path_traver_op_normal = rospy.Publisher('/robot_path_traversal_optimization_normal', Path, latch=True,\n queue_size=1)\n self.pub_path_straight = rospy.Publisher('/robot_path_straightest', Path, latch=True, queue_size=1)\n self.pub_path_combined = rospy.Publisher('/robot_path_combined', Path, latch=True, queue_size=1)\n\n\n self.pub_src_point = rospy.Publisher('/source_path_point', Marker, latch=True, queue_size=1)\n self.pub_dst_point = rospy.Publisher('/target_path_point', Marker, latch=True, queue_size=1)\n self.pub_frontiers_ground_pts = rospy.Publisher('/frontiers_ground_pts', MarkerArray, latch=True,\n queue_size=1)\n self.pub_frontiers_ground_centroids = 
rospy.Publisher('/frontiers_ground_centroids', MarkerArray, latch=True,\n queue_size=1)\n self.pub_frontiers_ground_centroids_labels = rospy.Publisher('/frontiers_ground_centroids_labels', MarkerArray,\n latch=True, queue_size=1)\n self.pub_frontiers_ground_trav_labels = rospy.Publisher('/frontiers_ground_centroids_traversability_labels', MarkerArray,\n latch=True, queue_size=1)\n\n # subscribers\n rospy.Subscriber('/laser_cloud_surround2', PointCloud2, self.map_point_cloud_callback)\n rospy.Subscriber('/integrated_to_init2', Odometry, self.odom_callback)", "def __init__(self) :\n\n # Init node\n rospy.init_node('DWM1001_Double_Initiator', anonymous=False)\n\n # Get port and tag name\n self.dwm_port1 = rospy.get_param('~port1', '/dev/ttyACM1')\n self.dwm_port2 = rospy.get_param('~port2', '/dev/ttyACM2')\n self.tag_name = rospy.get_param('~tag_name', \"1by4\")\n self.network = rospy.get_param('~network', \"default\")\n self.verbose = rospy.get_param('~verbose', False)\n \n # Set a ROS rate\n self.rate = rospy.Rate(100)\n \n # Empty dictionary to store topics being published\n self.topics = {}\n \n # Serial port settings\n self.serialPortDWM1001_1 = serial.Serial(\n port = self.dwm_port1,\n baudrate = 115200,\n timeout = 0.1,\n parity = serial.PARITY_ODD,\n stopbits = serial.STOPBITS_TWO,\n bytesize = serial.SEVENBITS\n )\n \n self.serialPortDWM1001_2 = serial.Serial(\n port = self.dwm_port2,\n baudrate = 115200,\n timeout = 0.1,\n parity = serial.PARITY_ODD,\n stopbits = serial.STOPBITS_TWO,\n bytesize = serial.SEVENBITS\n )", "def init():\n\t# init the node\n\trospy.init_node(\"gps_node\")\n\t\n\t# init the publishers\n\tD.gpsPub = rospy.Publisher(\"gps_data\",RosGPS)\n\n\t# init gps connection\n\t#init_gpsd()\n\tinit_serial()\n\t\n\t# gps data\n\tD.NSatellites = 0\t# the number of satellites in view\n\n\t# time conversion info\n\tD.tzOffset = -8 \t# offset in hours due to timezones\n\tD.dst = 1\t\t# daylight savings. 
1 = yes, 0 = no", "def __init__(self):\n \n # Initialize ROS Node\n rospy.init_node('imu_tf_broadcaster')\n\n # Get parameters\n self.topic_name = rospy.get_param('~topic', '/imu/data')\n self.reference_frame_name = rospy.get_param('~reference_frame', '/world')\n self.z_offset = rospy.get_param('~z_offset', 1.0)\n \n # Subscribe to the imu topic\n rospy.Subscriber(self.topic_name, Imu, self.handle_imu_orientation)\n \n rospy.spin()", "def init():\n rino.initialize.initialize()", "def __init__(self, nodeNames):\n masterUri = 'http://%s:%s' % (self.RosMasterHost, self.RosMasterPort)\n\n # Create an XMLRPC client to connect to the ROS master\n self.__client = xmlrpclib.ServerProxy(masterUri)\n\n # Register all ROS master methods with this class to allow custom\n # functionality to be executed prior to sending the data to the\n # ROS master\n for method in self.RosMasterMethods:\n wrapper = self.__getWrapper(method)\n setattr(self, method, wrapper)\n\n self.__nodeNames = nodeNames\n\n # Add a prefix slash if one is not given\n for i in range(len(self.__nodeNames)):\n if not nodeNames[i].startswith(\"/\"):\n self.__nodeNames[i] = \"/%s\" % nodeNames[i]\n\n self.__docWriter = RosDocWriter(self.__nodeNames)", "def __init__(self):\n \"\"\" rospy.init_node('my_node_name', anonymous=True) \"\"\"\n \"\"\" or \"\"\"\n \"\"\" rospy.init_node('my_node_name') \"\"\"\n rospy.init_node('test_vision_node', anonymous=True)\n\n \"\"\" Give the OpenCV display window a name \"\"\"\n self.cv_window_name = \"OpenCV Image\"\n\n \"\"\" rospy.Publisher initialization \"\"\"\n \"\"\" pub = rospy.Publisher('topic_name', std_msgs.msg.String, queue_size=10) \"\"\"\n \"\"\" The only required arguments to create a rospy.Publisher are the topic name, the Message class, and the queue_size \"\"\"\n\n \"\"\" Publish as the opencv image topic \"\"\"\n self.image_pub = rospy.Publisher(\"/opencv_img\", Image, queue_size=10)\n\n \"\"\" Create the cv_bridge object \"\"\"\n self.bridge = CvBridge()\n\n \"\"\" subscribe to a topic using rospy.Subscriber class \"\"\"\n \"\"\" sub = rospy.Subscriber('TOPIC_NAME', TOPIC_MESSAGE_TYPE, name_callback) \"\"\"\n\n \"\"\" Subscribe to the raw camera image topic \"\"\"\n self.image_sub = rospy.Subscriber(\"/camUSB/image_raw\", Image, self.callback)\n\n \"\"\" Subscribe to the info camera topic \"\"\"\n self.imgInfo_sub = rospy.Subscriber(\"/camUSB/camera_info\", CameraInfo, self.getCameraInfo)", "def initialize(self):\n self.ros.enable()\n self.phone_link.enable()", "def __init__(self):\n # Manage command line args\n args = ut_generic.getParserArgsRobot().parse_args()\n self.gzclient = args.gzclient\n self.realSpeed = args.realSpeed\n # self.realSpeed = True\n self.debug = args.debug\n self.multiInstance = args.multiInstance\n self.port = args.port\n # Set the path of the corresponding URDF file\n if self.realSpeed:\n urdf = \"biped.urdf\"\n self.urdfPath = get_prefix_path(\n \"lobot_description\") + \"/share/lobot_description/robots/\" + urdf\n else:\n print(\"Non real speed not yet supported. Use real speed instead. 
\")\n\n # TODO: Include launch logic here, refer to code from the .launch.py files\n # Note that after including the launch logic the code will no longer be debuggable due to multi process stuff\n\n # Create the node after the new ROS_DOMAIN_ID is set in generate_launch_description()\n rclpy.init()\n self.node = rclpy.create_node(self.__class__.__name__)\n\n # class variables\n self._observation_msg = None\n self.max_episode_steps = 1024 # default value, can be updated from baselines\n self.iterator = 0\n self.reset_jnts = True\n self._collision_msg = None\n\n #############################\n # Environment hyperparams\n #############################\n EE_POINTS = np.asmatrix([[0, 0, 0]])\n EE_VELOCITIES = np.asmatrix([[0, 0, 0]])\n\n # # Topics for the robot publisher and subscriber.\n JOINT_PUBLISHER = '/lobot_arm/control'\n # Get Joint names from the parameter server\n get_joints_client = self.node.create_client(GetAllJoints, \"/GetAllControlJoints\",\n qos_profile=qos_profile_services_default)\n req = GetAllJoints.Request()\n req.robot = \"lobot_arm\"\n while not get_joints_client.wait_for_service(timeout_sec=3.0):\n self.node.get_logger().info('service not available, waiting again...')\n\n future = get_joints_client.call_async(req)\n rclpy.spin_until_future_complete(self.node, future)\n if future.result() is not None:\n joint_names = future.result().joints\n self.node.get_logger().info(\n 'Number of joints: %d' %\n (len(joint_names)))\n else:\n self.node.get_logger().info('Service call failed %r' % (future.exception(),))\n JOINT_ORDER = joint_names\n INITIAL_JOINTS = np.full((len(joint_names)), 0.0).tolist()\n reset_condition = {\n 'initial_positions': INITIAL_JOINTS,\n 'initial_velocities': []\n }\n #############################\n\n m_jointOrder = copy.deepcopy(JOINT_ORDER)\n\n # Initialize target end effector position\n self.environment = {\n 'jointOrder': m_jointOrder,\n 'reset_conditions': reset_condition,\n 'tree_path': self.urdfPath,\n 'end_effector_points': EE_POINTS,\n }\n\n # Subscribe to the appropriate topics, taking into account the particular robot\n self._pub = self.node.create_publisher(JointControl, JOINT_PUBLISHER, qos_profile=qos_profile_sensor_data)\n self._sub = self.node.create_subscription(JointState, \"/joint_states\", self.observation_callback,\n qos_profile_sensor_data)\n\n # TODO: Make the clock node run on a separate thread so weird issues like outdated clock can stop happening\n self.lock = threading.Lock()\n self.clock_node = rclpy.create_node(self.__class__.__name__ + \"_clock\")\n self._sub_clock = self.clock_node.create_subscription(RosClock, '/clock', self.clock_callback,\n qos_profile=qos_profile_sensor_data)\n self.exec = rclpy.executors.MultiThreadedExecutor()\n self.exec.add_node(self.clock_node)\n t1 = threading.Thread(target=self.spinClockNode, daemon=True)\n t1.start()\n # self._imu_sub = self.node.create_subscription(JointState, \"/lobot_IMU_controller/out\", self.imu_callback, qos_profile_sensor_data)\n # self._sub = self.node.create_subscription(JointTrajectoryControllerState, JOINT_SUBSCRIBER, self.observation_callback, qos_profile=qos_profile_sensor_data)\n self._reset_sim = self.node.create_client(Empty, '/reset_simulation')\n self._physics_pauser = self.node.create_client(Empty, '/pause_physics')\n self._robot_resetter = self.node.create_client(Empty, '/lobot_arm/reset')\n self._physics_unpauser = self.node.create_client(Empty, '/unpause_physics')\n self.delete_entity = self.node.create_client(DeleteEntity, '/delete_entity')\n 
self.numJoints = len(JOINT_ORDER)\n # Initialize a KDL Jacobian solver from the chain.\n # self.jacSolver = ChainJntToJacSolver(self.mara_chain)\n\n # Observable dimensions, each joint has 2 (joint position + joint velocity), the IMU gives 6\n self.obs_dim = self.numJoints * 2 + 6\n\n # # Here idially we should find the control range of the robot. Unfortunatelly in ROS/KDL there is nothing like this.\n # # I have tested this with the mujoco enviroment and the output is always same low[-1.,-1.], high[1.,1.]\n\n low = -np.pi * np.ones(self.numJoints) * 0.4\n high = np.pi * np.ones(self.numJoints) * 0.4\n\n self.action_space = spaces.Box(low, high)\n\n high = np.inf * np.ones(self.obs_dim)\n low = -high\n self.observation_space = spaces.Box(low, high)\n\n self.seed()\n self.buffer_dist_rewards = []\n self.buffer_tot_rewards = []\n self.collided = 0\n\n # Set the time source\n self._sim_time = 0\n self._sim_time_msg = builtin_interfaces.msg.Time()", "def __init__(self):\n rospy.init_node(\"kinect_transformer\")\n self.kinect_depth_sub = rospy.Subscriber(\"kinect/depth/points\", pc2.PointCloud2, self.kinect_cb, queue_size=10)\n self.left_obs_pub = rospy.Publisher(\"left_arm_obstacles\", PointCloud, queue_size=10, latch=True)\n self.right_obs_pub = rospy.Publisher(\"right_arm_obstacles\", PointCloud, queue_size=10, latch=True)\n self.tf = tf.TransformListener()\n self.closest_rgb_points = []\n # create collision checkers with the left and right kin solver instances\n self.left_cc = CollisionChecker([], KDLIKSolver(\"left\"))\n self.right_cc = CollisionChecker([], KDLIKSolver(\"right\"))", "def _initialize(self):\n self.send_init_command()", "def __init__(self, node, mac, sensor_id):\n super().__init__(node, mac)\n self.sensor_id = sensor_id\n self.sensor_type = SENSORS[sensor_id]\n self.node_callbacks = (AVAILABLE_SENSOR_ID, sensor_id)", "def __init__(self):\n ros_ws_abspath = rospy.get_param(\"/drone/ros_ws_abspath\", None)\n assert ros_ws_abspath is not None, \"You forgot to set ros_ws_abspath in your yaml file of your main RL script. 
Set ros_ws_abspath: \\'YOUR/SIM_WS/PATH\\'\"\n assert os.path.exists(ros_ws_abspath), \"The Simulation ROS Workspace path \" + ros_ws_abspath + \\\n \" DOESNT exist, execute: mkdir -p \" + ros_ws_abspath + \\\n \"/src;cd \" + ros_ws_abspath + \";catkin_make\"\n\n ROSLauncher(rospackage_name=\"drone_construct\",\n launch_file_name=\"start_world.launch\",\n ros_ws_abspath=ros_ws_abspath)\n\n # Load Params from the desired Yaml file\n LoadYamlFileParamsTest(rospackage_name=\"openai_ros\",\n rel_path_from_package_to_file=\"src/openai_ros/task_envs/parrotdrone/config\",\n yaml_file_name=\"parrotdrone_goto.yaml\")\n\n # Only variable needed to be set here\n number_actions = rospy.get_param('/drone/n_actions')\n self.action_space = spaces.Discrete(number_actions)\n\n # We set the reward range, which is not compulsory but here we do it.\n self.reward_range = (-numpy.inf, numpy.inf)\n\n # Actions and Observations\n self.linear_forward_speed = rospy.get_param(\n '/drone/linear_forward_speed')\n self.angular_turn_speed = rospy.get_param('/drone/angular_turn_speed')\n self.angular_speed = rospy.get_param('/drone/angular_speed')\n\n self.init_linear_speed_vector = Vector3()\n self.init_linear_speed_vector.x = rospy.get_param(\n '/drone/init_linear_speed_vector/x')\n self.init_linear_speed_vector.y = rospy.get_param(\n '/drone/init_linear_speed_vector/y')\n self.init_linear_speed_vector.z = rospy.get_param(\n '/drone/init_linear_speed_vector/z')\n\n self.init_angular_turn_speed = rospy.get_param(\n '/drone/init_angular_turn_speed')\n\n self.min_sonar_value = rospy.get_param('/drone/min_sonar_value')\n self.max_sonar_value = rospy.get_param('/drone/max_sonar_value')\n\n # Get WorkSpace Cube Dimensions\n self.work_space_x_max = rospy.get_param(\"/drone/work_space/x_max\")\n self.work_space_x_min = rospy.get_param(\"/drone/work_space/x_min\")\n self.work_space_y_max = rospy.get_param(\"/drone/work_space/y_max\")\n self.work_space_y_min = rospy.get_param(\"/drone/work_space/y_min\")\n self.work_space_z_max = rospy.get_param(\"/drone/work_space/z_max\")\n self.work_space_z_min = rospy.get_param(\"/drone/work_space/z_min\")\n\n # Maximum RPY values\n self.max_roll = rospy.get_param(\"/drone/max_roll\")\n self.max_pitch = rospy.get_param(\"/drone/max_pitch\")\n self.max_yaw = rospy.get_param(\"/drone/max_yaw\")\n\n # Get Desired Point to Get\n self.desired_point = Point()\n self.desired_point.x = rospy.get_param(\"/drone/desired_pose/x\")\n self.desired_point.y = rospy.get_param(\"/drone/desired_pose/y\")\n self.desired_point.z = rospy.get_param(\"/drone/desired_pose/z\")\n\n self.desired_point_epsilon = rospy.get_param(\n \"/drone/desired_point_epsilon\")\n\n # We place the Maximum and minimum values of the X,Y,Z,R,P,Yof the pose\n\n high = numpy.array([self.work_space_x_max,\n self.work_space_y_max,\n self.work_space_z_max,\n self.max_roll,\n self.max_pitch,\n self.max_yaw,\n self.max_sonar_value])\n\n low = numpy.array([self.work_space_x_min,\n self.work_space_y_min,\n self.work_space_z_min,\n -1*self.max_roll,\n -1*self.max_pitch,\n -numpy.inf,\n self.min_sonar_value])\n\n self.observation_space = spaces.Box(low, high)\n\n rospy.logdebug(\"ACTION SPACES TYPE===>\"+str(self.action_space))\n rospy.logdebug(\"OBSERVATION SPACES TYPE===>\" +\n str(self.observation_space))\n\n # Rewards\n self.closer_to_point_reward = rospy.get_param(\n \"/drone/closer_to_point_reward\")\n self.not_ending_point_reward = rospy.get_param(\n \"/drone/not_ending_point_reward\")\n self.end_episode_points = 
rospy.get_param(\"/drone/end_episode_points\")\n\n self.cumulated_steps = 0.0\n\n # Here we will add any init functions prior to starting the MyRobotEnv\n super(ParrotDroneGotoEnv, self).__init__(ros_ws_abspath)", "def __init__(self, name, rate):\n super(ControlNode, self).__init__(name, rate)\n self.mutex = RLock()\n self.controller = InverseDynamicController()\n self.ready = False\n\n # Physical quantities from sensors\n self.eta2 = np.zeros((3, 1))\n self.ni = np.zeros((6, 1))\n\n # References from topic\n self.speed_ref = np.zeros((3, 1))\n self.eta1_ref_body = Quantity(np.zeros((3, 1)))\n self.eta2_ref = Quantity(np.zeros((3, 1)))\n self.ni_ref = Quantity(np.zeros((6, 1)))\n\n # Error value\n self.ni_tilde = np.zeros((6, 1))\n\n # flags to wait first cycle\n self.reference_flags = {'ll': False, 'rpy': False, 'depth': False}\n\n # ROS\n rospy.init_node(self.node_name, anonymous=False)\n self.node_loop = rospy.Rate(self.node_rate)\n self.StartSubscriptions()\n self.pub_tau = rospy.Publisher('/control/tau', Tau, queue_size=1)\n self.pub_measurement = rospy.Publisher('/measurement', Measurement, queue_size=1)", "def __init__(self):\n \n # Publishers\n self.pub_vel_prop = rospy.Publisher('/aer1217_ardrone/vel_prop', \n MotorCommands, queue_size=300)\n \n self.model_name = 'ARDroneCarre'\n \n self.pub_vicon_data = rospy.Publisher('/vicon/{0}/{0}'.format(\n self.model_name),\n TransformStamped, queue_size=30)\n\n \n # Subscribers\n self.sub_gazebo_pose = rospy.Subscriber('/aer1217_ardrone/gazebo_state', \n GazeboState,\n self.update_quadrotor_state)\n \n self.sub_cmd_vel = rospy.Subscriber('cmd_vel_RHC', \n Twist,\n self.update_offboard_command)\n \n \n # Initialize messages for publishing\n self.vel_prop_msg = MotorCommands()\n self.quadrotor_state = TransformStamped()\n \n # Run the onboard controller at 200 Hz\n self.onboard_loop_frequency = 200.\n \n # Create an onboard controller for calculation of the motor commands\n self.onboard_controller = ARDroneOnboardController()\n \n # Run this ROS node at the onboard loop frequency\n self.pub_prop_vel = rospy.Timer(rospy.Duration(1. / \n self.onboard_loop_frequency), self.update_motor_speeds)\n \n # Keep time for differentiation and integration within the controller\n self.old_time = rospy.get_time()", "def initialize(self):\n if self.real:\n self.agent.connect(self)\n else:\n self.connect() # Connect python client to VREP\n self.agent.connect(self)", "def __init__(self):\n # Call parent initialisers\n # SecmUtilityCore.__init__(self)\n Node.__init__(self, \"vehicle_sim\")\n # super().__init__('vehicle_sim')\n\n self.vehicle_marker_array = MarkerArray()\n self.vehicle_marker = Marker()\n self.pose_msg = Pose()\n self.control_msg = Control()\n\n self.model = Model()\n\n # Create subscribers to listen to SECM output\n self.create_subscription(\n msg_type=Control,\n topic=\"/control\",\n callback=self.receive_control_msg,\n qos_profile=BEST_EFFORT_QOS_PROFILE\n )\n\n # Create pose publisher\n self.pose_publisher = self.create_publisher(\n msg_type=Pose,\n topic=\"/pose\",\n qos_profile=BEST_EFFORT_QOS_PROFILE\n )\n\n # Create marker publisher\n self.vehicle_marker_publisher = self.create_publisher(\n msg_type=Marker,\n topic=\"/vehicle_marker\",\n qos_profile=BEST_EFFORT_QOS_PROFILE\n )\n\n # Setup timers to spin the execution loop. 
\n self.create_timer(1.0/30.0, self.execute)", "def __init__(self):\n self.node_name = \"face_ctrl\"\n rospy.init_node(self.node_name)\n\n rospy.loginfo(\"[FACE] initializing controller\")\n\n self.ros_param_data = self.read_ros_parameters()\n\n # initializing camera\n self.face_cascade_name = self.ros_param_data[\"frontal_face_xml_path\"]\n self.eyes_cascade_name = self.ros_param_data[\"eye_xml_path\"]\n self.face_cascade = cv.CascadeClassifier()\n self.eyes_cascade = cv.CascadeClassifier()\n self.camera_device = 2\n self.cap = cv.VideoCapture(self.camera_device, cv.CAP_V4L)\n if self.cap is None or not self.cap.isOpened():\n rospy.logerr(\"[FACE] Could not connect to the camera!\")\n self.check_camera()\n _, frame = self.cap.read()\n self.image_height, self.image_width = frame.shape[:2]\n\n # initialize publisher\n self.ros_pub_servo_array = rospy.Publisher(\"/cmd_servo_array\", ServoMotorArray, queue_size=1)\n self.bridge = CvBridge()\n self.ros_pub_image = rospy.Publisher(\"camera/image\", Image, queue_size=1)\n rospy.loginfo(\"[FACE] initalized publisher\")\n\n # initialize subscriber\n self.servo_ids = []\n self.servo_angles = []\n self.servo_array_msg = ServoMotorArray()\n self.servo_array_msg.servos = []\n for i in range(3):\n self.servo_array_msg.servos.append(ServoMotor_msg())\n\n self.ros_sub_servo_array = rospy.Subscriber(\"/low_level_ctrl/servo_array\", ServoMotorArray, self.store_servo_state)\n rospy.loginfo(\"[FACE] initialized subscriber\")\n\n # initialize the controllers\n param_pid_yaw = self.ros_param_data[\"yaw_controller_gains\"]\n self.pid_controller_yaw = PIDController(param_pid_yaw[\"kp\"], param_pid_yaw[\"ki\"], param_pid_yaw[\"kd\"])\n param_pid_pitch = self.ros_param_data[\"pitch_controller_gains\"]\n self.pid_controller_pitch = PIDController(param_pid_pitch[\"kp\"], param_pid_pitch[\"ki\"], param_pid_pitch[\"kd\"])\n\n rospy.loginfo(\"[FACE] node initialization finished\")", "def initialize(self):\n self.log.info(\"Initialize raspPinball hardware.\")\n\n self.config = self.machine.config['rasppinball']\n self.machine.config_validator.validate_config(\"rasppinball\", self.config)\n print(\"***************************\")\n print(self.config)\n #self.machine_type = (\n # self.machine.config['hardware']['driverboards'].lower())\n\n self._connect_to_hardware()\n\n\n # keypad\n self._kp = Keypad()\n self.old_key = \"\"\n self.key = \"\"\n # leds\n self.init_strips()", "def __init__(self):\n self.available_angles = [-30, -15, 0, 15, 30]\n self.ros_service = rospy.Service(\"turn_camera\", TurnCamera, self.send_image)", "def initnodes(self):\n newnodes = self.config[\"nodes\"]\n newpynodes = self.config[\"pynodes\"]\n logging.info('Loading initial nodes: {}'.format(newnodes))\n logging.info('Loading initial python nodes: {}'.format(newpynodes))\n for node in newnodes:\n self.runnode(node)\n for node in newpynodes:\n self.runnode(node, True)", "def __init__(self):\n rospy.init_node('TruckSimNode')\n\n self.steer_angle_topic = rospy.get_param('~steer_angle_topic', \"steer_angle\")\n self.chassis_force_topic = rospy.get_param('~chassis_force_topic', \"chassis_force\")\n\n rospy.Subscriber(\"joy\", Joy, self.joyCb)\n\n self.steer_pub = rospy.Publisher(self.steer_angle_topic, Float64, queue_size=1)\n self.chassis_force_pub = rospy.Publisher(self.chassis_force_topic, Float64, queue_size=1)\n\n # array of joy axes:\n # 0: turn - (+ve = left)\n # 1: acceleration (+ve = increase in current direction)\n # 2: gear\n self.steer = 0\n self.accel = 0\n self.gear = 0\n 
self.steer_joint = Float64()\n self.chassis_force = Float64()", "def __init__(self):\n rospy.logdebug(\"Start ParrotDroneEnv INIT...\")\n\n #Spawn Parrot AR Drone through launch file\n self.ros_pkg_name=\"drone_construct\"\n self.launch_file_name=\"put_drone_in_world.launch\"\n \n super(ParrotDroneEnv, self).__init__(\n ros_pkg_name=self.ros_pkg_name,\n launch_file=self.launch_file_name,\n start_init_physics_parameters=True,\n reset_world_or_sim='WORLD')\n\n rospy.logdebug(\"Finished ParrotDroneEnv INIT...\")", "def setup(self, **kwargs):\n self.logger.debug(\"{}.setup()\".format(self.qualified_name))\n # Store the node. You can use it for any ROS-related functionality\n try:\n self.node = kwargs['node']\n except KeyError as e:\n error_message = \"didn't find 'node' in setup's kwargs \" + \\\n \"[{}][{}]\".format(self.qualified_name,\n self.__class__.__name__)\n raise KeyError(error_message) from e # 'direct cause' traceability\n # Not get setup the blackboard keys acess\n self.blackboard = py_trees.blackboard.Client()\n self.blackboard.register_key(key=self.pos_key,\n access=py_trees.common.Access.WRITE)\n self.blackboard.register_key(key=self.angle_key,\n access=py_trees.common.Access.WRITE)\n self.logger.debug(\" %s [GenerateNextPose::setup()]\" % self.name)", "def __init__(self):\n self.current_state_g = State()\n self.current_pose_g = Odometry()\n self.correction_vector_g = Pose()\n self.local_offset_pose_g = Point()\n self.waypoint_g = PoseStamped()\n\n self.current_heading_g = 0.0\n self.local_offset_g = 0.0\n self.correction_heading_g = 0.0\n self.local_desired_heading_g = 0.0\n\n self.ns = rospy.get_namespace()\n if self.ns == \"/\":\n rospy.loginfo(CBLUE2 + \"Using default namespace\" + CEND)\n else:\n rospy.loginfo(CBLUE2 + \"Using {} namespace\".format(self.ns) + CEND)\n\n self.local_pos_pub = rospy.Publisher(\n name=\"{}mavros/setpoint_position/local\".format(self.ns),\n data_class=PoseStamped,\n queue_size=10,\n )\n\n self.currentPos = rospy.Subscriber(\n name=\"{}mavros/global_position/local\".format(self.ns),\n data_class=Odometry,\n queue_size=10,\n callback=self.pose_cb,\n )\n\n self.state_sub = rospy.Subscriber(\n name=\"{}mavros/state\".format(self.ns),\n data_class=State,\n queue_size=10,\n callback=self.state_cb,\n )\n\n rospy.wait_for_service(\"{}mavros/cmd/arming\".format(self.ns))\n\n self.arming_client = rospy.ServiceProxy(\n name=\"{}mavros/cmd/arming\".format(self.ns), service_class=CommandBool\n )\n\n rospy.wait_for_service(\"{}mavros/cmd/land\".format(self.ns))\n\n self.land_client = rospy.ServiceProxy(\n name=\"{}mavros/cmd/land\".format(self.ns), service_class=CommandTOL\n )\n\n rospy.wait_for_service(\"{}mavros/cmd/takeoff\".format(self.ns))\n\n self.takeoff_client = rospy.ServiceProxy(\n name=\"{}mavros/cmd/takeoff\".format(self.ns), service_class=CommandTOL\n )\n\n rospy.wait_for_service(\"{}mavros/set_mode\".format(self.ns))\n\n self.set_mode_client = rospy.ServiceProxy(\n name=\"{}mavros/set_mode\".format(self.ns), service_class=SetMode\n )\n\n rospy.wait_for_service(\"{}mavros/cmd/command\".format(self.ns))\n\n self.command_client = rospy.ServiceProxy(\n name=\"{}mavros/cmd/command\".format(self.ns), service_class=CommandLong\n )\n rospy.loginfo(CBOLD + CGREEN2 + \"Initialization Complete.\" + CEND)", "def __init__(__self__,\n resource_name: str,\n args: NodeDriverArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def initialize(self, node: MComputeNode):\n raise Exception(\"Subclass responsibility\")", "def _start_oef_node(self, 
network_node):", "def __init__(self):\r\n self.pub_tf = rospy.Publisher(\"/tf\", tf.msg.tfMessage, queue_size=1)\r\n\r\n #Loads the robot model, which contains the robot's kinematics information\r\n self.robot = URDF.from_parameter_server()\r\n\r\n #Subscribes to information about what the current joint values are.\r\n rospy.Subscriber(\"joint_states\", JointState, self.callback)", "def __init__(self, total_nodes_to_create):\n Topo.__init__(self)\n\n # Directory where this file / script is located\"\n selfPath = os.path.dirname(os.path.abspath(\n inspect.getfile(inspect.currentframe()))) # script directory\n\n # Initialize a service helper for Quagga with default options\n self.quaggaSvc = QuaggaService(autoStop=False)\n\n # Path configurations for mounts\n self.quaggaBaseConfigPath = selfPath + '/configs/'\n\n # List of Quagga host configs\n self.base_ip_address = [172, 0, 1, 1]\n self.subnet_mask = 16\n self.loopback_address = '127.0.0.1/24'\n self.host_prefix = 'a'\n self.total_nodes = 0\n\n # Add switch for IXP fabric\n self.ixpfabric = self.addSwitch('fabric-sw1')\n\n for i in range(total_nodes_to_create):\n self.add_node()", "def init():", "def __init__(self):\n rospy.init_node(\"navigate_map\") # start node\n\tself.frontier_show = []\n\tself.first = True\n\tself.initialS = None\n\tself.once = False\n self.frontier = []\n self.centroidValue = None\n self.regions = []\n self.detected = False\n\tself.resolution = 0.05\n \tself.start = None\n\tself.transformed_map = None\n \tself.odom_sub = rospy.Subscriber('/odom', Odometry, self.odom_callback)\n \tself.sub = rospy.Subscriber(\"/map\", OccupancyGrid, self.handle_navigate_map)\n \tself.robot_path_pub = rospy.Publisher('/robot_path', Path, queue_size = 5)\n self.cent_pub = rospy.Publisher('/cent_set', GridCells, queue_size = 5)\n\tself.front_pub = rospy.Publisher('/frontier_set', GridCells, queue_size = 5)\n\tself.goal_pos_pub = rospy.Subscriber('/move_base_simple/goal',PoseStamped ,self.goal_call_back)\n\tself.tf_listener = tf.TransformListener()", "def __init__(self):\n # Manage command line args\n args = ut_generic.getArgsParser().parse_args()\n self.gzclient = args.gzclient\n self.multiInstance = args.multiInstance\n self.port = args.port\n\n # Launch simulation in a new Process\n self.launch_subp = ut_launch.startLaunchServiceProcess(\n ut_launch.generateLaunchDescription(\n self.gzclient, self.multiInstance, self.port))\n\n # Create the node after the new ROS_DOMAIN_ID is set in\n # generate_launch_description()\n rclpy.init(args=None)\n self.node = rclpy.create_node(self.__class__.__name__)\n\n # class variables\n # self._observation_msg = None\n self._observation_img = None\n # self.max_episode_steps = 1024 # default value, can be updated from baselines\n self.max_episode_steps = 100\n self.iterator = 0\n self.reset_flag = True\n\n # ai_agent\n self.pub = self.node.create_publisher(String, '/pos/action_id')\n camera_names = ['/cam/custom_camera/image_raw']\n self.sub_img = self.node.create_subscription(\n Image, camera_names[0], self.observation_img_callback)\n self.sub_odom = self.node.create_subscription(Odometry,'/pos/odom_pos', self.odom_get_callback)\n self.reset_sim = self.node.create_client(Empty, '/reset_simulation')\n\n # 0: \"forward\", 1: \"left\", 2: \"right\"\n self.action_space = gym.spaces.Discrete(3)\n\n self.pos = np.array([0, 0])\n self.target_pos = np.array([-6, 1])\n\n # observation = (240,320,3)\n screen_height, screen_width = (240, 320)\n self.observation_space = spaces.Box(\n low=0, high=255, shape=(\n 
screen_height, screen_width, 3), dtype=np.uint8)\n\n self.bridge = CvBridge()", "def __init__(self, nodeid, sessionID):\r\n \r\n # iv = b\"1234567890123456\" is an aexample\r\n # \r\n self.nodeid = nodeid\r\n self.iv = bytes(random.getrandbits(8) for _ in range(16))\r\n self.staticiv = b'like' * 4\r\n self.ivkey = b'hihi' * 4\r\n self.datakey = b'bye!' * 4\r\n self.passphrase = b'calv' * 4\r\n self.sessionID = sessionID\r\n self.G_LED = PWM(Pin(21),freq=10,duty=256)\r\n self.R_LED = Pin(17,Pin.OUT, value=1)\r\n self.x_accel = None\r\n self.y_accel = None\r\n self.z_accel = None\r\n self.temp = None", "def __init__(self, log=None):\n\t\trospy.logdebug('MAVROSListener initialization')\n\t\t\n\t\t# ROS custom subscribers to store data from topics\n\t\tself.__subs = {}\n\t\tself.__initSubscribers()\n\t\t\n\t\trospy.logdebug('MAVROSListener initialized')", "def __init__(self):\n\n self.nodes = {}", "def __init__(self):\n topics = load_topics()\n\n # init node\n rospy.init_node('sonar_sim', anonymous=False)\n\n # load params\n sampling_freq = rospy.get_param(\"~sampling_freq\", 1e6)\n pinger_freq = rospy.get_param(\"~pinger_freq\", 27e3)\n noise = rospy.get_param(\"~noise_stdev\", 20)\n pinger_x = rospy.get_param(\"~x\", 0.0)\n pinger_y = rospy.get_param(\"~y\", 0.0)\n rate = rospy.get_param(\"~rate\", 1)\n zmq_port = rospy.get_param(\"~data_port\", \"tcp://*:12345\")\n rospy.loginfo(\"Initialized sonar sim\")\n rospy.loginfo(\"Sampling freq: {:.0f} Hz\".format(sampling_freq))\n rospy.loginfo(\"Pinger freq: {:.0f} Hz\".format(pinger_freq))\n rospy.loginfo(\"Noise stdev: {:.1f} deg\".format(noise))\n rospy.loginfo(\"Pinger location: {:.1f}, {:.1f}\".format(\n pinger_x, pinger_y))\n rospy.loginfo(\"Ping rate: {:.1f} Hz\".format(rate))\n rospy.loginfo(\"ZMQ data port: {}\".format(zmq_port))\n\n # robot pose sub\n state_topic = topics['/topic/sensor/dynamics_state']\n self._state_sub = rospy.Subscriber(\n state_topic, DynamicsState, self.__state_cb, queue_size=1)\n rospy.loginfo('Subscribing for state to ' + state_topic)\n self.state = None\n\n # init zmq\n context = zmq.Context()\n self.__socket = context.socket(zmq.PUB)\n self.__socket.bind(zmq_port)\n rospy.loginfo('Publishing sonar data to ' + zmq_port)\n\n # init sim\n self.sim = SonarSim(\n pinger_freq=pinger_freq,\n sampling_freq=sampling_freq,\n noise=noise,\n pinger_loc=(pinger_x, pinger_y))\n\n # set timer\n self.rate = rospy.Rate(rate)", "def test_init(self):\n\n with patch.object(rospy, \"wait_for_service\", return_value=True), \\\n patch.object(rospy, \"get_param\", mock_get_param), \\\n patch.object(rospy, \"init_node\", return_value=None), \\\n patch.object(rospy, 'spin', return_value=None), \\\n patch.object(rospy.Service, '__init__', return_value=None) as mock_service_init, \\\n patch.object(rospy.Publisher, '__init__', return_value=None) as mock_publisher_init, \\\n patch.object(Thread, 'start', return_value=None) as mock_start_thread, \\\n patch.object(Thread, 'join', return_value=None), \\\n patch.object(socket.socket, 'connect', return_value=True) as mock_connect, \\\n patch.object(socket.socket, 'bind', return_value=True) as mock_bind:\n\n src.drivers.hyundai_robot.init()\n\n for sn in [\"move_along\", \"abort\", \"store_poses\", \"move_pose\", \"set_speed\", \"move_between\"]:\n # both required services are advertised\n assert len([call for call in mock_service_init.mock_calls if call[1][0] == sn]) == 1\n\n # topic is advertised\n assert mock_publisher_init.call_count == 2\n assert mock_publisher_init.mock_calls[0][1][0] == 
\"robot_state\"\n assert mock_publisher_init.mock_calls[0][1][1] == RobotState\n\n assert mock_publisher_init.mock_calls[1][1][0] == \"robot_controller_joint_state\"\n assert mock_publisher_init.mock_calls[1][1][1] == JointState", "def configure_ros(self):\n rospy.init_node('collision_warning', anonymous=True)\n self.soundPub = rospy.Publisher('/mobile_base/commands/sound', Sound, queue_size=1)\n rospy.Subscriber('/mobile_base/events/bumper', BumperEvent , self.bumperCallback)", "def __init__(self):\n rospy.init_node('square')\n rospy.Subscriber('/odom', Odometry, self.processOdom)\n self.pub = rospy.Publisher('/cmd_vel', Twist, queue_size=10)\n\n\n self.sleepy = rospy.Rate(2)\n\n # make dictionary that calls functions\n self.state = {'i':self.forward, ',':self.backward,\n 'l':self.rightTurn, 'j':self.leftTurn,\n 'k':self.stop}\n\n self.x = 0 # position in meters\n self.y = 0 # position in meters\n self.z = 0 # angle in degrees\n self.desiredX = 0\n self.desiredY = 0\n self.desiredZ = 0\n\n self.linearVector = Vector3(x=0.0, y=0.0, z=0.0)\n self.angularVector = Vector3(x=0.0, y=0.0, z=0.0)\n self.sendMessage()\n\n self.start = time()\n\n # get key interupt things\n self.settings = termios.tcgetattr(sys.stdin)\n self.key = None", "def __init__(self):\n self.image_subscriber = rospy.Subscriber('/raspicam_node/image/compressed', CompressedImage, self.imageCallback)\n print 'Waiting for classifier service to come up...'\n rospy.wait_for_service('/classifier_node/classify')\n self.classify_client = rospy.ServiceProxy('/classifier_node/classify', Classify)", "def agent_init(self):\n pass", "def initialize(self):", "def initialize(self):", "def initialize(self):", "def initialize(self):", "def initialize(self):\n\t\tpass", "def __init__(self):\n rospy.init_node('approach')\n\n rospy.Subscriber('/scan', LaserScan, self.scan_callback)\n self.vel_pub = rospy.Publisher('/cmd_vel_mux/input/navi', Twist,\n queue_size=10)\n self.scan = None", "def init(self):", "def init(self):", "def _initialize(self):\n pass", "def _initialize(self):\n pass", "def _initialize(self):\n pass", "def __init__(self,\n nodeName,\n hostname=\"localhost\",\n port=33133,\n verbose=False):\n # Create the XMLRPC server\n self.__server = SimpleXMLRPCServer(\n (hostname, port),\n logRequests=verbose)\n\n # Register XMLRCP introspection methods like:\n # system.listMethods, system.methodHelp and system.methodSignature\n # NOTE: These are not supported by the proper ROS master\n self.__server.register_introspection_functions()\n\n # Support multi-call methods which are used by roslaunch\n self.__server.register_multicall_functions()\n\n # Register the XMLRPC functions to support the ROS master API\n self.__masterFunctions = RosMasterFunctions(nodeName)\n self.__server.register_instance(self.__masterFunctions)", "def __init__(self):\n\n rospy.init_node('mcl_tf')\n br = tf.TransformBroadcaster()\n self.tf_listener = tf.TransformListener()\n \n # Give the listener some time to accumulate transforms... 
\n rospy.sleep(1.0) \n\n rospy.Subscriber('amcl_pose', PoseStamped, self.pose_callback)\n\n self.transform_position = np.array([0., 0., 0.])\n self.transform_quaternion = np.array([0., 0., 0., 1.0])\n \n # Broadcast the transform at 10 HZ\n while not rospy.is_shutdown():\n br.sendTransform(self.transform_position,\n self.transform_quaternion,\n rospy.Time.now(),\n \"odom\",\n \"map\")\n rospy.sleep(.1)", "def init(self) -> None:\n ...", "def __init__(self, node):\n super().__init__(node, USB_MOTION_ID)\n self.node_callbacks = (USB_AVAILABLE_ID, USB_MOTION_ID)", "def __init__(self):\n # Variables that we give through the constructor.\n # None in this case\n\n # Internal Vars\n # TODO[done] add controler Hint: $ rosservice call /jetbot_0/controller_manager/list_controllers\n self.controllers_list = ['jetbot_joint_state_controller',\n 'jetbot_velocity_controller'\n ]\n # TODO[done] add namespace Hint: $ rostopic list | grep controller\n self.robot_name_space = \"jetbot_0\"\n\n # We launch the init function of the Parent Class robot_gazebo_env.RobotGazeboEnv\n super(JetbotRobotEnv, self).__init__(controllers_list=self.controllers_list,\n robot_name_space=self.robot_name_space,\n reset_controls=True)\n\n\n\n \"\"\"\n To check any topic we need to have the simulations running, we need to do two things:\n 1) Unpause the simulation: without that th stream of data doesnt flow. This is for simulations\n that are pause for whatever the reason\n 2) If the simulation was running already for some reason, we need to reset the controlers.\n This has to do with the fact that some plugins with tf, dont understand the reset of the simulation\n and need to be reseted to work properly.\n \"\"\"\n self.gazebo.unpauseSim()\n self.controllers_object.reset_controllers()\n self._check_all_sensors_ready()\n\n # We Start all the ROS related Subscribers and publishers\n # TODO[done] add subscriber publisher\n rospy.Subscriber(\"/jetbot_0/joint_states\", JointState, self._joints_callback)\n rospy.Subscriber(\"/jetbot_0/jetbot_velocity_controller/odom\", Odometry, self._odom_callback)\n\n self._vel_pub = rospy.Publisher('/jetbot_0/jetbot_velocity_controller/cmd_vel',\n Twist, queue_size=6) # ??? 
queue size\n\n self._check_publishers_connection()\n \n self.gazebo.pauseSim()", "def __init__(self, robot, feedback):\n rospy.init_node(\"MecademicRobot_driver\", anonymous=True)\n self.joint_subscriber = rospy.Subscriber(\"MecademicRobot_joint\", JointState, self.joint_callback)\n self.pose_subscriber = rospy.Subscriber(\"MecademicRobot_pose\", Pose, self.pose_callback)\n self.command_subscriber = rospy.Subscriber(\"MecademicRobot_command\", String, self.command_callback)\n self.gripper_subscriber = rospy.Subscriber(\"MecademicRobot_gripper\", Bool, self.gripper_callback)\n self.reply_publisher = rospy.Publisher(\"MecademicRobot_reply\", String, queue_size=1)\n self.joint_publisher = rospy.Publisher(\"MecademicRobot_joint_fb\", JointState, queue_size=1)\n self.pose_publisher = rospy.Publisher(\"MecademicRobot_pose_fb\", Pose, queue_size=1)\n self.status_publisher = rospy.Publisher(\"MecademicRobot_status\", UInt8MultiArray, queue_size=1)\n\n self.robot = robot\n self.feedback = feedback\n\n self.socket_available = True\n\n self.feedbackLoop()", "async def init(self) -> None:", "async def init(self) -> None:", "async def initialize(self):", "def init(self) -> None:", "def initialize(self):\n pass", "def initialize(self):\n pass", "def initialize(self):\n pass", "def initialize(self):\n pass", "def initialize(self):\n pass", "def init():\n\n global registry, fsk_router, ook_router\n\n radio.init()\n OpenThings.init(Devices.CRYPT_PID)\n\n fsk_router = Registry.Router(\"fsk\")\n\n #OOK receive not yet written\n #It will be used to be able to learn codes from Energenie legacy hand remotes\n ##ook_router = Registry.Router(\"ook\")\n\n registry = Registry.DeviceRegistry()\n registry.set_fsk_router(fsk_router)\n ##registry.set_ook_router(ook_router\n\n path = os.path.join(sys.path[0], registry.DEFAULT_FILENAME)\n if os.path.isfile(path):\n registry.load_from(path)\n print(\"loaded registry from file\")\n registry.list()\n fsk_router.list()\n\n # Default discovery mode, unless changed by app\n ##discovery_none()\n ##discovery_auto()\n ##discovery_ask(ask)\n discovery_autojoin()\n ##discovery_askjoin(ask)", "def initialize(self):\r\n pass", "def initialize(self):\r\n pass", "def __init__(self, *args):\n _snap.TModeNetNodeI_swiginit(self, _snap.new_TModeNetNodeI(*args))", "def init(self, parameters, agent_parameters):\n pass", "def __init__(self):\r\n # Global\r\n self._gzip_lvl = None\r\n self._dev_mode = None\r\n self._pw_file = None\r\n self._host_ubuntu = None\r\n self._host_ros = None\r\n self._container_ubuntu = None\r\n self._container_ros = None\r\n\r\n # Network\r\n self._container_if = None\r\n self._external_ip = None\r\n self._internal_ip = None\r\n self._container_ip = None\r\n self._localhost_ip = None\r\n\r\n # Comm\r\n self._http_port = None\r\n self._ws_port = None\r\n self._internal_port = None\r\n self._external_port = None\r\n self._comm_port = None\r\n self._ros_proxy_port = None\r\n\r\n # Converters\r\n self._converters = None\r\n\r\n # Machine\r\n self._size = None\r\n self._cpu = None\r\n self._memory = None\r\n self._bandwidth = None\r\n self._special_features = None\r\n self._rootfs = None\r\n self._conf_dir = None\r\n self._data_dir = None\r\n self._packages = None", "def __init__(self):\n\tself.position1 = NavSatFix()\n\tself.position2 = NavSatFix()\n\n rospy.init_node(\"communicator\", anonymous=True)\n\n rospy.Subscriber('/whole/pirate/position', NavSatFix, self.update_position1)\n rospy.Subscriber('/whole/greenBoat/position', NavSatFix, self.update_position2)\n\n 
self.comu1_pub = rospy.Publisher('position1', NavSatFix, queue_size=10)\n self.comu2_pub = rospy.Publisher('position2', NavSatFix, queue_size=10)\n\n self.freq = rospy.get_param(\"config/rate\")\n self.rate = rospy.Rate(self.freq)\n\t\n\tself.position1_publisher()\n\t#self.position2_publisher()", "def initialize_element(self):\n init_command = {\n \"StartLearning\": True,\n \"AgentID\": 1854\n }\n msg = json.dumps(init_command).encode('unicode_escape')\n self.socket_control.send(msg)", "def __init__(self, nodes=None):\r\n self.nodes = nodes", "def __init__(self):\n self.server_name = 'Binary Light Device'\n self.device = None", "def __init__(self, nodes):\n\n self._nodes = nodes", "def __init__(self, client, nr, uid, data):\r\n self._client = client\r\n self._nr = nr\r\n self._name = name = 'C{0}'.format(nr)\r\n self._terminating = None\r\n\r\n # Additional container parameters to use\r\n # TODO: At the moment not used; currently data also does not contain\r\n # these information\r\n# self._size = data.get('size', 1)\r\n# self._cpu = data.get('cpu', 0)\r\n# self._memory = data.get('memory', 0)\r\n# self._bandwidth = data.get('bandwidth', 0)\r\n# self._specialFeatures = data.get('specialFeatures', [])\r\n\r\n client.registerContainer(self)\r\n\r\n # Create the directories for the container\r\n self._confDir = confDir = pjoin(client.confDir, name)\r\n self._dataDir = dataDir = pjoin(client.dataDir, name)\r\n\r\n if os.path.isdir(confDir):\r\n raise ValueError('There is already a configuration directory for '\r\n \"'{0}' \\n Please remove it manually if the engine \"\r\n 'did not shut down correctly on last execution and '\r\n 'you are sure it is not in use. \\n dir: {1}.'.format(name, confDir))\r\n\r\n if os.path.isdir(dataDir):\r\n raise ValueError('There is already a data directory for '\r\n \"'{0}' \\n Please remove it manually if the engine \"\r\n 'did not shut down correctly on last execution and '\r\n 'you are sure it is not in use. 
\\n dir: {1}.'.format(name, dataDir))\r\n os.mkdir(confDir)\r\n os.mkdir(dataDir)\r\n\r\n # Create additional folders for the container\r\n rceDir = pjoin(dataDir, 'rce')\r\n rosDir = pjoin(dataDir, 'ros')\r\n\r\n os.mkdir(rceDir)\r\n os.mkdir(rosDir)\r\n\r\n if client.rosRel > 'fuerte':\r\n # TODO: Switch to user 'ros' when the launcher is used again\r\n shutil.copytree(pjoin(client.rootfs, 'root/.ros/rosdep'),\r\n pjoin(rceDir, '.ros/rosdep'))\r\n\r\n # Create network variables\r\n bridgeIP = client.bridgeIP\r\n ip = '{0}.{1}'.format(bridgeIP.rsplit('.', 1)[0], nr)\r\n self._address = '{0}:{1}'.format(ip, client.envPort)\r\n self._rosproxyAddress = '{0}:{1}'.format(ip, client.rosproxyPort)\r\n self._fwdPort = str(nr + 8700)\r\n self._rosproxyFwdPort = str(nr + 10700)\r\n\r\n ovsname = data.get('name')\r\n ovsip = data.get('ip')\r\n\r\n if ovsname and ovsip:\r\n ovsif = 'eth1'\r\n ovsup = pjoin(confDir, 'ovsup')\r\n\r\n if client.ubuntuRel > 'quantal':\r\n ovsdown = pjoin(confDir, 'ovsdown')\r\n else:\r\n ovsdown = None\r\n else:\r\n ovsif = ovsup = ovsdown = None\r\n\r\n # Construct password\r\n passwd = encodeAES(cipher(client.masterPassword),\r\n salter(uid, client.infraPassword))\r\n\r\n # Create upstart scripts\r\n upComm = pjoin(confDir, 'upstartComm')\r\n with open(upComm, 'w') as f:\r\n f.write(_UPSTART_COMM.format(masterIP=client.masterIP,\r\n masterPort=client.masterPort,\r\n internalPort=client.envPort,\r\n uid=uid, passwd=passwd))\r\n\r\n upRosapi = pjoin(confDir, 'upstartRosapi')\r\n with open(upRosapi, 'w') as f:\r\n f.write(_UPSTART_ROSAPI.format(proxyPort=client.rosproxyPort))\r\n\r\n # TODO: For the moment there is no upstart script for the launcher.\r\n# upLauncher = pjoin(confDir, 'upstartLauncher')\r\n# with open(upLauncher, 'w') as f:\r\n# f.write(_UPSTART_LAUNCHER)\r\n\r\n # Setup network\r\n networkIF = pjoin(confDir, 'networkInterfaces')\r\n with open(networkIF, 'w') as f:\r\n f.write('auto lo\\n')\r\n f.write('iface lo inet loopback\\n')\r\n f.write('\\n')\r\n f.write('auto eth0\\n')\r\n f.write('iface eth0 inet static\\n')\r\n f.write(' address {0}\\n'.format(ip))\r\n f.write(' gateway {0}\\n'.format(bridgeIP))\r\n f.write(' dns-nameservers {0} 127.0.0.1\\n'.format(bridgeIP))\r\n\r\n if ovsif:\r\n f.write('\\n')\r\n f.write('auto {0}\\n'.format(ovsif))\r\n f.write('iface {0} inet static\\n'.format(ovsif))\r\n f.write(' address {0}\\n'.format(ovsip))\r\n\r\n # Create up/down script for virtual network interface if necessary\r\n if ovsup:\r\n with open(ovsup, 'w') as f:\r\n f.write(_LXC_NETWORK_SCRIPT.format(if_op='up', ovs_op='add',\r\n name=ovsname))\r\n\r\n os.chmod(ovsup, stat.S_IRWXU)\r\n\r\n if ovsdown:\r\n with open(ovsdown, 'w') as f:\r\n f.write(_LXC_NETWORK_SCRIPT.format(if_op='down', ovs_op='del',\r\n name=ovsname))\r\n\r\n os.chmod(ovsdown, stat.S_IRWXU)\r\n\r\n # TODO: SSL stuff\r\n# if self._USE_SSL:\r\n# # Create a new certificate and key for environment node\r\n# caCertPath = pjoin(self._SSL_DIR, 'Container.cert')\r\n# caCert = loadCertFile(caCertPath)\r\n# caKey = loadKeyFile(pjoin(self._SSL_DIR, 'container/env.key'))\r\n# (cert, key) = createKeyCertPair(commID, caCert, caKey)\r\n#\r\n# # Copy/save file to data directory\r\n# shutil.copyfile(caCertPath, os.path.join(rceDir, 'ca.pem'))\r\n# writeCertToFile(cert, os.path.join(rceDir, 'cert.pem'))\r\n# writeKeyToFile(key, os.path.join(rceDir, 'key.pem'))\r\n\r\n # Create the container\r\n self._container = container = Container(client.reactor, client.rootfs,\r\n confDir, name)\r\n\r\n # Add lxc 
bridge\r\n container.addNetworkInterface('eth0', client.bridgeIF, ip)\r\n\r\n # Add the virtual network bridge if necessary\r\n if ovsname and ovsip:\r\n container.addNetworkInterface(ovsif, None, ovsip, ovsup, ovsdown)\r\n\r\n # Add additional lines to fstab file of container\r\n container.extendFstab(rosDir, 'home/ros', False)\r\n container.extendFstab(rceDir, 'opt/rce/data', False)\r\n container.extendFstab(upComm, 'etc/init/rceComm.conf', True)\r\n # TODO: For the moment there is no upstart script for the launcher.\r\n# container.extendFstab(upLauncher, 'etc/init/rceLauncher.conf', True)\r\n container.extendFstab(upRosapi, 'etc/init/rceRosapi.conf', True)\r\n container.extendFstab(networkIF, 'etc/network/interfaces', True)\r\n\r\n for srcPath, destPath in client.pkgDirIter:\r\n container.extendFstab(srcPath, destPath, True)", "def __init__(self, *args):\n _snap.TNEANetNodeI_swiginit(self, _snap.new_TNEANetNodeI(*args))", "def Setup(self):\n self.Peers = [] # active nodes that we're connected to\n self.KNOWN_ADDRS = [] # node addresses that we've learned about from other nodes\n self.DEAD_ADDRS = [] # addresses that were performing poorly or we could not establish a connection to\n self.MissionsGlobal = []\n self.NodeId = random.randint(1294967200, 4294967200)", "def __init__(self, name=None, node_type=\"REGULAR\", create=False, vm=None):\n self.bootstraped = False\n self.name = name\n self.type = node_type\n self.vm = None\n if not vm is None:\n # init a node from a VM\n self.from_vm(vm)\n if create:\n self.create()", "def initialise(self):", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def __init__(self):\n rospy.init_node('shape_recognizer')\n self.cv_image = np.zeros((480,640)) # the latest image from the camera\n self.hsv_image = np.zeros((480,640))\n self.bridge = CvBridge() # used to convert ROS messages to OpenCV\n cv2.namedWindow('video_window')\n cv2.moveWindow('video_window', 600, 600)\n rospy.Subscriber(\"/camera/image_raw\", Image, self.process_image)\n self.edge_detected = np.zeros((480,640))\n self.contour_image = np.zeros((480,640))\n self.minVal = 50\n self.maxVal = 87\n self.res = []\n self.test_image = cv2.imread(\"./square_base.png\",-1)", "def initialize(self, create_new=True, sysid=\"\"):", "def __init__(self):\n super().__init__('node_name')\n self.create_timer(0.2, self.timer_callback)\n\n self.count = 1", "def _init_node_parm(self, key):\n wf_net_conf = WorkFlowNetConfML(key)\n self.model_path = wf_net_conf.model_path\n self.ml_class = wf_net_conf.ml_class\n self.config = wf_net_conf.config\n self.batch_size = 10000\n self.model_type = wf_net_conf.model_type\n\n #Todo 어떻게 꺼내는지 승우씨한테 물어볼것\n _wf_data_conf = wf_data_conf(key.split('_')[0]+'_'+key.split('_')[1]+'_'+'dataconf_node')\n self.data_conf = _wf_data_conf.conf\n self.label = _wf_data_conf.label\n self.cell_feature = _wf_data_conf.cell_feature\n self.cross_cell = _wf_data_conf.cross_cell\n self.extend_cell_feature = _wf_data_conf.extend_cell_feature\n self.label_values = _wf_data_conf.label_values\n\n _wf_data_node = wf_data_node(key.split('_')[0] + '_' + key.split('_')[1] + '_' + 'data_node')\n self.multi_read_flag = _wf_data_node.multi_node_flag\n self.predict_path = _wf_data_node.predict_path", "def __init__(self):\r\n\t\t# Publishers\r\n\t\tself._pub_rate = rospy.Publisher('robot/joint_state_publish_rate', UInt16, 
queue_size=10)\r\n\t\tself.image_pub = rospy.Publisher(\"baxter_view\",Image,queue_size=4)\r\n\t\tself._obj_state = rospy.ServiceProxy(\"/gazebo/set_model_state\",SetModelState)\r\n\t\t\r\n\t\t# Link with baxter interface\r\n\t\tself._left_arm = baxter_interface.limb.Limb(\"left\")\r\n\t\tself._right_arm = baxter_interface.limb.Limb(\"right\")\r\n\t\tself._left_joint_names = self._left_arm.joint_names()\r\n\t\tself.grip_left = baxter_interface.Gripper('left', CHECK_VERSION)\r\n\r\n\t\tprint(\"Getting robot state... \")\r\n\t\tself._rs = baxter_interface.RobotEnable(CHECK_VERSION)\r\n\t\tself._init_state = self._rs.state().enabled\r\n\t\tprint(\"Enabling robot... \")\r\n\t\tself._rs.enable()\r\n\t\t\r\n\t\t# Control parameters\r\n\t\tself._rate = 500.0 # Hz\r\n\t\tself._pub_rate.publish(self._rate)\r\n\t\tself.bridge = CvBridge()\r\n\t\tself._left_arm.set_joint_position_speed(0.3)\r\n\t\tself._object_type = 0\r\n\t\tself.object_position = Point(x=0.0, y=0.0, z=0.0)\r\n\t\tself.object_v = 0.0" ]
[ "0.8261381", "0.7474935", "0.73012865", "0.7279841", "0.6911988", "0.6786043", "0.67791384", "0.67517745", "0.6729871", "0.6711965", "0.66651976", "0.66531044", "0.66189027", "0.6601425", "0.6570407", "0.6547083", "0.6542969", "0.65383095", "0.65187514", "0.65016264", "0.64830697", "0.6479049", "0.64574194", "0.6440824", "0.64192575", "0.63898647", "0.63888717", "0.63871616", "0.63717604", "0.6366319", "0.6337337", "0.63338554", "0.6331078", "0.6330945", "0.6299445", "0.62953174", "0.6290705", "0.6287869", "0.62842315", "0.6264967", "0.6243434", "0.62340444", "0.62257904", "0.6222048", "0.6217526", "0.6198875", "0.6198875", "0.6198875", "0.6198875", "0.61664146", "0.61639005", "0.6159399", "0.6159399", "0.61551017", "0.61551017", "0.61551017", "0.61463815", "0.61462545", "0.6145228", "0.61449254", "0.6134633", "0.61278933", "0.6125934", "0.6125934", "0.612387", "0.6116567", "0.60961187", "0.60961187", "0.60961187", "0.60961187", "0.60961187", "0.6090057", "0.60898626", "0.60898626", "0.608123", "0.6080106", "0.6072372", "0.606747", "0.6061733", "0.60589373", "0.60570747", "0.60470307", "0.6038909", "0.602568", "0.6025063", "0.6013443", "0.6011493", "0.60021025", "0.60021025", "0.60021025", "0.60021025", "0.60021025", "0.60021025", "0.60021025", "0.60021025", "0.6001268", "0.59932035", "0.5987357", "0.5980143", "0.5968299" ]
0.6678193
10
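The metadata's objective field marks each record as a (query, document, negatives) triplet, so a record like the one above expands into one training triplet per negative. A minimal sketch, assuming records are plain Python dicts keyed by the field names in the schema (the loading step itself is not part of this dump; the toy record below is hypothetical):

def iter_triplets(record):
    # Yield (query, positive document, negative document) triplets from one
    # record: `query` and `document` are strings, `negatives` is a list of strings.
    query = record["query"]
    positive = record["document"]
    for negative in record["negatives"]:
        yield (query, positive, negative)

# Hypothetical toy record; real records carry the full code strings shown above.
example = {
    "query": "this function serves to initialize the ROS node",
    "document": "def startNode(): ...",
    "negatives": ["def init_ros_node(self): ...", "def initNode(): ..."],
}
for q, pos, neg in iter_triplets(example):
    print(q, "|", pos, "|", neg)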
Checkout code for CESM.

If the sandbox exists, check that the right tag has been checked out. Otherwise, download the code, check out the tag, and run manage_externals. The scripts don't seem to like multiple applications of manage_externals.
def code_checkout(cesm_repo, coderoot, tag):
    sandbox = os.path.split(coderoot)[-1]

    if os.path.exists(coderoot):
        print('Check for right tag: '+coderoot)
        p = Popen('git status', shell=True, cwd=coderoot, stdout=PIPE, stderr=PIPE)
        stdout, stderr = p.communicate()
        stdout = stdout.decode('UTF-8')
        stderr = stderr.decode('UTF-8')
        print(stdout)
        print(stderr)
        if tag not in stdout.split('\n')[0]:
            raise ValueError('tag does not match')

    else:
        stat = check_call(['mkdir', '-p', coderoot])
        if stat != 0:
            sys.exit(1)

        # clone the repo
        p = Popen('git clone '+cesm_repo+' '+sandbox, shell=True, cwd=coderoot+'/..',
                  stdout=PIPE, stderr=PIPE)
        stdout, stderr = p.communicate()
        if stdout:
            print(stdout)
        if stderr:
            print(stderr)
        if p.returncode != 0:
            raise Exception('git error')

        # check out the right tag
        p = Popen('git checkout %s' % tag, shell=True, cwd=coderoot)
        stdout, stderr = p.communicate()
        if stdout:
            print(stdout)
        if stderr:
            print(stderr)
        if p.returncode != 0:
            raise Exception('git error')

        # check out externals
        p = Popen('./manage_externals/checkout_externals -v', shell=True, cwd=coderoot)
        stdout, stderr = p.communicate()
        if stdout:
            print(stdout)
        if stderr:
            print(stderr)
        if p.returncode != 0:
            raise Exception('git error')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n sandbox = create_sandbox()\n directory = download_package_to_sandbox(\n sandbox,\n 'https://pypi.python.org/packages/source/c/checkmyreqs/checkmyreqs-0.1.6.tar.gz'\n )\n print(directory)\n destroy_sandbox(sandbox)", "def checked_out_MPS():\n\n checked_out_packages = os.path.join(os.environ[\"CMSSW_BASE\"], \"src\", \".git\",\n \"info\", \"sparse-checkout\")\n checked_out = False\n git_initialized = False\n try:\n with open(checked_out_packages, \"r\") as f:\n packages = (\"/Alignment/\", \"/Alignment/MillePedeAlignmentAlgorithm/\",\"/*/\")\n for line in f:\n if line.strip() in packages:\n checked_out = True\n break\n git_initialized = True # since the sparse checkout file is there\n except IOError as e:\n if e.args != (2, 'No such file or directory'): raise\n\n return checked_out, git_initialized", "def execute(self):\r\n _logger.info(\"=== Stage=checkout = %s\" % self._config.name)\r\n _logger.info(\"++ Started at %s\" % time.strftime(\"%H:%M:%S\", time.localtime()))\r\n session = self.get_session()\r\n project = session.create(self._config.name)\r\n \r\n session.home = self._config['dir']\r\n \r\n result = self.__find_project(project)\r\n # for testing: result = session.create(\"ppd_sw-fa1f5132#wbernard2:project:sa1spp#1\")\r\n if (result != None):\r\n _logger.info(\"Project found: '%s'\" % result)\r\n\r\n # setting up the project\r\n self.__setup_project(project, result)\r\n else:\r\n _logger.info(\"Checking out from '%s'.\" % project)\r\n \r\n purpose = None\r\n if self._config.has_key('purpose'):\r\n purpose = self._config['purpose']\r\n _logger.info(\"Using purpose: '%s'\" % purpose)\r\n \r\n version = None\r\n if self._config.has_key('version'):\r\n version = self._config['version']\r\n _logger.info(\"Using version: '%s'\" % version)\r\n\r\n try:\r\n if (not self._config.get_boolean('use.default_wa_path', True)):\r\n wa_path = self._config['dir']\r\n _logger.info(\"Using work area path to checkout directly\")\r\n result = project.checkout(session.create(self._config['release']), version=version, purpose=purpose, path=wa_path)\r\n else:\r\n result = project.checkout(session.create(self._config['release']), version=version, purpose=purpose)\r\n ccm.log_result(result, ccm.CHECKOUT_LOG_RULES, _logger)\r\n self.__setRole(session)\r\n except ccm.CCMException, exc:\r\n ccm.log_result(exc.result, ccm.CHECKOUT_LOG_RULES, _logger)\r\n raise exc\r\n finally:\r\n self.__restoreRole(session)\r\n _logger.info('Checkout complete')\r\n \r\n if result.project != None and result.project.exists(): \r\n _logger.info(\"Project checked out: '%s'\" % result.project)\r\n \r\n try:\r\n self.__setRole(session)\r\n _logger.info(\"Maintaining the workarea...\")\r\n if self.get_threads() == 1:\r\n output = result.project.work_area(True, True, True, self._config['dir'], result.project.name)\r\n else:\r\n output = ccm.extra.FastMaintainWorkArea(result.project, self._config['dir'], result.project.name, self.get_threads())\r\n ccm.log_result(output, ccm.CHECKOUT_LOG_RULES, _logger)\r\n finally:\r\n self.__restoreRole(session)\r\n self.__setup_project(project, result.project)\r\n else:\r\n raise Exception(\"Error checking out '%s'\" % project)\r\n\r\n _logger.info(\"++ Finished at %s\" % time.strftime(\"%H:%M:%S\", time.localtime()))", "def checkout_qmk():\n if exists('qmk_firmware'):\n rmtree('qmk_firmware')\n\n if not fetch_source(repo_name(QMK_GIT_URL)):\n git_clone(QMK_GIT_URL, QMK_GIT_BRANCH)", "def lifecycle_approve_for_my_org(self, orderer_url, orderer_tls_rootcert, channel_name, cc_name,\n 
chaincode_version, policy, sequence=1):\n res, installed = self.lifecycle_query_installed(\"3s\")\n cc_label = cc_name+\"_\"+chaincode_version\n package_id = \"\"\n for each in installed['installed_chaincodes']:\n if each['label'] == cc_label:\n package_id = each['package_id']\n break\n if package_id == \"\":\n return 1, \"not exist the chaincode, please check chaincode_name and chaincode_version\"\n\n if os.getenv(\"CORE_PEER_TLS_ENABLED\") == \"false\" or os.getenv(\"CORE_PEER_TLS_ENABLED\") is None:\n if self.version in BasicEnv.binary_versions_v2:\n res = os.system(\"./../bin/{}/bin/peer lifecycle chaincode approveformyorg -o {} \"\n \" --channelID {} --name {} --version {} --init-required --package-id {} --sequence {}\"\n \" --signature-policy {} > ./approve.txt\"\n .format(self.version, orderer_url, channel_name, cc_name,\n chaincode_version, package_id, sequence, policy))\n else:\n if self.version in BasicEnv.binary_versions_v2:\n res = subprocess.Popen(\"./../bin/{}/bin/peer lifecycle chaincode approveformyorg -o {} --tls \"\n \"--cafile {} --channelID {} --name {} --version {} --init-required --package-id \"\n \"{} --sequence {} --signature-policy {}\"\n .format(self.version, orderer_url, orderer_tls_rootcert, channel_name,\n cc_name, chaincode_version, package_id, sequence, policy), shell=True,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n stdout, stderr = res.communicate()\n return_code = res.returncode\n\n if return_code == 0:\n content = str(stdout, encoding=\"utf-8\")\n else:\n stderr = str(stderr, encoding=\"utf-8\")\n return return_code, stderr\n return return_code, content", "def test_checkout_repository(self):\n\n\n username,userpass = self.testdata.find_account_for('toolsubmitter')\n\n self.contribtool.checkout_repository(TOOLNAME,username,userpass)", "def sync_code_to_masters(\n cluster: Cluster,\n dcos_checkout_dir: Path,\n sudo: bool,\n) -> None:\n local_packages = dcos_checkout_dir / 'packages'\n local_test_dir = local_packages / 'dcos-integration-test' / 'extra'\n if not Path(local_test_dir).exists():\n message = (\n 'DCOS_CHECKOUT_DIR must be set to the checkout of a DC/OS '\n 'repository.\\n'\n '\"{local_test_dir}\" does not exist.'\n ).format(local_test_dir=local_test_dir)\n raise click.BadArgumentUsage(message=message)\n\n dcos_checkout_dir_variant = _dcos_checkout_dir_variant(\n dcos_checkout_dir=dcos_checkout_dir,\n )\n\n node_test_dir = Path('/opt/mesosphere/active/dcos-integration-test')\n\n test_tarstream = _tar_with_filter(\n path=local_test_dir,\n tar_filter=_cache_filter,\n )\n\n dcos_variant = get_cluster_variant(cluster=cluster)\n if dcos_variant is None:\n message = (\n 'The DC/OS variant cannot yet be determined. 
'\n 'Therefore, code cannot be synced to the cluster.'\n )\n click.echo(message, err=True)\n sys.exit(1)\n\n syncing_oss_to_ee = bool(\n dcos_variant == DCOSVariant.ENTERPRISE\n and dcos_checkout_dir_variant == DCOSVariant.OSS,\n )\n\n node_active_dir = Path('/opt/mesosphere/active')\n node_test_dir = node_active_dir / 'dcos-integration-test'\n\n if syncing_oss_to_ee:\n # This matches part of\n # https://github.com/mesosphere/dcos-enterprise/blob/master/packages/dcos-integration-test/ee.build\n for master in cluster.masters:\n master.run(args=['rm', '-rf', str(node_test_dir / 'util')])\n\n # This makes an assumption that all tests are at the top level.\n master.run(\n args=[\n 'rm',\n '-rf',\n str(node_test_dir / 'open_source_tests' / '*.py'),\n ],\n # We use a wildcard character, `*`, so we need shell expansion.\n shell=True,\n sudo=sudo,\n )\n\n master.run(\n args=[\n 'mkdir',\n '--parents',\n str(node_test_dir / 'open_source_tests'),\n ],\n sudo=sudo,\n )\n\n _send_tarstream_to_node_and_extract(\n tarstream=test_tarstream,\n node=master,\n remote_path=node_test_dir / 'open_source_tests',\n sudo=sudo,\n )\n master.run(\n args=[\n 'rm',\n '-rf',\n str(node_test_dir / 'open_source_tests' / 'conftest.py'),\n ],\n sudo=sudo,\n )\n master.run(\n args=[\n 'mv',\n str(node_test_dir / 'open_source_tests' / 'util'),\n str(node_test_dir),\n ],\n sudo=sudo,\n )\n else:\n _sync_bootstrap_to_masters(\n cluster=cluster,\n dcos_checkout_dir=dcos_checkout_dir,\n sudo=sudo,\n )\n\n for master in cluster.masters:\n # This makes an assumption that all tests are at the top level.\n master.run(\n args=['rm', '-rf', str(node_test_dir / '*.py')],\n # We use a wildcard character, `*`, so we need shell expansion.\n shell=True,\n sudo=sudo,\n )\n _send_tarstream_to_node_and_extract(\n tarstream=test_tarstream,\n node=master,\n remote_path=node_test_dir,\n sudo=sudo,\n )", "def compile_code(self,toolname,adminuser,adminpass):\n\n # ssh into a tool session container as the tools manager\n # compile and install the code\n\n # get into a tool session container.\n cm = ContainerManager()\n ws = cm.access(host=self.hubname,username=adminuser,password=adminpass)\n\n session_number,es = ws.execute('echo $SESSION')\n\n # catch errors that happen in the shell\n # so we can properly exit and close the workspace\n try:\n # become the apps user\n ws.send('sudo su - apps')\n ws.start_bash_shell()\n output,es = ws.execute('whoami')\n exit_apps = True\n if output != 'apps':\n exit_apps = False\n msg = \"doesn't look like we were able to become the apps user\"\n self.logger.error(msg)\n raise Exception(msg)\n\n # catch compile and install errors\n # so we can report them back to the developer\n\n # navigate to the tool directory\n cmd = 'cd /apps/%(toolname)s/dev/src' \\\n % { 'toolname' : toolname, }\n ws.execute(cmd)\n\n # if there is a makefile available\n # run:\n # make clean\n # make all\n # make install\n # don't fail if there is no clean or all targets\n if ws.bash_test('-e Makefile'):\n # allow 30 minutes for the code to compile\n ws.timeout = 1800\n output,es = ws.execute('make clean',False)\n output,es = ws.execute('make all',False)\n no_make_all_text = \"make: *** No rule to make target `all'. 
Stop.\"\n if es > 0:\n if es == 2 and output == no_make_all_text:\n output,es = ws.execute('make')\n else:\n self.logger.exception(output)\n raise ExitCodeError(output)\n output,es = ws.execute('make install')\n ws.timeout = 10\n else:\n msg = \"No Makefile found\"\n print msg\n self.logger.info(msg)\n\n finally:\n # exit sudo\n ws.stop_bash_shell()\n if exit_apps:\n ws.send('exit')\n\n # shut down the ssh connection\n ws.close()", "def __getFromJEMpage(self):\n\n if not self.__download(self.repo, self.version, self.lib_tar, self.dest_dir): return False\n if not self.__extract(self.lib_tar): return False\n\n if not self.__download(self.repo, self.version, self.launcher_tar, self.dest_dir): return False\n if not self.__extract(self.launcher_tar): return False\n\n self.logger.info(\"successfully downloaded and extracted JEM ver %s from repo %s\" % (self.version, self.repo))\n\n if os.path.exists(self.dest_dir + \"/JEM.py\"):\n os.environ[\"JEM_PACKAGEPATH\"] = self.dest_dir\n\n\n return True", "def main():\n # Parse command line arguments\n configfile = parse_arguments()\n # Parse config file\n (basedir, gituser, add_own_forks, forks, branches) = parse_config(configfile)\n # Check that base directory exists\n if not os.path.exists(basedir):\n raise Exception('Base directory {0} does not exist'.format(basedir))\n # Configure working directory\n workdir = setup_workdir(basedir)\n # Check out the code\n checkout_code(workdir, gituser, add_own_forks, forks, branches)\n print \"Location of code: {0}\".format(workdir)", "def test_error_when_student_code_is_incorrectly_packaged(\n self, default_hooks\n ):\n result = default_hooks.act_on_cloned_repo(NO_DIR_STRUCTURE_REPO)\n\n assert result.status == Status.ERROR", "def checkout(self, checkout, *args):\n return self.cmd('checkout', checkout, *args)", "def test_link_to_checkout(self):\n self.browser.find_element_by_link_text('Checkout').click()\n self.assertEqual(self.browser.current_url,\n self.live_server_url + self.CHECKOUT_URL)", "def update_openblock():\n\n tf = tempfile.mktemp(suffix='-openblock')\n local('git clone git://github.com/openplans/openblock.git {0}'.format(tf))\n dest = os.path.join(PROJECT_ROOT, 'requirements', 'sdists')\n for name in ('obadmin', 'ebdata', 'ebpub'):\n package = os.path.join(tf, name)\n os.chdir(package)\n local('pip install -e {source} -d {dest}'.format(source=package,\n dest=dest))\n shutil.rmtree(tf)", "def start(buildout):\n check = Check(buildout)\n check.extends_cache()\n check.eggs_directory()\n check.download_cache()", "def test_ML_check_cms_aem_emvevex(self):\n\n self.setup_logFile_for_logger('madgraph.check_cmd')\n files = ['acceptance_test_aem_emvevex.pkl',\n 'acceptance_test_aem_emvevex.log',\n 'acceptance_test_aem_emvevex_widths_increased.pkl',\n 'acceptance_test_aem_emvevex_widths_increased.log']\n output_name = 'SAVEDTMP_CHECK_acceptance_test_aem_emvevex__%s__'\n \n try:\n cwd = os.getcwd()\n \n # Change this when we will make the CMS-ready EW model the default\n self.do('import model loop_qcd_qed_sm')\n for mode in ['NWA','CMS']:\n if path.isdir(pjoin(MG5DIR,output_name%mode)):\n shutil.rmtree(pjoin(MG5DIR,output_name%mode))\n \n # Make sure it works for an initial run\n command = 'check cms -reuse a e- > e- ve ve~ [virt=QCD QED] '\n options = {'name':'acceptance_test_aem_emvevex',\n 'lambdaCMS':'(1.0e-6,2)',\n 'show_plot':'False',\n 'seed':'666',\n 'resonances':'2',\n 'recompute_width':'first_time',\n 'report':'full'}\n cmd = command+' '.join('--%s=%s'%(opt, value) for opt, value in \n 
options.items())\n # print \"Running first CMS check cmd: \",cmd\n self.do(cmd)\n self.assertEqual(cwd, os.getcwd())\n for mode in ['NWA','CMS']:\n self.assertTrue(path.isdir(pjoin(MG5DIR,output_name%mode)))\n self.assertTrue(path.isfile(pjoin(MG5DIR,\n 'acceptance_test_aem_emvevex.pkl')))\n self.assertTrue(path.isfile(self.tmp_path['madgraph.check_cmd']))\n res = open(self.tmp_path['madgraph.check_cmd']).read()\n self.assertTrue(res.count('=== FAILED ===')==0)\n self.assertTrue(res.count('=== PASSED ===')==2)\n self.assertTrue(path.isfile(pjoin(MG5DIR,\n 'acceptance_test_aem_emvevex.log')))\n res = open(pjoin(MG5DIR,'acceptance_test_aem_emvevex.log')).read()\n self.assertTrue(res.count('=== FAILED ===')==0)\n self.assertTrue(res.count('=== PASSED ===')==2)\n \n # Now for a Reuse-run with the widths modified by 1%\n self.setup_logFile_for_logger('madgraph.check_cmd',restore=True)\n self.setup_logFile_for_logger('madgraph.check_cmd')\n # Now copy the card with recomputed widths in it\n for mode in ['NWA','CMS']:\n self.assertTrue(path.isfile(pjoin(MG5DIR,output_name%mode,\n 'Cards','param_card.dat_recomputed_widths')))\n shutil.copy(pjoin(MG5DIR,output_name%mode,'Cards',\n 'param_card.dat_recomputed_widths'),\n pjoin(MG5DIR,output_name%mode,'Cards','param_card.dat'))\n options['tweak']='allwidths->1.1*allwidths(widths_increased)'\n options['recompute_width']='never'\n cmd = command+' '.join('--%s=%s'%(opt, value) for opt, value in \n options.items())\n # print \"Running second CMS check cmd: \",cmd\n self.do(cmd)\n self.assertEqual(cwd, os.getcwd())\n self.assertTrue(path.isfile(pjoin(MG5DIR,\n 'acceptance_test_aem_emvevex_widths_increased.pkl')))\n self.assertTrue(path.isfile(self.tmp_path['madgraph.check_cmd']))\n res = open(self.tmp_path['madgraph.check_cmd']).read()\n self.assertTrue(res.count('=== FAILED ===')==2)\n self.assertTrue(res.count('=== PASSED ===')==0)\n self.assertTrue(path.isfile(pjoin(MG5DIR,\n 'acceptance_test_aem_emvevex_widths_increased.log')))\n res = open(pjoin(MG5DIR,\n 'acceptance_test_aem_emvevex_widths_increased.log')).read()\n self.assertTrue(res.count('=== FAILED ===')==2)\n self.assertTrue(res.count('=== PASSED ===')==0)\n \n # Clean up duties\n for mode in ['NWA','CMS']:\n shutil.rmtree(pjoin(MG5DIR,output_name%mode))\n for file in files:\n try:\n os.remove(pjoin(MG5DIR,file))\n except:\n pass\n self.setup_logFile_for_logger('madgraph.check_cmd',restore=True)\n\n except KeyError as e:\n self.setup_logFile_for_logger('madgraph.check_cmd',restore=True)\n for mode in ['NWA','CMS']:\n try:\n shutil.rmtree(pjoin(MG5DIR,output_name%mode))\n except:\n pass\n for f in files:\n try:\n os.remove(pjoin(MG5DIR,f))\n except:\n pass\n raise e\n self.setup_logFile_for_logger('madgraph.check_cmd',restore=True)", "def check_workspace ():\n\n try:\n ex (\"cd $DOC_ROOT/ACE_TAO && git pull -p\")\n print (\"Successfully updated ACE/TAO working copy\")\n except:\n print (\"Unable to update ACE/TAO workspace at \" + doc_root)\n raise\n\n try:\n ex (\"cd $DOC_ROOT/MPC && git pull -p\")\n print (\"Successfully updated MPC working copy to revision \")\n except:\n print (\"Unable to update the MPC workspace at \" + doc_root + \"/ACE/MPC\")\n raise\n\n vprint (\"Repos root URL = \" + opts.repo_root + \"\\n\")\n vprint (\"Repos MPC root URL = \" + opts.mpc_root + \"\\n\")", "def code(ctx):\n user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'),\n ctx.obj.get('experiment'))\n try:\n code_ref = PolyaxonClient().experiment.get_code_reference(user, 
project_name, _experiment)\n commit = None\n if code_ref:\n commit = code_ref.commit\n Printer.print_header(\n 'Experiment has code ref: `{}`, downloading ...'.format(commit))\n else:\n Printer.print_warning(\n 'Experiment has no code ref, downloading latest code...')\n PolyaxonClient().project.download_repo(user, project_name, commit=commit)\n except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:\n Printer.print_error('Could not download outputs for experiment `{}`.'.format(_experiment))\n Printer.print_error('Error message `{}`.'.format(e))\n sys.exit(1)\n Printer.print_success('Files downloaded.')", "def checkGit(directory):", "def test_source_package_exists(self):\n response = self.client.head(\n f'/filemanager/api/{self.upload_id}/content',\n headers={'Authorization': self.token}\n )\n self.assertEqual(response.status_code, status.OK)", "def setup(token_addr: str, box_addr: str) -> None:\n print(f\"\\nSetting up Contracts....\")\n tokenContract = pytezos.contract(token_addr)\n print(f\"-- Performing Initial Mint to Admin : {pub_key_hash}\")\n tokenContract.initialMint(None).inject(_async=False)\n print(\"-- Funding Fishcake Box Contract\")\n tokenContract.transfer([{\"from_\": pub_key_hash, \"txs\": [\n {\"to_\": box_addr, \"token_id\": 0, \"amount\": default_fsck_box_fund}]}]).inject(_async=False)", "def forced_checkout_with_real_obstructions_and_unversioned_files(sbox):\n\n # Make a local tree that partially obstructs the paths coming from the\n # repos, make the obstructing files different from the standard greek\n # tree, and finally add some files that don't exist in the stardard tree.\n expected_output = make_local_tree(sbox, True, True)\n\n expected_wc = svntest.main.greek_state.copy()\n expected_wc.tweak('A/mu',\n contents=\"This is the local version of the file 'mu'.\\n\")\n expected_wc.tweak('iota',\n contents=\"This is the local version of the file 'iota'.\\n\")\n expected_wc.add({'sigma' : Item(\"unversioned sigma\"),\n 'A/upsilon' : Item(\"unversioned upsilon\"),\n 'A/Z' : Item(),\n })\n\n svntest.actions.run_and_verify_checkout(sbox.repo_url,\n sbox.wc_dir, expected_output,\n expected_wc, [], '--force')", "def test_empty_code_for_verification(self, cred):\n resp = requests.get(verify_url.format('json', cred[0], cred[1],\n 'TestApp', test_number))\n assert resp.status_code == 200\n assert resp.json()['status'] == '0'\n request_id = resp.json()['request_id']\n resp = requests.get(check_url.format('json', cred[0], cred[1],\n request_id, ''))\n assert resp.status_code == 200\n assert resp.headers['Content-Type'] == 'application/json'\n assert resp.json()['status'] == '2'\n assert resp.json()['error_text'] == missing_specific_mandatory_parm_msg.format('code')\n # terminate verification process\n assert 'Workflow terminated' in \\\n terminate_workflow(cred[0], cred[1], request_id).json()['error_text']", "def testToolchainDownload(self):\n self.assertEqual('https://www.kernel.org/pub/tools/crosstool/files/bin/x86_64/4.9.0/x86_64-gcc-4.9.0-nolibc_arm-unknown-linux-gnueabi.tar.xz',\n self.toolchains.LocateArchUrl('arm'))", "def checkout_v8():\n if not OFFLINE_MODE:\n exec_cmd('git fetch --tags',\n cwd=V8_HOME,\n msg='Fetch the release tag information')\n\n exec_cmd('git checkout', V8_GIT_TAG,\n cwd=V8_HOME,\n msg='Checkout Google V8 v' + V8_GIT_TAG)", "def checkout_book(book):\n\tno_token = 'Y'\n\tif no_token == 'Y':\n\t\tsuccessful = 200\n\t\treturn successful\n\telse:\n\t\tlist_of_books = check_out_book(book)\n\t\treturn list_of_books\n\t#end if", 
"def check_installation():\n print(\n 'Hooray! CCurl is installed correctly!'\n if is_installed()\n else 'Aww, man! CCurl is NOT installed correctly!'\n )\n print('For support, visit the #iota-libs-pyota channel on the IOTA Slack.')\n print('https://slack.iota.org/')", "def deploy():\n with cd(\"~/public_html/\"):\n run(\"/usr/local/cpanel/3rdparty/bin/git pull\")\n\n with cd(\"~/public_html/skin/frontend/gemz/default/tools/\"):\n run(\"grunt default\")\n #sudo(\"/scripts/enablefileprotect\")", "def forced_checkout_with_real_obstructions(sbox):\n\n # Make a local tree that partially obstructs the paths coming from the\n # repos and make the obstructing files different from the standard greek\n # tree.\n expected_output = make_local_tree(sbox, True, False)\n\n expected_wc = svntest.main.greek_state.copy()\n expected_wc.tweak('A/mu',\n contents=\"This is the local version of the file 'mu'.\\n\")\n expected_wc.tweak('iota',\n contents=\"This is the local version of the file 'iota'.\\n\")\n\n svntest.actions.run_and_verify_checkout(sbox.repo_url,\n sbox.wc_dir, expected_output,\n expected_wc, [], '--force')", "def _get_code_version():\n git_dir = os.path.dirname(os.path.realpath(__file__))\n cwd = os.getcwd()\n file = os.path.join(cwd, VERSION_FILENAME)\n bash_command = f'cd {git_dir}; ' + \\\n f'git rev-parse HEAD > {file}; ' + \\\n f'cd {cwd}; '\n success = False\n try:\n subprocess.check_call(\n bash_command, stderr=subprocess.DEVNULL, shell=True)\n sucess = True\n except subprocess.CalledProcessError:\n # not a git directory\n bash_command = f'rm {file}; cd {cwd}; '\n subprocess.check_call(bash_command, shell=True)\n except OSError:\n # git command not found\n pass\n return success", "def upload_code(self,toolname,data,username,userpass,msg):\n\n self.logger.info(\"uploading source code for the tool '%s'\" \\\n % (toolname))\n\n repo_url = self.repo_url_template % { 'hubname' : self.hubname,\n 'toolname' : toolname }\n\n # ssh into a tool session container\n cm = ContainerManager()\n ws = cm.access(host=self.hubname,username=username,password=userpass)\n\n svn = Subversion(ws,username,userpass)\n\n session_number = -1\n repo_home = None\n try:\n session_number,es = ws.execute('echo $SESSION')\n if session_number <= 0:\n raise RuntimeError('invalid session number: %s' \\\n % (session_number))\n\n # create a temp directory to hold the repo\n repo_home,es = ws.execute('mktemp -d --tmpdir=`pwd` -t tmp.XXXXXXXX')\n ws.execute('cd %s' % (repo_home))\n\n # do the checkout\n svn.checkout(repo_url,toolname)\n\n # cd into the repo\n ws.execute('cd %s' % (toolname))\n tool_repo,es = ws.execute('pwd')\n\n # add some code\n commit_files = []\n for localpath,remotepath in data.items():\n commit_files.append(remotepath)\n remotepath = os.path.join(tool_repo,remotepath)\n ws.importfile(localpath,remotepath)\n\n # commit the source code repository\n svn.add(commit_files)\n revision = svn.commit(\"initial upload of source code\")\n if revision is None:\n raise RuntimeError('commit failure, revision is None')\n\n finally:\n # FIXME: remove the temp directory\n ws.send_raw('\u0003')\n ws.send_raw('\u0003')\n time.sleep(5)\n if repo_home is not None:\n ws.execute('rm -rf %s' % (repo_home))\n\n # shut down the ssh connection\n ws.close()", "def setup():\n\n # check version and download the latest version\n get_latest_codeql()\n # install vscode?\n # clone codeql libs\n # setup vscode + codeql\n # wait for user", "def checkout(ctx, file_name):\n dufl_root = ctx.obj['dufl_root']\n\n checked_out_file = 
os.path.abspath(file_name)\n dufl_file = get_dufl_file_path(checked_out_file, ctx.obj)\n\n if not os.path.exists(dufl_file):\n click.echo('The file you want to checkout does not exist. Maybe run dufl fetch first?', err=True)\n exit(1)\n\n if os.path.exists(checked_out_file):\n # Try our best to see if it's been modified\n git = Git(ctx.obj.get('git', '/usr/bin/git'), dufl_root)\n last_modified_ts = os.path.getmtime(checked_out_file)\n last_modified = datetime.fromtimestamp(\n last_modified_ts\n ).strftime('%Y-%m-%d %H:%M:%S')\n commit_at_date = re.sub('[^a-zA-Z0-9]', '', git.get_output(\n 'rev-list', '-1',\n '--before=%s' % last_modified,\n git.working_branch()\n ))\n file_exists_at_commit = False\n if len(commit_at_date) > 0:\n file_exists_at_commit = git.test(\n 'rev-parse', '--verify',\n '%s:%s' % (\n commit_at_date,\n os.path.relpath(dufl_file, dufl_root)\n )\n )\n # If there is no commit at date, or the file didnt' exist at the commit,\n # assume first version of the file ever.\n if not file_exists_at_commit:\n commit_at_date = re.sub('[^a-zA-Z0-9]', '', git.get_output(\n 'log', '--diff-filter=A', '--pretty=format:\\'%H\\'',\n '--', os.path.relpath(dufl_file, dufl_root)\n ))\n if len(commit_at_date) == 0:\n click.echo('File %s exists, but does not seem to be in the git repository?' % dufl_file, err=True)\n exit(1)\n\n # Note: do not be tempted to use 'git show branch@{date}' syntax,\n # as that relies on the reflog which does not contain all commits.\n content_at_date = git.get_output(\n 'show',\n '%s:%s' % (\n commit_at_date,\n os.path.relpath(dufl_file, dufl_root)\n )\n )\n with open(checked_out_file, 'r') as f:\n content_now = f.read()\n if content_at_date != content_now:\n click.echo('It looks like you have local modifications. Will exit for now.', err=True)\n exit(1)\n\n click.echo('Copying %s to %s...' 
% (dufl_file, checked_out_file))\n if not os.path.exists(os.path.dirname(checked_out_file)):\n os.makedirs(os.path.dirname(checked_out_file))\n shutil.copy(dufl_file, checked_out_file)", "def upload():\n\n # Our credentials are only available from within the main repository and not forks.\n # We need to prevent uploads from all BUT the branches in the main repository.\n # Pull requests and master-branches of forks are not allowed to upload.\n is_pull_request = (\n (\"TRAVIS_PULL_REQUEST\" in os.environ and os.environ[\"TRAVIS_PULL_REQUEST\"] != \"false\") or\n \"APPVEYOR_PULL_REQUEST_NUMBER\" in os.environ\n )\n if is_pull_request:\n click.echo(\"Refusing to upload artifacts from a pull request!\")\n return\n\n if \"AWS_ACCESS_KEY_ID\" in os.environ:\n subprocess.check_call([\n \"aws\", \"s3\", \"cp\",\n \"--acl\", \"public-read\",\n DIST_DIR + \"/\",\n \"s3://snapshots.mitmproxy.org/{}/\".format(UPLOAD_DIR),\n \"--recursive\",\n ])\n\n upload_pypi = (\n TAG and\n \"WHEEL\" in os.environ and\n \"TWINE_USERNAME\" in os.environ and\n \"TWINE_PASSWORD\" in os.environ\n )\n if upload_pypi:\n whl = glob.glob(join(DIST_DIR, 'mitmproxy-*-py3-none-any.whl'))[0]\n click.echo(\"Uploading {} to PyPi...\".format(whl))\n subprocess.check_call([\n \"twine\",\n \"upload\",\n whl\n ])\n\n upload_docker = (\n (TAG or BRANCH == \"master\") and\n \"DOCKER\" in os.environ and\n \"DOCKER_USERNAME\" in os.environ and\n \"DOCKER_PASSWORD\" in os.environ\n )\n if upload_docker:\n docker_tag = \"dev\" if BRANCH == \"master\" else VERSION\n\n click.echo(\"Uploading Docker image to tag={}...\".format(docker_tag))\n subprocess.check_call([\n \"docker\",\n \"login\",\n \"-u\", os.environ[\"DOCKER_USERNAME\"],\n \"-p\", os.environ[\"DOCKER_PASSWORD\"],\n ])\n subprocess.check_call([\n \"docker\",\n \"push\",\n \"mitmproxy/mitmproxy:{}\".format(docker_tag),\n ])", "def checkoutlicense(self,feature_):\n res = __library__.MSK_XX_checkoutlicense(self.__nativep,feature_)\n if res != 0:\n raise Error(rescode(res),Env.getcodedesc(rescode(res))[1])", "def forced_checkout_with_faux_obstructions(sbox):\n\n # Make a local tree that partially obstructs the paths coming from the\n # repos but has no true differences.\n expected_output = make_local_tree(sbox, False, False)\n\n expected_wc = svntest.main.greek_state.copy()\n\n svntest.actions.run_and_verify_checkout(sbox.repo_url,\n sbox.wc_dir, expected_output,\n expected_wc, [], '--force')", "def checkout(url, version=None):\n from grit import Repo\n r = Repo(url)\n\n def _write(item):\n log.debug('writing: %s' % item.name)\n if item.type != 'blob':\n return\n if r.type in ['repo', 'proxy', 'local']:\n path = os.path.join(r.name, item.path)\n pdir = os.path.dirname(path)\n if not os.path.isdir(pdir):\n os.makedirs(pdir)\n else:\n path = item.name\n\n f = open(path, 'w')\n f.write(item.data())\n f.close()\n\n if r.type == 'blob':\n _write(r)\n else:\n items = r.items()\n count = 1\n total = len(items)\n while count <= total:\n print '[%s/%s] %0.2f%%' %(count, total, (float(count) / total) * 100), '*'*count, '\\r',\n _write(items[count-1])\n count += 1\n sys.stdout.flush()\n print", "def test_runs_with_packaged_code(self, default_hooks):\n result = default_hooks.act_on_cloned_repo(PACKAGED_CODE_REPO)\n\n assert result.status == Status.SUCCESS\n assert (\n _output.test_result_header(\n \"se.repobee.fibo.FiboTest\",\n NUM_FIBO_TESTS,\n NUM_FIBO_TESTS,\n _output.SUCCESS_COLOR,\n )\n in result.msg\n )", "def process_online_checkout(msisdn, amount, account_reference='', 
transaction_desc=''):\n url = settings.C2B_ONLINE_CHECKOUT_URL\n headers = {\"Content-Type\": 'application/json',\n 'Authorization': 'Bearer {}'.format(AuthToken.objects.get_token('c2b'))}\n timestamp = str(datetime.now())[:-7].replace('-', '').replace(' ', '').replace(':', '')\n password = base64.b64encode(bytes('{}{}{}'.format(settings.C2B_ONLINE_SHORT_CODE, settings.C2B_ONLINE_PASSKEY,\n timestamp), 'utf-8')).decode('utf-8')\n body = dict(\n BusinessShortCode=settings.C2B_ONLINE_SHORT_CODE,\n Password=password,\n Timestamp=timestamp,\n TransactionType=settings.C2B_TRANSACTION_TYPE,\n Amount=str(amount),\n PartyA=str(msisdn),\n PartyB=settings.C2B_ONLINE_SHORT_CODE,\n PhoneNumber=str(msisdn),\n CallBackURL=settings.C2B_ONLINE_CHECKOUT_CALLBACK_URL,\n AccountReference='ref-'.format(account_reference),\n TransactionDesc='desc-'.format(transaction_desc)\n )\n response = post(url=url, headers=headers, data=json.dumps(body))\n return response.json()", "def checkout(self, *arguments, **kwargs):\n return self.get_output('checkout', *arguments, **kwargs)", "def test_managed_install(visualstudio, tmp_path):\n assert not visualstudio.managed_install", "def checkout_chibios():\n chibios = ('chibios', CHIBIOS_GIT_URL, CHIBIOS_GIT_BRANCH)\n chibios_contrib = ('chibios-contrib', CHIBIOS_CONTRIB_GIT_URL, CHIBIOS_CONTRIB_GIT_BRANCH)\n\n os.chdir('qmk_firmware/lib')\n\n for submodule, git_url, git_branch in chibios, chibios_contrib:\n if exists(submodule):\n rmtree(submodule)\n\n if not fetch_source(submodule):\n git_clone(git_url, git_branch)\n\n os.chdir('../..')", "def commit_check(ctx):\n result = ctx.run(f\"{VENV_PREFIX} cz check --rev-range master..\", warn=True)\n if result.exited == 3: # NO_COMMIT_FOUND\n exit(0)\n else:\n exit(result.exited)", "def checkout_java_acc(force):\n acc_dir = get_java_acc_dir()\n if os.path.exists(acc_dir):\n logging.info(\"Java ACC is already downloaded.\")\n if not force:\n return\n logging.info(\"Forcing re-download.\")\n shutil.rmtree(acc_dir)\n\n logging.info(\"Downloading Java ACC...\")\n\n url = \"https://github.com/lvc/japi-compliance-checker/archive/2.4.tar.gz\"\n scratch_dir = get_scratch_dir()\n path = os.path.join(scratch_dir, os.path.basename(url))\n jacc = urllib.request.urlopen(url)\n with open(path, 'wb') as w:\n w.write(jacc.read())\n\n subprocess.check_call([\"tar\", \"xzf\", path],\n cwd=scratch_dir)\n\n shutil.move(os.path.join(scratch_dir, \"japi-compliance-checker-2.4\"),\n os.path.join(acc_dir))", "def cms_identifier(self):\n engine.setup(self)\n \n targets = [target for target in self.args.target if target.strip()]\n error_count = 0\n for url in targets:\n self.sanitize_url(url)\n msg = \"Getting source for {}\".format(self.url); report.low(msg)\n headers = {'User-Agent': \"Mozilla/5.0 (X11; Fedora; Linux i686;\" +\\\n\t\t\t\"rv:40.0) Gecko/20100101 Firefox/40.1\"}\n response = None\n try:\n response = requests.get(self.url, headers=headers, verify=False)\n if \"Checking your browser before accessing\" in response.content:\n msg =\"Site: {} is using cloudflare. \"\\\n \"Trying to bypass cloudflare protection.\".format(self.url);report.medium(msg)\n #damn cloudflare, lets see if how to circumvert it. 
\n #TODO: Ask for permision since executing JS might be a security issue.\n # https://github.com/Anorov/cloudflare-scrape\n cfscraper = cfscrape.create_scraper()\n response = cfscraper.get(self.url)\n except Exception as e:\n #print e\n error_count += 1\n msg=\"Something went wrong while getting ({}), moving on...\".format(self.url);report.error(msg)\n if error_count > 3:\n msg = \"Too many error. Exiting...\"; report.error(msg)\n sys.exit()\n \n framework, site = engine.pwn(self,response)\n if framework:\n report.info(\"This is a website based on: {0} from {1}\".format(framework, site))\n else:\n report.high(\"Failed to determine CMS of site.\")", "def step_impl(context):\n\n log.info(\"====> Telnet to the STB IP {stb_ip} and Verify that the {mhc_syncapp_path} path is available and accessible.\".format(stb_ip=stb_parameters.STB_IP, mhc_syncapp_path=stb_parameters.MHC_SYNCAPP_PATH))\n command = \"cd \" + stb_parameters.MHC_SYNCAPP_PATH\n result = send_telnet_command(command)\n\n if \"No such\" in result:\n assert False, \" *****> Failed: Didn't find the correct path\"\n else:\n log.info(\" *****> Passed: The correct path exists\")", "def checkout_btn(self):\n self._checkout_btn.click()", "def main():\n get_obofoundry(force_download=True)", "def check(self):\n json = JsonBackend(\"../src/builder/projects.json\")\n json.load()\n\n TM_ITSELF = 1\n expected_files = TM_ITSELF + sum(p.downloadable is True\n for p in json.projects)\n self.downloads_for_project('tots', expected_files)\n\n expected_files = TM_ITSELF + sum(p.softcatala is True and\n p.downloadable is True\n for p in json.projects)\n\n self.downloads_for_project('softcatala', expected_files)\n\n expected_files = 1\n for project_dto in json.projects:\n if not project_dto.downloadable:\n continue\n\n self.downloads_for_project(project_dto.name, expected_files)\n self.check_project_link(project_dto.projectweb)", "def check_pr_details(self, pr_number):\n pr = self.repo.get_pull(pr_number)\n email_pattern = re.compile(r'^.*@suse\\.(com|cz|de)$')\n\n for commit in pr.get_commits():\n sha = commit.sha\n author = commit.author\n title = message = commit.commit.message\n # Not sure why we need to use the nested commit for the email\n email = commit.commit.author.email\n user_id = f'{author.login}({email})'\n body = ''\n\n # This could be probably smarter but commit contains something like the following\n # message=\"$commit_title\\n\\n$long_commit_message\" and as such maybe we can split it and\n # check for the following limits: title max 50 chars, body max 72 chars per line and at\n # least as long as the commit title to avoid commit message bodies full of whitespaces\n try:\n title, body = message.split('\\n\\n', 1)\n except ValueError:\n print('No commit body was detected')\n\n print(f'Checking commit \"{sha}: {title}\"')\n\n if not email_pattern.fullmatch(email):\n print(f'Checking if {user_id} is part of the SUSE organization...')\n\n if self.org.has_in_members(commit.author):\n print(f'{user_id} is part of SUSE organization but a SUSE e-mail address was not used for commit: {sha}')\n sys.exit(1)\n\n # replace case-insensitive \"(bsc#)\" (or []) and surrounding spaces\n # with a single space, then prune leading/trailing spaces\n title = re.sub(r'\\s*[([]\\s*(?i:bsc)#\\d+\\s*[)\\]]\\s*', ' ', title).strip()\n if len(title) > 50:\n print('Commit message title should be less than 50 characters (excluding the bsc# reference)')\n sys.exit(1)\n\n # No body detected. 
Nothing else to do here.\n if not body:\n continue\n\n if len(body) < len(title):\n print('Commit message body is too short')\n sys.exit(1)\n\n # strip multi-line '```code```' blocks & lines starting w\\ `code`\n code_pattern = re.compile(\n r'''\n ((?m:^)\\s*```) # multi-line beginning, 0-more whitespace, ```\n (?s:.*?) # non-greedy, zero or more chars, including \\n\n \\1 # whatever matched at the beginning\n | # or...\n (?m:^)\\s*` # start of line, optional whitespace, backtick\n [^`]+ # oneor more non-backtick chars\n `\\s*(?m:$) # and a backtick at the end of the line\n ''',\n re.VERBOSE\n )\n for body_line in re.sub(code_pattern, '', body).splitlines():\n if len(body_line) > 72:\n print('Each line in the commit body should be less than 72 characters')\n sys.exit(1)\n\n print(f'PR-{pr_number} commits verified.')", "def source(dirname, filename, gen_content):\n if dirname in lut['sources']:\n s.add('MD5SUM=\"$(find \"{0}\" -printf %T@\\\\\\\\n | md5sum)\"', dirname)\n if secret is None:\n s.add('tar xf \"{0}\" -C \"{1}\"',\n filename,\n dirname,\n sources={filename: gen_content()})\n else:\n s.add('wget \"{0}/{1}/{2}/{3}\"', server, secret, b.name, filename)\n s.add('tar xf \"{0}\" -C \"{1}\"', filename, dirname)\n for manager, service in lut['sources'][dirname]:\n s.add('[ \"$MD5SUM\" != \"$(find \"{0}\" -printf %T@\\\\\\\\n ' # No ,\n '| md5sum)\" ] && {1}=1',\n dirname,\n manager.env_var(service))", "def test_download_file_no_sha(token):\n\n # github => repo => release => asset_list => asset => url => download\n\n g_h = github.Github(token, per_page=100)\n repo = g_h.get_repo(TEST_SLUG, lazy=False)\n release = repo.get_release(TEST_TAG)\n asset_list = release.get_assets()\n sha_filename = Template(Arguments.HASH_FILE).safe_substitute({\n 'platform': platform.system().lower()\n })\n\n pass_test = True\n\n for check_asset in asset_list:\n # look through list of assets for uploaded file and sha file\n\n if check_asset.name == sha_filename:\n\n pass_test = False\n\n assert pass_test", "def test_update_software_asset_install_script(self):\n pass", "def check_mitm_status_page(self, check_url):\n response = requests.get(check_url)\n if response.status_code == 200:\n return response\n else:\n sys.exit(2)", "def codebuild_insecure_ssl_check(cache: dict, session, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:\n # ISO Time\n iso8601Time = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()\n for projects in get_code_build_projects(cache, session):\n # B64 encode all of the details for the Asset\n assetJson = json.dumps(projects,default=str).encode(\"utf-8\")\n assetB64 = base64.b64encode(assetJson)\n buildProjectName = str(projects[\"name\"])\n buildProjectArn = str(projects[\"arn\"])\n # check if Insecure SSL is enabled for your Source - if KeyError is thrown it means your Source\n # (or lack thereof) does not have this argument\n try:\n insecureSsl = str(projects[\"source\"][\"insecureSsl\"])\n except KeyError:\n # this is a passing check\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": f\"{buildProjectArn}/insecure-ssl\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": buildProjectArn,\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\n \"Software and Configuration Checks/AWS Security Best Practices\",\n \"Effects/Data Exposure\",\n ],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": 
\"INFORMATIONAL\"},\n \"Confidence\": 99,\n \"Title\": \"[CodeBuild.2] CodeBuild projects should not have insecure SSL configured\",\n \"Description\": f\"CodeBuild project {buildProjectName} does not have a source that supports the SSL setting and is thus exempt from this check.\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"If your project should not have insecure SSL configured refer to the Troubleshooting CodeBuild section of the AWS CodeBuild User Guide\",\n \"Url\": \"https://docs.aws.amazon.com/codebuild/latest/userguide/troubleshooting.html#troubleshooting-self-signed-certificate\",\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"AWS\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": awsAccountId,\n \"AssetRegion\": awsRegion,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Developer Tools\",\n \"AssetService\": \"AWS CodeBuild\",\n \"AssetComponent\": \"Project\"\n },\n \"Resources\": [\n {\n \"Type\": \"AwsCodeBuildProject\",\n \"Id\": buildProjectArn,\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\"AwsCodeBuildProject\": {\"Name\": buildProjectName}},\n }\n ],\n \"Compliance\": {\n \"Status\": \"PASSED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 PR.DS-2\",\n \"NIST SP 800-53 Rev. 4 SC-8\",\n \"NIST SP 800-53 Rev. 4 SC-11\",\n \"NIST SP 800-53 Rev. 4 SC-12\",\n \"AICPA TSC CC6.1\",\n \"ISO 27001:2013 A.8.2.3\",\n \"ISO 27001:2013 A.13.1.1\",\n \"ISO 27001:2013 A.13.2.1\",\n \"ISO 27001:2013 A.13.2.3\",\n \"ISO 27001:2013 A.14.1.2\",\n \"ISO 27001:2013 A.14.1.3\"\n ]\n },\n \"Workflow\": {\"Status\": \"RESOLVED\"},\n \"RecordState\": \"ARCHIVED\"\n }\n yield finding\n if insecureSsl != \"False\":\n # this is a failing check\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": f\"{buildProjectArn}/insecure-ssl\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": buildProjectArn,\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\n \"Software and Configuration Checks/AWS Security Best Practices\",\n \"Effects/Data Exposure\",\n ],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"MEDIUM\"},\n \"Confidence\": 99,\n \"Title\": \"[CodeBuild.2] CodeBuild projects should not have insecure SSL configured\",\n \"Description\": \"CodeBuild project \"\n + buildProjectName\n + \" has insecure SSL configured. 
Refer to the remediation instructions if this configuration is not intended\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"If your project should not have insecure SSL configured refer to the Troubleshooting CodeBuild section of the AWS CodeBuild User Guide\",\n \"Url\": \"https://docs.aws.amazon.com/codebuild/latest/userguide/troubleshooting.html#troubleshooting-self-signed-certificate\",\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"AWS\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": awsAccountId,\n \"AssetRegion\": awsRegion,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Developer Tools\",\n \"AssetService\": \"AWS CodeBuild\",\n \"AssetComponent\": \"Project\"\n },\n \"Resources\": [\n {\n \"Type\": \"AwsCodeBuildProject\",\n \"Id\": buildProjectArn,\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\"AwsCodeBuildProject\": {\"Name\": buildProjectName}},\n }\n ],\n \"Compliance\": {\n \"Status\": \"FAILED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 PR.DS-2\",\n \"NIST SP 800-53 Rev. 4 SC-8\",\n \"NIST SP 800-53 Rev. 4 SC-11\",\n \"NIST SP 800-53 Rev. 4 SC-12\",\n \"AICPA TSC CC6.1\",\n \"ISO 27001:2013 A.8.2.3\",\n \"ISO 27001:2013 A.13.1.1\",\n \"ISO 27001:2013 A.13.2.1\",\n \"ISO 27001:2013 A.13.2.3\",\n \"ISO 27001:2013 A.14.1.2\",\n \"ISO 27001:2013 A.14.1.3\"\n ]\n },\n \"Workflow\": {\"Status\": \"NEW\"},\n \"RecordState\": \"ACTIVE\"\n }\n yield finding\n else:\n # this is a passing check\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": f\"{buildProjectArn}/insecure-ssl\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": buildProjectArn,\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\n \"Software and Configuration Checks/AWS Security Best Practices\",\n \"Effects/Data Exposure\",\n ],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"INFORMATIONAL\"},\n \"Confidence\": 99,\n \"Title\": \"[CodeBuild.2] CodeBuild projects should not have insecure SSL configured\",\n \"Description\": \"CodeBuild project \"\n + buildProjectName\n + \" doesnt have insecure SSL configured.\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"If your project should not have insecure SSL configured refer to the Troubleshooting CodeBuild section of the AWS CodeBuild User Guide\",\n \"Url\": \"https://docs.aws.amazon.com/codebuild/latest/userguide/troubleshooting.html#troubleshooting-self-signed-certificate\",\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"AWS\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": awsAccountId,\n \"AssetRegion\": awsRegion,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Developer Tools\",\n \"AssetService\": \"AWS CodeBuild\",\n \"AssetComponent\": \"Project\"\n },\n \"Resources\": [\n {\n \"Type\": \"AwsCodeBuildProject\",\n \"Id\": buildProjectArn,\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\"AwsCodeBuildProject\": {\"Name\": buildProjectName}},\n }\n ],\n \"Compliance\": {\n \"Status\": \"PASSED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 PR.DS-2\",\n \"NIST SP 800-53 Rev. 4 SC-8\",\n \"NIST SP 800-53 Rev. 4 SC-11\",\n \"NIST SP 800-53 Rev. 
4 SC-12\",\n \"AICPA TSC CC6.1\",\n \"ISO 27001:2013 A.8.2.3\",\n \"ISO 27001:2013 A.13.1.1\",\n \"ISO 27001:2013 A.13.2.1\",\n \"ISO 27001:2013 A.13.2.3\",\n \"ISO 27001:2013 A.14.1.2\",\n \"ISO 27001:2013 A.14.1.3\"\n ]\n },\n \"Workflow\": {\"Status\": \"RESOLVED\"},\n \"RecordState\": \"ARCHIVED\"\n }\n yield finding", "def checkout(self): \n mtool = getToolByName(self.context, \"portal_membership\")\n ICheckoutManagement(self.context).redirectToNextURL(\"AFTER_START\")", "def refresh_cts():\n local('cd ../../cts-js && grunt')\n local('cp ../../cts-js/release/cts.js ../static/hotlink/cts.js')\n local('cd ../../cts-ui && grunt')\n local('cp ../../cts-ui/release/cts-ui.js ../static/hotlink/cts-ui.js')\n local('git add ../static/hotlink/cts.js')\n local('git add ../static/hotlink/cts-ui.js')\n local('git commit -m \"refreshing CTS JS and UI [fabfile]\"')\n local('git push origin master')", "def checkLocalSE(analyJob):\n\n status = False\n\n # Select the correct mover\n (copycmd, setup) = getCopytool()\n\n tolog(\"Copy command: %s\" % (copycmd))\n tolog(\"Setup: %s\" % (setup))\n sitemover = getSiteMover(copycmd, setup)\n tolog(\"Got site mover: %s\" % str(sitemover))\n tolog(\"Checking local SE...\")\n\n # move code to site mover, SiteMover to contain default function returning \"NotSupported\" message\n\n # determine which timeout option to use\n timeout = 120\n if sitemover.isNewLCGVersion(\"%s lcg-ls\" % (setup)):\n timeout_option = \"--connect-timeout=300 --sendreceive-timeout=%d\" % (timeout)\n else:\n timeout_option = \"-t %d\" % (timeout)\n\n _se = readpar('se').split(\",\")[0]\n token, se = sitemover.extractSE(_se)\n\n # build a proper path\n if analyJob:\n sepath = sitemover.filterSE(readpar('sepath'))\n else:\n sepath = sitemover.filterSE(readpar('seprodpath'))\n destinationList = sitemover.getDirList(sepath)\n destination = sitemover.getMatchingDestinationPath(token, destinationList)\n path = se + destination\n\n cmd = \"%s lcg-ls -l -b %s -T srmv2 %s\" % (setup, timeout_option, path)\n tolog(\"Executing command: %s\" % (cmd))\n\n try:\n ec, rs = commands.getstatusoutput(cmd)\n except Exception, e:\n tolog(\"!!WARNING!!1111!! Command failed with exception: %s\" % (e))\n else:\n if ec != 0:\n tolog(\"!!WARNING!!1111!! 
Command failed: %d, %s\" % (ec, rs))\n else:\n # tolog(\"SE responded with: %s\" % (rs))\n status = True\n\n return status", "def download_package_to_sandbox(sandbox, package_url):\n\n response = requests.get(package_url)\n\n package_tar = os.path.join(sandbox, 'package.tar.gz')\n\n with open(package_tar, 'w') as f:\n f.write(response.content)\n\n os.chdir(sandbox)\n\n with tarfile.open('package.tar.gz', 'r:gz') as tf:\n tf.extractall()\n\n directory = [d for d in os.listdir(sandbox) if os.path.isdir(d)][0]\n\n return os.path.join(sandbox, directory)", "def addon_phlex(self):\n print(\"Checking phlex version\")\n repo = self.github.get_repo('d8ahazard/Phlex')\n remote_version = list(repo.get_commits())[0].sha\n file = \"{}/Dockerfile\".format(self.name)\n remote_file = self.get_file_obj(file)\n masterfile = self.repoupdater.get_file_content(remote_file)\n file_version = masterfile.split('Phlex/archive/')[1]\n file_version = file_version.split('.zip')[0]\n if self.verbose:\n print(\"Current version\", file_version)\n print(\"Available version\", remote_version)\n if remote_version != file_version:\n msg = COMMIT_MSG.format('Phlex', remote_version)\n new_content = self.repoupdater.get_file_content(remote_file)\n new_content = new_content.replace(file_version, remote_version)\n self.repoupdater.commit(file, msg, new_content, remote_file.sha)\n else:\n print(\"Phlex already have the newest version\", file_version)", "def addon_tasmoadmin(self):\n print(\"Checking TasmoAdmin version\")\n repo = self.github.get_repo('reloxx13/TasmoAdmin')\n releases = list(repo.get_releases())\n index = 0\n while True:\n remote_version = releases[index].tag_name\n if 'b' in remote_version:\n index = index + 1\n else:\n break\n file = \"{}/Dockerfile\".format(self.name)\n remote_file = self.get_file_obj(file)\n masterfile = self.repoupdater.get_file_content(remote_file)\n file_version = masterfile.split('--branch ')[1]\n file_version = file_version.split(' --depth')[0]\n if self.verbose:\n print(\"Current version\", file_version)\n print(\"Available version\", remote_version)\n if remote_version != file_version:\n msg = COMMIT_MSG.format('TasmoAdmin', remote_version)\n new_content = self.repoupdater.get_file_content(remote_file)\n new_content = new_content.replace(file_version, remote_version)\n self.repoupdater.commit(file, msg, new_content, remote_file.sha)\n else:\n print(\"TasmoAdmin already have the newest version\", file_version)", "def push_admin_install_button(self,toolname):\n\n self.logger.info(\"clicking install link for tool '%s'\" % (toolname))\n\n # navigate to the tool status page as a tool manager\n # press the install link\n po = self.catalog.load_pageobject('ToolsStatusUploadedAdminPage',toolname)\n po.goto_page()\n checkout_status,output = po.do_install()\n\n # wait for the output success / failure block to appear\n if checkout_status is False:\n raise RuntimeError(\"checkout failed: %s\" % (output))\n\n # FIXME: add function to get the passed/failed message", "def step_impl(context):\n log.info(\"====> verify the NSA files are not placed under container_nsa directory\")\n\n nsa_container = get_nsa_container_string(context)\n container_nsa_path = \"/tmp/cme/local/GFD/{nsa_container}/container_nsa\".format(nsa_container=nsa_container)\n log.info(\" ****> DEBUG: nsa container path is: {container_nsa_path}\".format(container_nsa_path=container_nsa_path))\n\n command = \"cd {container_nsa_path}; ls -a | wc -l\".format(container_nsa_path=container_nsa_path)\n if \"2\" in 
context.cme_session.send_ssh_command(command=command): # Means that directory is empty\n log.info(\" ****> Passed The NSA files are placed under the container_nsa directory - As expected.\")\n else:\n assert False, \" ****> Failed: The NSA files are placed under the container_nsa directory - While they should not be there.\"", "def step(self, name, cmd, env=None, **kwargs):\n skia_dir = self._skia_api.m.path['checkout']\n lsan_suppressions = skia_dir.join('tools', 'lsan.supp')\n tsan_suppressions = skia_dir.join('tools', 'tsan.supp')\n ubsan_suppressions = skia_dir.join('tools', 'ubsan.supp')\n env = dict(env or {})\n env['ASAN_OPTIONS'] = 'symbolize=1 detect_leaks=1'\n env['LSAN_OPTIONS'] = ('symbolize=1 print_suppressions=1 suppressions=%s' %\n lsan_suppressions)\n env['TSAN_OPTIONS'] = 'suppressions=%s' % tsan_suppressions\n env['UBSAN_OPTIONS'] = 'suppressions=%s' % ubsan_suppressions\n\n path_to_app = self._skia_api.out_dir.join(\n self._skia_api.configuration, cmd[0])\n new_cmd = [path_to_app]\n new_cmd.extend(cmd[1:])\n return self._skia_api.run(self._skia_api.m.step, name, cmd=new_cmd, env=env,\n **kwargs)", "def sandbox(verbose, app, archive):\n return _deploy_in_mode(\n mode=\"sandbox\", verbose=verbose, log=log, app=app, archive=archive\n )", "def forced_checkout_with_versioned_obstruction(sbox):\n\n # Make a greek tree working copy\n sbox.build(read_only = True)\n\n # Create a second repository with the same greek tree\n repo_dir = sbox.repo_dir\n repo_url = sbox.repo_url\n other_repo_dir, other_repo_url = sbox.add_repo_path(\"other\")\n svntest.main.copy_repos(repo_dir, other_repo_dir, 1, 1)\n\n fresh_wc_dir = sbox.add_wc_path('fresh')\n fresh_wc_dir_A = os.path.join(fresh_wc_dir, 'A')\n os.mkdir(fresh_wc_dir)\n\n other_wc_dir = sbox.add_wc_path(\"other\")\n other_wc_dir_A = os.path.join(other_wc_dir, \"A\")\n os.mkdir(other_wc_dir)\n\n # Checkout \"A\" from the first repos to a fresh dir.\n svntest.actions.run_and_verify_svn(svntest.verify.AnyOutput, [],\n \"co\", repo_url + \"/A\",\n fresh_wc_dir_A)\n\n # Checkout \"A\" from the second repos to the other dir.\n svntest.actions.run_and_verify_svn(svntest.verify.AnyOutput, [],\n \"co\", other_repo_url + \"/A\",\n other_wc_dir_A)\n\n # Checkout the entire first repos into the fresh dir. This should\n # fail because A is already checked out. (Ideally, we'd silently\n # incorporate A's working copy into its parent working copy.)\n expected_output = svntest.wc.State(fresh_wc_dir, {\n 'iota' : Item(status='A '),\n 'A' : Item(verb='Skipped'),\n })\n expected_wc = svntest.main.greek_state.copy()\n svntest.actions.run_and_verify_checkout(repo_url, fresh_wc_dir,\n expected_output, expected_wc,\n [], '--force')\n\n # Checkout the entire first repos into the other dir. 
This should\n # fail because it's a different repository.\n expected_output = svntest.wc.State(other_wc_dir, {\n 'iota' : Item(status='A '),\n 'A' : Item(verb='Skipped'),\n })\n expected_wc = svntest.main.greek_state.copy()\n svntest.actions.run_and_verify_checkout(repo_url, other_wc_dir,\n expected_output, expected_wc,\n [], '--force')\n\n #ensure that other_wc_dir_A is not affected by this forced checkout.\n svntest.actions.run_and_verify_svn(None,\n [], \"st\", other_wc_dir_A)\n exit_code, sout, serr = svntest.actions.run_and_verify_svn(\n None, [], \"info\",\n other_wc_dir_A)\n\n #TODO rename test_stderr to test_regex or something.\n test_stderr(\"URL: \" + other_repo_url + '/A$', sout)\n\n #ensure that other_wc_dir is in a consistent state though it may be\n #missing few items.\n exit_code, sout, serr = svntest.actions.run_and_verify_svn(\n None, [], \"info\",\n other_wc_dir)\n #TODO rename test_stderr to test_regex or something.\n test_stderr(\"URL: \" + sbox.repo_url + '$', sout)", "def test_check_no_download(self):\n output = self.run_command(\"selfupdate --check\", exitcode=0)\n contains_latest_version = (\"Already at latest version\" in output)\n contains_new_version = (\"New version available\" in output)\n assert (contains_latest_version or contains_new_version)\n self.assertNotIn(\"Url: \", output)\n self.assertNotIn(\"Update completed.\", output)\n self.assertNotIn(\"Failed to update. Please try again.\", output)", "def checkout_with_obstructions(sbox):\n\n make_local_tree(sbox, False, False)\n\n #svntest.factory.make(sbox,\n # \"\"\"# Checkout with unversioned obstructions lying around.\n # svn co url wc_dir\n # svn status\"\"\")\n #svntest.factory.make(sbox,\n # \"\"\"# Now see to it that we can recover from the obstructions.\n # rm -rf A iota\n # svn up\"\"\")\n #exit(0)\n\n wc_dir = sbox.wc_dir\n url = sbox.repo_url\n\n # Checkout with unversioned obstructions causes tree conflicts.\n # svn co url wc_dir\n expected_output = svntest.wc.State(wc_dir, {\n 'iota' : Item(status=' ', treeconflict='C'),\n 'A' : Item(status=' ', treeconflict='C'),\n # And the updates below the tree conflict\n 'A/D' : Item(status=' ', treeconflict='A'),\n 'A/D/gamma' : Item(status=' ', treeconflict='A'),\n 'A/D/G' : Item(status=' ', treeconflict='A'),\n 'A/D/G/rho' : Item(status=' ', treeconflict='A'),\n 'A/D/G/pi' : Item(status=' ', treeconflict='A'),\n 'A/D/G/tau' : Item(status=' ', treeconflict='A'),\n 'A/D/H' : Item(status=' ', treeconflict='A'),\n 'A/D/H/chi' : Item(status=' ', treeconflict='A'),\n 'A/D/H/omega' : Item(status=' ', treeconflict='A'),\n 'A/D/H/psi' : Item(status=' ', treeconflict='A'),\n 'A/B' : Item(status=' ', treeconflict='A'),\n 'A/B/E' : Item(status=' ', treeconflict='A'),\n 'A/B/E/beta' : Item(status=' ', treeconflict='A'),\n 'A/B/E/alpha' : Item(status=' ', treeconflict='A'),\n 'A/B/F' : Item(status=' ', treeconflict='A'),\n 'A/B/lambda' : Item(status=' ', treeconflict='A'),\n 'A/C' : Item(status=' ', treeconflict='A'),\n 'A/mu' : Item(status=' ', treeconflict='A'),\n })\n\n expected_disk = svntest.main.greek_state.copy()\n expected_disk.remove('A/B', 'A/B/E', 'A/B/E/beta', 'A/B/E/alpha', 'A/B/F',\n 'A/B/lambda', 'A/D', 'A/D/G', 'A/D/G/rho', 'A/D/G/pi', 'A/D/G/tau',\n 'A/D/H', 'A/D/H/psi', 'A/D/H/omega', 'A/D/H/chi', 'A/D/gamma', 'A/C')\n\n actions.run_and_verify_checkout(url, wc_dir, expected_output,\n expected_disk)\n\n # svn status\n expected_status = actions.get_virginal_state(wc_dir, 1)\n # A and iota are tree conflicted and obstructed\n expected_status.tweak('A', 
'iota', status='D ', wc_rev=1,\n treeconflict='C')\n\n expected_status.tweak('A/D', 'A/D/G', 'A/D/G/rho', 'A/D/G/pi', 'A/D/G/tau',\n 'A/D/H', 'A/D/H/chi', 'A/D/H/omega', 'A/D/H/psi', 'A/D/gamma', 'A/B',\n 'A/B/E', 'A/B/E/beta', 'A/B/E/alpha', 'A/B/F', 'A/B/lambda', 'A/C',\n status='D ')\n # A/mu exists on disk, but is deleted\n expected_status.tweak('A/mu', status='D ')\n\n actions.run_and_verify_unquiet_status(wc_dir, expected_status)\n\n\n # Now see to it that we can recover from the obstructions.\n # rm -rf A iota\n svntest.main.safe_rmtree( os.path.join(wc_dir, 'A') )\n os.remove( os.path.join(wc_dir, 'iota') )\n\n\n svntest.main.run_svn(None, 'revert', '-R', os.path.join(wc_dir, 'A'),\n os.path.join(wc_dir, 'iota'))\n\n # svn up\n expected_output = svntest.wc.State(wc_dir, {\n })\n\n expected_disk = svntest.main.greek_state.copy()\n\n expected_status = actions.get_virginal_state(wc_dir, 1)\n\n actions.run_and_verify_update(wc_dir, expected_output, expected_disk,\n expected_status,)", "def check_verify_code(self):\n r = self.session.get(self.check_url)\n s = r.text\n data = json.loads(s[s.index('{'):-1])\n if data.get('codestring'):\n return data.get('codestring', \"\")\n return \"\"", "def try_attach_sandbox(cmd, sandbox = None):\n if not get_setting_async('use_cabal_dev'):\n return cmd\n return attach_sandbox(cmd, sandbox)", "def is_sandbox(self) -> bool:\n from hubble.executor.helper import is_valid_sandbox_uri\n\n uses = getattr(self.args, 'uses') or ''\n return is_valid_sandbox_uri(uses)", "async def source(self, ctx):\n \"\"\" Check out my source code <3 \"\"\"\n # Do not remove this command, this has to stay due to the GitHub LICENSE.\n # TL:DR, you have to disclose source according to MIT.\n # Reference: https://github.com/AlexFlipnote/discord_bot.py/blob/master/LICENSE\n await ctx.send(f\"**{ctx.bot.user}** is powered by this source code:\\nhttps://github.com/AlexFlipnote/discord_bot.py With modifications by user: snow-blade\")", "def __gitVerify(self):\n self.vcs.gitVerify(self.project.getProjectPath())", "def test_file_managed_http_source_skip_verify(file, tmp_path, remote_grail_scene33):\n name = str(tmp_path / \"testfile\")\n ret = file.managed(name=name, source=remote_grail_scene33.url, skip_verify=True)\n assert ret.result is True", "def checkout(branch=\"lf-dev\"):\n with cd(FOLDER):\n sudo('git fetch', user='tomcat')\n sudo('git checkout %s' % branch, user='tomcat')\n status()", "def external(self) -> bool:\n return getattr(self.args, 'external', False) or self.is_sandbox", "def checkout(user_data):\n checkout_res = requests.post(url=\"http://127.0.0.1:5000/checkout\", json=user_data)\n return checkout_res.text", "def lifecycle_package(self, cc_name, cc_version, cc_path, language):\n if self.version in BasicEnv.binary_versions_v2:\n label = cc_name+\"_\"+cc_version\n res = os.system(\"./../bin/{}/bin/peer lifecycle chaincode package {}.tar.gz --path {} --lang {} --label {}\"\n .format(self.version, cc_name, cc_path, language, label))\n res = res >> 8\n print(\"res\", res)\n return", "def test_main():\n\n temp_dir = \"./deepreg_download_temp_dir\"\n branch = Repo(\".\").head.object.hexsha\n\n main(args=[\"--output_dir\", temp_dir, \"--branch\", branch])\n\n # Check downloading all req'd folders into temp, verify that they are the same as in main branch.\n config_dcmp = dircmp(\"./config\", os.path.join(temp_dir, \"config\"))\n assert not has_diff_files(config_dcmp)\n\n data_dcmp = dircmp(\"./data\", os.path.join(temp_dir, \"data\"))\n assert not 
has_diff_files(data_dcmp)\n\n demos_dcmp = dircmp(\"./demos\", os.path.join(temp_dir, \"demos\"))\n assert not has_diff_files(demos_dcmp)\n\n shutil.rmtree(temp_dir)", "def checkout(driver, user_details) -> None:\n\t\n\t# wait until checkout page loads\n\ttry: # look for twitter button as indicator of load\n\t\twait = WebDriverWait(driver, 10)\n\t\telement = wait.until(EC.element_to_be_clickable(\n\t\t\t(By.XPATH, \"//a[@href='https://twitter.com/tendmoney']\")))\n\texcept NoSuchElementException: # page didn't load in a reasonable amount of time\n\t\tmanual_takeover() # swap to manual mode\n\t# fill out checkout details\n\tdriver.find_element_by_id(\"billing_first_name\").send_keys(user_details[0]) # first name\n\tdriver.find_element_by_id(\"billing_last_name\").send_keys(user_details[1]) # last name\n\tdriver.find_element_by_id(\"ak_venmo\").send_keys(user_details[2]) # venmo username\n\tdriver.find_element_by_id(\"billing_email\").send_keys(user_details[3]) # email address\n\t# switch to popup/new page (https://stackoverflow.com/a/29052586/4513452)\n\t# try: # look for clear cart button as indicator of load\n\t# \twait = WebDriverWait(driver, 15)\n\t# \t# not working even though i swear it's right\n\t# \t# element = wait.until(EC.element_to_be_clickable(\n\t# \t# \t(By.XPATH, '/html/body/div[1]/div/div[1]/div')))\n\t# \telement = wait.until(EC.element_to_be_clickable(\n\t# \t\t(By.XPATH, \"//a[contains(text(), 'Clear Cart')]\")))\n\t# except: # page didn't load in a reasonable amount of time\n\t# \tmanual_takeover() # swap to manual mode\n\t# driver.find_element_by_xpath('/html/body/div[1]/div/div[1]/div').click() # click pay\n\ttime.sleep(10) # i have no idea why this ^ shit doesn't work, f this\n\t# this is so janky lol don't tell anyone\n\tdriver.find_element_by_id(\"billing_email\").click() # click email address\n\tdriver.find_element_by_id(\"billing_email\").send_keys(Keys.TAB) # tab over to paypal\n\tActionChains(driver).key_down(Keys.RETURN).key_up(Keys.RETURN).perform() # hit enter\n\t# driver.find_element_by_xpath(\"/html\").send_keys(Keys.RETURN) # hit enter on paypal\n\ttime.sleep(10)\n\t# try: # look for overlay as indicator of load\n\t# \twait = WebDriverWait(driver, 15)\n\t# \telement = wait.until(EC.element_to_be_clickable(\n\t# \t\t(By.XPATH, \"//a[contains(text(), 'Click to Continue')]\")))\n\t# except: # page didn't load in a reasonable amount of time\n\t# \tmanual_takeover() # swap to manual mode\n\tmain_window_handle = driver.window_handles[0] # get main window handle for later\n\tpaypal_window_handle = driver.window_handles[1] # get paypal popup handle\n\t# print(\"pp handle: \", paypal_window_handle)\n\tdriver.switch_to.window(paypal_window_handle) # focus on popup\n\ttry: # look for final payment button as indicator of load\n\t\twait = WebDriverWait(driver, 25)\n\t\telement = wait.until(EC.element_to_be_clickable(\n\t\t\t(By.ID, \"payment-submit-btn\")))\n\texcept NoSuchElementException: # page didn't load in a reasonable amount of time\n\t\tmanual_takeover() # swap to manual mode\n\t# testing\n\t# pp = driver.find_element_by_id(\"payment-submit-btn\") # .click()\n\t# print(\"pp: \", pp) # dont want to actually check out during testing\n\t# real checkout\n\tdriver.find_element_by_id(\"payment-submit-btn\").click()", "def do_check(name, tmpdir, sha):\n path = write_tmp_blob(tmpdir, name, sha)\n puppethooks.checkers.check(path)\n os.unlink(path)", "def check_has_network_code_checkbox(self):\n self.click_element(self.has_network_code_checkbox_locator)", "def 
test_curl_command_is_avail_can_access_marklogic(\n command_curl, test_cma_url, test_cma_creds\n):\n cmd = [\n command_curl,\n \"--anyauth\",\n \"--user\",\n test_cma_creds,\n f\"{test_cma_url}?format=json\",\n ]\n curl_result = subprocess.run(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE)\n assert curl_result.stdout is not None\n assert curl_result.returncode == 0", "def step_impl(context, test_case_num):\n log.info(\"=====> Check that the file {test_case_num}.container.nsa is available and its md5sum matches the value previously obtained\".format(test_case_num=test_case_num))\n command = \"md5sum \" + stb_parameters.MHC_SYNCAPP_PATH + \"/S_bcc_\" + context.cme_session.cme_gfd_application + \"/\" + test_case_num + \".container.nsa | awk {'print $1'}\"\n log.info(\" ****> DEBUG: Command = {command}\".format(command=command))\n\n result = send_telnet_command(command)\n log.info(\" ****> DEBUG: Result = {result}\".format(result=result))\n\n elapse_time = 0\n while elapse_time <= resourceset_parameters.TIME_TO_WAIT_AFTER_POSTING:\n if md5_of_files[test_case_num + '_container_nsa'] in result:\n log.info(\" *****> Passed: The md5sum of the {test_case_num}_container_nsa matches the value previously obtained. The match was about: {elapse_time} seconds\".format(test_case_num=test_case_num, elapse_time=str(elapse_time)))\n break\n else:\n time.sleep(resourceset_parameters.ELAPSE_TIME)\n result = send_telnet_command(command)\n log.info(\" ****> Waiting {elapse_time} seconds after posting for all the chunks to be committed to disk...\".format(elapse_time=str(elapse_time)))\n elapse_time += resourceset_parameters.ELAPSE_TIME\n if elapse_time > resourceset_parameters.TIME_TO_WAIT_AFTER_POSTING:\n log.info(\" ****> DEBUG: The md5sum can be found in the result message: [{result}]\".format(result=result))\n assert False, \" l \" + test_case_num + \"_container_nsa in the STB does not match the checksum of the source file(\" + md5_of_files[test_case_num + '_container_nsa'] + \"). 
Waited about: {sec} seconds\".format(test_case_num=test_case_num, sec=str(elapse_time))", "def __gitVerifyBundle(self):\n self.vcs.gitVerifyBundle(self.project.getProjectPath())", "def code(ctx, show_hidden, query, single, password, remember):\n\n _init_session(ctx, password, remember)\n\n session = ctx.obj[\"session\"]\n entries = session.calculate_all()\n creds = _search(entries.keys(), query, show_hidden)\n\n if len(creds) == 1:\n cred = creds[0]\n code = entries[cred]\n if cred.touch_required:\n prompt_for_touch()\n try:\n if cred.oath_type == OATH_TYPE.HOTP:\n with prompt_timeout():\n # HOTP might require touch, we don't know.\n # Assume yes after 500ms.\n code = session.calculate_code(cred)\n elif code is None:\n code = session.calculate_code(cred)\n except ApduError as e:\n if e.sw == SW.SECURITY_CONDITION_NOT_SATISFIED:\n raise CliFail(\"Touch account timed out!\")\n entries[cred] = code\n\n elif single and len(creds) > 1:\n _error_multiple_hits(ctx, creds)\n\n elif single and len(creds) == 0:\n raise CliFail(\"No matching account found.\")\n\n if single and creds:\n if is_steam(cred):\n click.echo(calculate_steam(session, cred))\n else:\n click.echo(code.value)\n else:\n outputs = []\n for cred in sorted(creds):\n code = entries[cred]\n if code:\n if is_steam(cred):\n code = calculate_steam(session, cred)\n else:\n code = code.value\n elif cred.touch_required:\n code = \"[Requires Touch]\"\n elif cred.oath_type == OATH_TYPE.HOTP:\n code = \"[HOTP Account]\"\n else:\n code = \"\"\n outputs.append((_string_id(cred), code))\n\n longest_name = max(len(n) for (n, c) in outputs) if outputs else 0\n longest_code = max(len(c) for (n, c) in outputs) if outputs else 0\n format_str = \"{:<%d} {:>%d}\" % (longest_name, longest_code)\n\n for name, result in outputs:\n click.echo(format_str.format(name, result))", "def test_download_linux(salt_test_command, pkg_container, root_url, salt_release):\n res = pkg_container.container.run(salt_test_command)\n assert res.returncode == 0", "def complie_soldity():\n assert(os.path.exists(src_entry) and os.path.isfile(src_entry) )\n\n commands = [\n [SOLC, \"--optimize\", \"--bin\", \"--overwrite\", \"-o\", os.path.relpath(bin_dir), os.path.relpath(src_entry) ]\n , [SOLC, \"--optimize\", \"--ast\", \"--overwrite\", \"-o\", os.path.relpath(ast_dir), os.path.relpath(src_entry) ]\n , [SOLC, \"--optimize\", \"--abi\", \"--overwrite\", \"-o\", os.path.relpath(dst_dir), os.path.relpath(src_entry) ]\n ]\n # commands = [\n # [SOLC, \"--optimize\", \"--bin\", \"-o\", bin_dir, os.path.relpath(src_entry) ]\n # , [SOLC, \"--optimize\", \"--ast\", \"-o\", ast_dir, os.path.relpath(src_entry) ]\n # , [SOLC, \"--optimize\", \"--abi\", \"-o\", dst_dir, os.path.relpath(src_entry) ]\n # ]\n\n print(\"======================Complie Solidity Language=========================\")\n for cmd in commands:\n command = \" \".join(cmd)\n print(command)\n os.system(command)\n \n os.system(\"cp %s/* %s\" %(bin_dir, dst_dir))\n\n # result = map(lambda cmd: os.system(\" \".join(cmd)), commands )\n # print(result)", "def _setup_chromium_source(config_bundle, buildspace_downloads, buildspace_tree,\n show_progress, pruning_set):\n source_archive = buildspace_downloads / 'chromium-{}.tar.xz'.format(\n config_bundle.version.chromium_version)\n source_hashes = source_archive.with_name(source_archive.name + '.hashes')\n\n if source_archive.exists() and not source_archive.is_file():\n raise NotAFileError(source_archive)\n if source_hashes.exists() and not source_hashes.is_file():\n raise 
NotAFileError(source_hashes)\n\n get_logger().info('Downloading Chromium source code...')\n _download_if_needed(\n source_archive,\n _SOURCE_ARCHIVE_URL.format(config_bundle.version.chromium_version),\n show_progress)\n _download_if_needed(\n source_hashes,\n _SOURCE_HASHES_URL.format(config_bundle.version.chromium_version),\n False)\n get_logger().info('Verifying hashes...')\n with source_archive.open('rb') as file_obj:\n archive_data = file_obj.read()\n for hash_name, hash_hex in _chromium_hashes_generator(source_hashes):\n get_logger().debug('Verifying %s hash...', hash_name)\n hasher = hashlib.new(hash_name, data=archive_data)\n if not hasher.hexdigest().lower() == hash_hex.lower():\n raise HashMismatchError(source_archive)\n get_logger().info('Extracting archive...')\n _extract_tar_file(source_archive, buildspace_tree, Path(), pruning_set,\n Path('chromium-{}'.format(config_bundle.version.chromium_version)))", "def test_checkout_process(self):\n product = self.create_product()\n\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n\n cs_url, cs_data = self.do_sign_auth_request(basket_id)\n\n res = self.do_cybersource_post(cs_url, cs_data)\n self.assertEqual(res.status_code, 302)\n self.check_finished_order(cs_data['reference_number'], product.id)", "def codebuild_public_build_check(cache: dict, session, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:\n # ISO Time\n iso8601Time = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()\n for projects in get_code_build_projects(cache, session):\n # B64 encode all of the details for the Asset\n assetJson = json.dumps(projects,default=str).encode(\"utf-8\")\n assetB64 = base64.b64encode(assetJson)\n buildProjectName = str(projects[\"name\"])\n buildProjectArn = str(projects[\"arn\"])\n # check if the build is public\n if projects[\"projectVisibility\"] == \"PUBLIC_READ\":\n # this is a failing check\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": f\"{buildProjectArn}/public-access-build-check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": buildProjectArn,\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\n \"Software and Configuration Checks/AWS Security Best Practices\",\n \"Effects/Data Exposure\",\n \"Sensitive Data Identifications\"\n ],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"MEDIUM\"},\n \"Confidence\": 99,\n \"Title\": \"[CodeBuild.7] CodeBuild projects should not be publicly accessible\",\n \"Description\": f\"CodeBuild project {buildProjectName} is publicly accessible. When you make your project's builds available to the public, all of a project's build results, logs, and artifacts, including builds that were run when the project was private, are made available to the public. You should ensure that sensitive details are not stored within your project. 
Refer to the remediation instructions if this configuration is not intended.\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"For more information on public builds refer to the Public build projects in AWS CodeBuild section of the AWS CodeBuild User Guide\",\n \"Url\": \"https://docs.aws.amazon.com/codebuild/latest/userguide/public-builds.html\",\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"AWS\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": awsAccountId,\n \"AssetRegion\": awsRegion,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Developer Tools\",\n \"AssetService\": \"AWS CodeBuild\",\n \"AssetComponent\": \"Project\"\n },\n \"Resources\": [\n {\n \"Type\": \"AwsCodeBuildProject\",\n \"Id\": buildProjectArn,\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"AwsCodeBuildProject\": {\n \"Name\": buildProjectName\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"FAILED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 PR.AC-3\",\n \"NIST SP 800-53 Rev. 4 AC-1\",\n \"NIST SP 800-53 Rev. 4 AC-17\",\n \"NIST SP 800-53 Rev. 4 AC-19\",\n \"NIST SP 800-53 Rev. 4 AC-20\",\n \"NIST SP 800-53 Rev. 4 SC-15\",\n \"AICPA TSC CC6.6\",\n \"ISO 27001:2013 A.6.2.1\",\n \"ISO 27001:2013 A.6.2.2\",\n \"ISO 27001:2013 A.11.2.6\",\n \"ISO 27001:2013 A.13.1.1\",\n \"ISO 27001:2013 A.13.2.1\"\n ]\n },\n \"Workflow\": {\"Status\": \"NEW\"},\n \"RecordState\": \"ACTIVE\"\n }\n yield finding\n else:\n # this is a passing check\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": f\"{buildProjectArn}/public-access-build-check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": buildProjectArn,\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\n \"Software and Configuration Checks/AWS Security Best Practices\",\n \"Effects/Data Exposure\",\n \"Sensitive Data Identifications\"\n ],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"INFORMATIONAL\"},\n \"Confidence\": 99,\n \"Title\": \"[CodeBuild.7] CodeBuild projects should not be publicly accessible\",\n \"Description\": f\"CodeBuild project {buildProjectName} is not publicly accessible.\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"For more information on public builds refer to the Public build projects in AWS CodeBuild section of the AWS CodeBuild User Guide\",\n \"Url\": \"https://docs.aws.amazon.com/codebuild/latest/userguide/public-builds.html\",\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"AWS\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": awsAccountId,\n \"AssetRegion\": awsRegion,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Developer Tools\",\n \"AssetService\": \"AWS CodeBuild\",\n \"AssetComponent\": \"Project\"\n },\n \"Resources\": [\n {\n \"Type\": \"AwsCodeBuildProject\",\n \"Id\": buildProjectArn,\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"AwsCodeBuildProject\": {\n \"Name\": buildProjectName\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"PASSED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 PR.AC-3\",\n \"NIST SP 800-53 Rev. 4 AC-1\",\n \"NIST SP 800-53 Rev. 4 AC-17\",\n \"NIST SP 800-53 Rev. 4 AC-19\",\n \"NIST SP 800-53 Rev. 4 AC-20\",\n \"NIST SP 800-53 Rev. 
4 SC-15\",\n \"AICPA TSC CC6.6\",\n \"ISO 27001:2013 A.6.2.1\",\n \"ISO 27001:2013 A.6.2.2\",\n \"ISO 27001:2013 A.11.2.6\",\n \"ISO 27001:2013 A.13.1.1\",\n \"ISO 27001:2013 A.13.2.1\"\n ]\n },\n \"Workflow\": {\"Status\": \"RESOLVED\"},\n \"RecordState\": \"ARCHIVED\"\n }\n yield finding", "def checkout(self, renderer, info):\n self._renderer = renderer\n if info.get('path') is not None:\n paths = info.path\n if not isinstance(paths, Sequence):\n paths = [paths]\n for path in paths:\n wc, todo, checkout_pkg = self._inspect_path(path)\n if checkout_pkg:\n self._checkout_package(wc.apiurl, wc.path, todo[0], info)\n else:\n wc.revert(*todo)\n if info.get('package') is not None:\n self._checkout_package(info.apiurl, info.project, info.package,\n info)\n elif info.get('project') is not None:\n self._checkout_project(info)", "def test_public_pending_exists(self):\n self.change_status(self.version_1_2_2, amo.STATUS_PENDING)\n self.change_status(self.version_1_2_0, amo.STATUS_PENDING)\n self.change_version(self.version_1_2_0, '1.2beta')\n\n version, file = self.get('1.2', self.version_int,\n self.app, self.platform)\n\n assert version == self.version_1_2_1", "async def check(self, code):\n await self.bot.reply(self.bot.check_code(code))", "def checkout_code(workdir, gituser, add_own_forks, forks, branches):\n # Compose url of main repository for the default fork\n url = REPOSITORY_URL_TEMPLATE.format(fork=PARENT_REPOSITORY_DEFAULT_FORK, repo=PARENT_REPOSITORY)\n # Check out main repository into the work directory\n cmd = 'git clone --branch {branch} {url} {dirname}'.format(url=url,\n branch=PARENT_REPOSITORY_DEFAULT_BRANCH,\n dirname=workdir)\n execute(cmd)\n # Change to parent repository (work directory)\n os.chdir(workdir)\n # Initialize the submodules\n cmd = 'git submodule update --init'\n execute(cmd)\n # For each of the submodules and the main repository,\n # add the user fork if asked to do so, and check\n # out the code from the requested fork/branch\n for repo in forks.keys():\n if repo in SUBMODULES.keys():\n subdir = os.path.join(workdir, SUBMODULES[repo])\n os.chdir(subdir)\n # Rename default repository from origin to upstream\n cmd = 'git remote rename origin upstream'\n execute(cmd)\n # Add user fork if requested\n if add_own_forks[repo]:\n remote = REPOSITORY_URL_TEMPLATE.format(fork=gituser, repo=repo)\n cmd = 'git remote add origin {remote}'.format(remote=remote)\n execute(cmd)\n # Update from remote\n cmd = 'git remote update'\n execute(cmd)\n # Checkout requested fork/branch\n if forks[repo] == gituser:\n if add_own_forks[repo]:\n remote = 'origin'\n else:\n message = 'Logic error: requested to check out branch {branch}'.format(branch=branches[repo])\n message += ' from user fork for repository {repo}, but add_own_fork is False'.format(repo=repo)\n raise Exception(message)\n elif forks[repo] == PARENT_REPOSITORY_DEFAULT_FORK:\n remote = 'upstream'\n else:\n message = 'Logic error: requested to check out branch {branch}'.format(branch=branches[repo])\n message += ' from unknown fork {fork} for repository {repo}'.format(fork=forks[repo], repo=repo)\n raise Exception(message)\n cmd = 'git checkout {remote}/{branch}'.format(remote=remote, branch=branches[repo])\n execute(cmd)\n if repo in SUBMODULES.keys():\n os.chdir(workdir)\n return", "def up(config):\n\n os.system(\"sudo apt-get install cloc --yes\")", "def deploy_code(ref=None):\n ref = ref or env.default_deploy_ref\n puts(\"Deploying %s\" % ref)\n if not files.exists(env.code_dir):\n sudo('git clone %s %s' % (env.git_url, 
env.code_dir))\n with cd(env.code_dir):\n sudo('git fetch && git reset --hard %s' % ref)", "def update_code_from_git():\n if not files.exists(CODE_DIR):\n with cd(HOME_DIR):\n run(\"git clone %s\" % MAIN_GITHUB_REP )\n\n with cd(CODE_DIR):\n git_pull()", "def get_obscode(self, dest_list):\n OBSCODE_SRC = \"http://www.minorplanetcenter.net/iau/lists/ObsCodes.html\"\n OBSCODE_NAME = \"OBSCODE.dat\"\n \n out_fh = None\n in_fh = None \n \n # get observatory codes from http://www.minorplanetcenter.net/iau/lists/ObsCodes.html\n obscode_file = self.get_file(OBSCODE_SRC)\n if (obscode_file == None): return MOPSUpdater.FAIL\n \n try:\n # remove the first two and last line from obscode.\n in_fh = open(obscode_file, \"r\")\n lines = in_fh.readlines()\n out_fh = open(os.path.join(self._workDir, OBSCODE_NAME), \"w\")\n out_fh.writelines(lines[2:-1])\n except Exception, e:\n self._logger.error(\"UPDATE_MOPS_DATA: %s\" % (str(e)))\n return MOPSUpdater.FAIL\n finally:\n if (in_fh): in_fh.close()\n if (out_fh): out_fh.close()\n # <-- end try\n \n try:\n # Install OBSCODE.dat in mops\n for d in dest_list:\n # Verify destination directory\n if (not os.path.exists(os.path.dirname(d))):\n self._logger.error(\"UPDATE_MOPS_DATA: The destination directory %s does not exist.\" % (os.path.dirname(d)))\n continue\n # <-- end if \n shutil.copyfile(os.path.join(self._workDir, OBSCODE_NAME),d)\n # <-- end for \n except Exception, e:\n self._logger.error(\"UPDATE_MOPS_DATA: %s\" % (str(e))) \n return MOPSUpdater.FAIL\n else:\n self._logger.info(\"UPDATE_MOPS_DATA: obscode.dat file update complete.\") \n self._logger.debug(\"UPDATE_MOPS_DATA: obscode.dat file contents\")\n self._logger.debug(\"%s\" % (lines[2:-1]))\n return MOPSUpdater.SUCCESS\n # <-- end try", "def _check_vggish_ckpt_exists():\n util.maybe_create_directory(params.VGGISH_CHECKPOINT_DIR)\n if not util.is_exists(params.VGGISH_CHECKPOINT):\n url = \"https://storage.googleapis.com/audioset/vggish_model.ckpt\"\n util.maybe_download(url, params.VGGISH_CHECKPOINT_DIR)" ]
[ "0.57098264", "0.5443898", "0.52824104", "0.5211898", "0.50929743", "0.5078513", "0.5051536", "0.50458026", "0.5009098", "0.50035506", "0.49857637", "0.49795955", "0.490863", "0.4892152", "0.4889629", "0.4881726", "0.48687592", "0.48336747", "0.4761207", "0.47558445", "0.47543064", "0.47429267", "0.47189486", "0.47159085", "0.4707934", "0.46640337", "0.46609634", "0.46394268", "0.46389657", "0.46382028", "0.4633919", "0.46335933", "0.46311754", "0.46177998", "0.4615082", "0.46128768", "0.46028897", "0.45965847", "0.4595417", "0.45817545", "0.45683455", "0.4565513", "0.4565062", "0.4560595", "0.45605746", "0.4557221", "0.45306465", "0.45294634", "0.45281795", "0.45275992", "0.45169833", "0.4512719", "0.45115605", "0.4502878", "0.4502256", "0.44924152", "0.44909698", "0.44808412", "0.44803283", "0.44771427", "0.447507", "0.4467994", "0.44607136", "0.4460301", "0.4456886", "0.4450641", "0.4447588", "0.44453764", "0.44404355", "0.44379035", "0.44341102", "0.44340765", "0.44290254", "0.44281843", "0.4425458", "0.4422401", "0.4414924", "0.44094557", "0.44027144", "0.44022563", "0.44008765", "0.43954444", "0.43810266", "0.43791488", "0.4373872", "0.43738547", "0.43713588", "0.43706098", "0.43664148", "0.43579504", "0.4353567", "0.43505207", "0.4345648", "0.43427518", "0.43414575", "0.43401107", "0.43390745", "0.43378472", "0.43299726", "0.43295023" ]
0.6543128
0
Test adding basic InputNode
def test0_init(self): print("\nTest 0: Initialization") builder = StaticBuilder() in1_name = builder.addInput(10) in1 = builder.input_nodes[in1_name] print('Node keys in builder:', list(builder.input_nodes.keys())) self.assertEqual(in1.label, 0, "The label has not been assigned correctly") self.assertEqual(builder.num_nodes, 1, "The number of nodes has not been " "assigned correctly") self.assertEqual(in1.num_declared_outputs, 0, "The number of outputs of " "the InputNode has not been assigned correctly") self.assertEqual(in1.num_declared_inputs, 0, "The number of inputs of " "the InputNode has not been assigned correctly")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testInputDesc(self):\n\n self.assertTrue(\n hasattr(self.node, 'input_desc')\n )\n\n self.assertEqual(\n None,\n self.node.input_desc\n )", "def test_addOutput(self):\n print(\"\\nTest 2: Adding OutputNode\")\n builder = StaticBuilder()\n builder.addInput(10, name=\"In\")\n builder.addInner(3, name=\"Det\")\n o_name = builder.addOutput(name=\"Out\")\n \n o1 = builder.nodes[o_name]\n print(\"\\nNode keys in builder:\", list(builder.nodes.keys()))\n print(\"This node's key:\", o_name)\n self.assertEqual(o1.label, 2, \"The label has not been assigned correctly\")\n self.assertEqual(builder.num_nodes, 3, \"The number of nodes has not been \"\n \"assigned correctly\")\n self.assertEqual(o1.num_declared_outputs, 0, \"The number of outputs of the \"\n \"OutputNode has not been assigned correctly\")\n self.assertEqual(o1.num_declared_inputs, 0, \"The number of inputs of the \"\n \"OutputNode has not been assigned correctly\")", "def _add_input(self, node_entry, idx):\n if node_entry[\"name\"] in self._params:\n self._add_params(node_entry, idx)\n else:\n node_type = node_entry[\"types\"][0]\n dtype = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[numpy.dtype(node_type.dtype)]\n input = onnx.helper.make_tensor_value_info(\n node_entry[\"name\"], dtype, shape=get_node_shape(node_type)\n )\n self._mc.add_inputs([input])", "def testInputDesc(self):\n\n self.assertTrue(\n hasattr(self.node, 'input_desc')\n )\n\n self.assertEqual(\n None,\n self.node.input_desc\n )\n\n self.node.input_desc = 'Sunset with an Eizo'\n\n self.assertEqual(\n 'Sunset with an Eizo',\n self.node.input_desc\n )", "def add_input(self, var):\n raise NotImplementedError", "def addInput(self, input):\n\t\tself.config._WITH_ACTIONS = True\n\t\tself.config.ACTIONS.append((\"input\", input))", "def add_node(self, node):", "def add_input_param(self, name, ptype, default_value=NULL_VALUE): \n param_name = self._get_unique_param_name(name, NodeParam.INPUT) \n p = NodeParam(self, param_name, ptype, NodeParam.INPUT, \n default_value=default_value, user_param=self._params_created) \n self._input_params[param_name] = p\n return p", "def test_enough_inputs(self):\n n = Node('a') | Node('b')\n n.validate()", "def add_input(self, sinput):\r\n self.sinputs.append(sinput)\r\n self.variables.append(sinput.variable)", "def add_node (self, node):\n raise NotImplementedError", "def add_input(self, input, number, logid='default-log'):\n cell = self.get_cell(number, logid)\n in_element = ET.SubElement(cell, 'input')\n in_element.text = input", "def __init__(self, name, node, value=None):\n super(InputPlug, self).__init__(name, node, (OutputPlug, ))\n self.value = value\n self.is_dirty = True\n self.node.inputs[self.name] = self", "def check(self, input, node):\n assert False # Must be redefined", "def create_input_element(self, **kwargs):\r\n return None", "def input_nodes(self):\n pass", "def add_inputt(self, name='T', control=False):\n inpt = InputT(name=name)\n self.nodes[name] = inpt\n self.rc.add_node(inpt)\n if control: # control input\n if name in self.inp.keys():\n raise Exception('Input temperature already defined')\n self.inp[name] = inpt\n else: # disturbance\n if name in self.dist.keys():\n raise Exception('Input temperature already defined')\n self.dist[name] = inpt", "def add(self, inp, out):\n self.curr_node.input_frequencies[inp] += 1\n if inp not in self.curr_node.children.keys() or out not in self.curr_node.children[inp].keys():\n node = Node(out)\n self.curr_node.children[inp][out] = node\n\n self.curr_node = 
self.curr_node.children[inp][out]\n self.curr_node.frequency += 1", "def set_input(self, nodeVal: NodeValue) -> None:\n\n self.inputs_.append(nodeVal)", "def addInput(self, *args):\n return _libsbml.Transition_addInput(self, *args)", "def test_add_00():\n\n a_shape = (1, 1, 3, 4)\n b_shape = (1, 2, 3, 1)\n out_shape = (1, 2, 3, 4)\n\n class AddTester(Base):\n def create_onnx(self) -> onnx.ModelProto:\n node = make_node(\"Add\", inputs=[\"A\", \"B\"], outputs=[\"C\"])\n inputs = [\n info(\"A\", TensorProto.FLOAT, a_shape),\n info(\"B\", TensorProto.FLOAT, b_shape),\n ]\n outputs = [info(\"C\", TensorProto.FLOAT, out_shape)]\n\n graph = make_graph([node], \"add_graph\", inputs, outputs)\n model = make_model(graph)\n return model\n\n a = np.random.rand(*a_shape).astype(np.float32)\n b = np.random.rand(*b_shape).astype(np.float32)\n\n outputs = [\"C\"]\n AddTester({\"A\": a, \"B\": b}, outputs).run()", "def create_test_node():\n node = cmds.createNode(\"unknown\")\n _add_test_attrs_to_node(node)\n return node", "def test_parse_add(self):\n self.assertEqual(parse_input.parse([\"1\", \"+\", \"2\"]), 3)", "def test_add_network(self):\n pass", "def __init__(self, name, node, value=None):\n self.accepted_plugs = (OutputPlug,)\n\n super(InputPlug, self).__init__(name, node)\n self.value = value\n self.is_dirty = True\n if not isinstance(self, SubPlug):\n self.node.inputs[self.name] = self", "def add_inputq(self, name='T', control=False):\n inpq = InputQ(name=name)\n self.rc.add_node(inpq)\n self.nodes[name] = inpq\n if control: # control input\n if name in self.inp.keys():\n raise Exception('Input heat flow already defined')\n self.inp[name] = inpq\n\n else: # disturbance\n if name in self.dist.keys():\n raise Exception('Disturbance heat flow already defined')\n\n self.dist[name] = inpq", "def test_insert_node(self):\r\n myObj = DLinkedList()\r\n myObj.append(120)\r\n myObj.append(100)\r\n self.assertEqual(myObj.insert_node(Node(1000), myObj.head), [120, 1000, 100])", "def test_init_node():\n from dll import Node\n new_node = Node(5)\n assert new_node.value == 5", "def test_trie_node_init_inputs_two():\n from trie import Node\n test_case = Node(\"a\", True)\n assert test_case.end is True", "def test_create_named_input_edge(self):\n n1, n2 = Node(), Node()\n result = n1 | 'foo' * n2\n self.assertEqual(result, n2)\n self.assertEqual(n1.eout, [Edge(n1, n2, input_name='foo')])\n self.assertEqual(n2.ein, [Edge(n1, n2, input_name='foo')])", "def add_inputs(self, inputs):\n self.inputs += inputs", "def test_inputs(self):\n # Add\n txhash, txhash_len = make_cbuffer('00'*32)\n script, script_len = make_cbuffer('00')\n for args in [\n (None, txhash, txhash_len, 0, 0xffffffff, script, script_len, None, 0), # Empty tx\n (wally_tx(), None, txhash_len, 0, 0xffffffff, script, script_len, None, 0), # Empty hash\n (wally_tx(), txhash, txhash_len-1, 0, 0xffffffff, script, script_len, None, 0), # Invalid hash length\n (wally_tx(), txhash, txhash_len, 0, 0xffffffff, None, script_len, None, 0), # Empty script\n (wally_tx(), txhash, txhash_len, 0, 0xffffffff, script, 0, None, 0), # Invalid script length\n (wally_tx(), txhash, txhash_len, 0, 0xffffffff, script, script_len, None, 1), # Unsupported flags\n ]:\n self.assertEqual(WALLY_EINVAL, wally_tx_add_raw_input(*args))\n # Testing only wally_tx_add_raw_input, because it calls wally_tx_add_input\n\n # Remove\n for args in [\n (None, 0), # Invalid tx\n (wally_tx(), 0), # Remove from empty tx\n (self.tx_deserialize_hex(TX_FAKE_HEX), 1), # Invalid index\n ]:\n 
self.assertEqual(WALLY_EINVAL, wally_tx_remove_input(*args))\n\n # Add and then remove, then test that serialization remains the same\n wit = wally_tx_witness_stack()\n for args, expected in [\n ((self.tx_deserialize_hex(TX_FAKE_HEX), txhash, txhash_len, 0, 0xffffffff, script, script_len, wit, 0),\n None),\n ((self.tx_deserialize_hex(TX_WITNESS_HEX), txhash, txhash_len, 0, 0xffffffff, script, script_len, wit, 0),\n TX_WITNESS_HEX[:13]+utf8('2')+TX_WITNESS_HEX[14:96]+utf8('00'*36)+utf8('0100ffffffff')+TX_WITNESS_HEX[96:-8]+utf8('00')+TX_WITNESS_HEX[-8:]),\n ]:\n before = self.tx_serialize_hex(args[0])\n self.assertEqual(WALLY_OK, wally_tx_add_raw_input(*args))\n if expected:\n self.assertEqual(utf8(self.tx_serialize_hex(args[0])), expected)\n self.assertEqual(WALLY_OK, wally_tx_remove_input(byref(args[0]), args[0].num_inputs-1))\n self.assertEqual(before, self.tx_serialize_hex(args[0]))\n\n script2, script2_len = make_cbuffer('77' * 16)\n tx = self.tx_deserialize_hex(TX_FAKE_HEX)\n ret = wally_tx_add_raw_input(tx, txhash, txhash_len, 1, 0xfffffffe, script2, script2_len, wit, 0)\n self.assertEqual(ret, WALLY_OK)\n before_hex = self.tx_serialize_hex(tx)\n num_inputs = tx.num_inputs\n\n def remove_and_test(idx):\n self.assertNotEqual(before_hex, self.tx_serialize_hex(tx))\n self.assertEqual(WALLY_OK, wally_tx_remove_input(tx, idx))\n self.assertEqual(before_hex, self.tx_serialize_hex(tx))\n\n for idx in range(0, num_inputs + 1):\n ret = wally_tx_add_raw_input_at(tx, idx, txhash, txhash_len,\n 2, 0xfffffffd, script, script_len, wit, 0)\n self.assertEqual(ret, WALLY_OK)\n remove_and_test(idx)\n\n ret = wally_tx_add_raw_input_at(tx, num_inputs + 1, txhash, txhash_len,\n 2, 0xfffffffd, script, script_len, wit, 0)\n self.assertEqual(ret, WALLY_EINVAL) # Invalid index", "def set_input(self, input):\n pass", "def set_input(self, input):\n pass", "def test_creating_todo(todoApp, input):\n # Create new todo\n new_todo_input = todoApp.find_new_todo_input()\n print new_todo_input\n new_todo_input.send_keys(input, Keys.ENTER)\n\n # ASSERTION\n # Check whether the new todo exist in the todo list or not.\n todo = todoApp.find_todo(input)\n \n # Check the new todo status, it should active.\n assert todoApp.is_active_todo(todo)\n \n # Check the active todo count\n assert todoApp.count_active_todos() == '1 item left'", "def build_graph_from_input(self, input_node):\n raise NotImplementedError", "def insert_input(self, action):\n children = self.get_direct_inputs()\n # FIXME: Not sure why calling remove_input()\n # works only for first child, but we should not deal\n # with self.inputs directly (subject of change).\n self.inputs = []\n for child in children:\n action.add_input(child)\n # and node to our inputs (making it our child)\n self.add_input(action)\n return True", "def test_add_01():\n\n a_shape = (1, 2, 3, 4)\n b_shape = (1, 1, 3, 1)\n out_shape = (1, 2, 3, 4)\n\n class AddTester(Base):\n def create_onnx(self) -> onnx.ModelProto:\n node = make_node(\"Add\", inputs=[\"A\", \"B\"], outputs=[\"C\"])\n inputs = [info(\"A\", TensorProto.FLOAT, a_shape)]\n outputs = [info(\"C\", TensorProto.FLOAT, out_shape)]\n\n B = np.random.rand(*b_shape).astype(np.float32)\n\n b_init = from_array(B, \"B\")\n graph = make_graph([node], \"add_graph\", inputs, outputs, initializer=[b_init])\n model = make_model(graph)\n return model\n\n a = np.random.rand(*a_shape).astype(np.float32)\n\n outputs = [\"C\"]\n AddTester({\"A\": a}, outputs).run()", "def test_add_znode(self):\n z = self.test_start_empty()\n 
self.test_start_one_value(z)", "def add_input(input_string, trie):\n trie.insert(input_string) # add name to Trie", "def add_input(self, action):\n # Shell we raise here?\n if action == self:\n raise TypeError(\"Can't make self an input.\")\n\n # TODO: Clean it up!\n if issubclass(action.__class__, HaAction):\n if action not in self.inputs:\n self.inputs.append(action)\n return True\n else:\n raise TypeError(\"Child %s is sublcass of %s\" % (action, type(HaAction)))\n return", "def input(self):", "def input(self):\r\n pass", "def test_add_03():\n\n a_shape = (1, 2, 3, 4)\n b_shape = (3, 4)\n out_shape = (1, 2, 3, 4)\n\n class AddTester(Base):\n def create_onnx(self) -> onnx.ModelProto:\n node = make_node(\"Add\", inputs=[\"A\", \"B\"], outputs=[\"C\"])\n inputs = [info(\"A\", TensorProto.FLOAT, a_shape)]\n outputs = [info(\"C\", TensorProto.FLOAT, out_shape)]\n\n B = np.random.rand(*b_shape).astype(np.float32)\n\n b_init = from_array(B, \"B\")\n graph = make_graph([node], \"add_graph\", inputs, outputs, initializer=[b_init])\n model = make_model(graph)\n return model\n\n a = np.random.rand(*a_shape).astype(np.float32)\n\n outputs = [\"C\"]\n AddTester({\"A\": a}, outputs).run()", "def add_input(acc_r, acc_s, url_1, url_2, url_3,\n checksum_1, checksum_2, checksum_3,\n retrieval_method, my_session):\n i = Input(acc_r=acc_r, acc_s=acc_s, url_1=url_1, url_2=url_2, url_3=url_3,\n checksum_1=checksum_1, checksum_2=checksum_2, checksum_3=checksum_3,\n retrieval_method=retrieval_method)\n my_session.add(i)\n my_session.commit()\n log.info('Added 1 input', 'input.py')\n return i.id", "def _expected_inputs():", "def test_add_02():\n\n a_shape = (1, 1, 3, 4)\n b_shape = (1, 2, 3, 1)\n out_shape = (1, 2, 3, 4)\n\n class AddTester(Base):\n def create_onnx(self) -> onnx.ModelProto:\n node = make_node(\"Add\", inputs=[\"A\", \"B\"], outputs=[\"C\"])\n inputs = [info(\"A\", TensorProto.FLOAT, a_shape)]\n outputs = [info(\"C\", TensorProto.FLOAT, out_shape)]\n\n B = np.random.rand(*b_shape).astype(np.float32)\n\n b_init = from_array(B, \"B\")\n graph = make_graph([node], \"add_graph\", inputs, outputs, initializer=[b_init])\n model = make_model(graph)\n return model\n\n a = np.random.rand(*a_shape).astype(np.float32)\n\n outputs = [\"C\"]\n AddTester({\"A\": a}, outputs).run()", "def add_input(self, accumulator, element):\n raise NotImplementedError", "def get_node_inputs(self, node, attr=None):\n raise NotImplementedError()", "def add_input(self, name, path, type_=None):\n if not name:\n raise ValueError(\"Input name not defined\")\n self._inputs[name] = {\"name\": name, \"file\": path if isinstance(path, list) else [path], \"type\": type_}", "def __init__(self, name, node_type, items: dict = None, inputs: dict = None):\n if items is None:\n items = {}\n self.node_type = node_type\n self.name = name\n self.items = items\n self.inputs = {} if inputs is None else inputs", "def test_trie_node_init_inputs_one():\n from trie import Node\n test_case = Node(\"a\", True)\n assert test_case.lookup[\"a\"] == []", "def test_add_node(num_mutations):\n net = WeightAgnosticNetwork(10, 2, 0.5)\n for _ in range(num_mutations):\n net.mutate()\n\n num_connections_pre, num_neurons_pre, num_layers_pre = get_network_stats(net)\n net.add_node()\n assert net.get_num_connections() == num_connections_pre + 1\n assert net.num_neurons == num_neurons_pre + 1\n assert len(net.neurons_in_layer) == num_layers_pre or len(\n net.neurons_in_layer) == num_layers_pre + 1", "def __init__(self, type_=\"text\", name=\"\"):\n 
super().__init__(\"input\")\n self.type = type_\n self.name = name", "def test_addInner(self):\n print(\"\\nTest 1: Adding InnerNode\")\n try:\n builder = StaticBuilder()\n builder.addInput(10, name=\"In\")\n enc_name = builder.addInner(3, name=\"In\")\n except AttributeError:\n print(\"\\nCAUGHT! Trying to assign the same name to two nodes! \"\n \"AttributeError exception\\n\")\n builder = StaticBuilder()\n builder.addInput(10, name=\"In\")\n enc_name = builder.addInner(3, name=\"Det\")\n\n enc1 = builder.nodes[enc_name]\n print('\\nNode keys in builder:', list(builder.nodes.keys()))\n print(\"This node's key:\", enc_name)\n self.assertEqual(enc1.label, 1, \"The label has not been assigned correctly\")\n self.assertEqual(builder.num_nodes, 2, \"The number of nodes has not been \"\n \"assigned correctly\")\n self.assertEqual(enc1.num_declared_outputs, 0, \"The number of outputs of the \"\n \"DeterministicNode has not been assigned correctly\")\n self.assertEqual(enc1.num_declared_inputs, 0, \"The number of inputs of the \"\n \"DeterministicNode has not been assigned correctly\")", "def test_0_put(self):\n self.assertIsNotNone(save_node_info(self.node.name, self.node))", "def test_empty_node():\n try:\n Node({})\n except Exception as e:\n assert str(e) == 'input info has more than 1 entry!'\n # create node with empty connection\n try:\n node_b = Node({'A':[]})\n except Exception:\n assert False\n assert node_b.name == 'A'", "def add_special_input(self, input, number, logid='default-log'):\n cell = self.get_cell(number, logid)\n in_element = ET.SubElement(cell, 'special')\n in_element.text = input", "def _add_node(self, input_tensors, output_tensors):\n raise NotImplementedError", "def test_single_value(self, test_input, expected, sc):\n assert sc.add(test_input) == expected", "def __init__(self, name=None):\n from uuid import uuid4\n self.inputs = []\n self.uuid = uuid4()\n self.name = name\n\n # RootAction will return new instance only \n # for first node in module\n self.root = RootAction()\n self.root.add_node(self)", "def test_integration_test_add_model(\n test_input, properties, required, defaults\n):\n with patch_registry() as registry:\n swagger.add_model(test_input)\n\n assert test_input.__name__ in registry[\"models\"]\n assert \"description\" in registry[\"models\"][test_input.__name__]\n assert \"notes\" in registry[\"models\"][test_input.__name__]\n\n if \"resource_fields\" not in dir(test_input) and \"__init__\" not in dir(\n test_input\n ):\n # in py2, classes without __init__ or resource_fields defined\n # will cause issues.\n # note, no issue in PY3.\n pytest.fail(\n \"do not call without resource_fields or __init__ defined.\"\n )\n\n if \"resource_fields\" in dir(test_input):\n if hasattr(test_input, \"required\"):\n assert \"required\" in registry[\"models\"][test_input.__name__]\n elif \"__init__\" in dir(test_input):\n assert \"required\" in registry[\"models\"][test_input.__name__]\n\n assert \"properties\" in registry[\"models\"][test_input.__name__]", "def add_node(self, node):\n if node in self.nodes:\n return\n\n self.nodes_need_process.add(node)\n self.nodes.add(node)\n self.inputs.discard(node)\n self.inputs.update(\n {\n n\n for n in node.all_input_nodes\n if n.op in CALLABLE_NODE_OPS and n not in self.nodes\n }\n )", "def add(self, script, inputs, outputs):", "def test_star_input_edge(self):\n with self.assertRaises(ValidationError):\n Node('a') | '*' * Node('b')", "def add_input(self,form,prefix,**item):\n #print item\n item['name'] = prefix + 
item.get('name',self.autoname.next())\n if not 'value' in item:\n # no value: try to find one\n if 'choices' in item:\n item['value'] = item['choices'][0]\n # DO NOT USE A TEST if self.store: HERE\n # THAT DOES NOT SEEM TO WORK: ALWAYS RETURNS FALSE\n try:\n item['value'] = self.store[item['name']]\n except:\n pass\n\n # we should have a value now, or we can't continue!\n if not 'value' in item:\n raise ValueError,\"No value specified for item '%s'\" % item['name']\n \n if not 'itemtype' in item or item['itemtype'] is None:\n item['itemtype'] = defaultItemType(item)\n\n itemtype = item['itemtype']\n\n if type(itemtype) is str:\n if itemtype.endswith('radio') or itemtype.endswith('push'):\n if itemtype[0] in 'hv':\n item['direction'] = itemtype[0]\n item['itemtype'] = itemtype[1:]\n else:\n # default horizontal\n item['direction'] = 'h'\n \n\n if itemtype == 'slider':\n value = item['value']\n if type(value) == int:\n pass\n elif type(value) == float:\n item['itemtype'] = 'fslider'\n else:\n raise ValueError,\"Invalid value type for slider: %s\" % value\n\n item['parent'] = self\n\n field = inputAny(**item)\n self.fields.append(field)\n form.addWidget(field)", "def CreateInput(self, name=None, type=None, data=None):\n\n\n inp = self._input_registry.Create(name, type, data)\n\n self._inputs.append(inp)\n\n return inp", "def addChild(node):", "def d_input(self):\n pass", "def build_test(base_url, node, input_test = None):\n\n mytest = input_test\n if not mytest:\n mytest = Test()\n\n node = lowercase_keys(flatten_dictionaries(node)) #Clean up for easy parsing\n\n #Copy/convert input elements into appropriate form for a test object\n for configelement, configvalue in node.items():\n #Configure test using configuration elements\n if configelement == u'url':\n assert isinstance(configvalue,str) or isinstance(configvalue,unicode) or isinstance(configvalue,int)\n mytest.url = base_url + unicode(configvalue,'UTF-8').encode('ascii','ignore')\n elif configelement == u'method': #Http method, converted to uppercase string\n var = unicode(configvalue,'UTF-8').upper()\n assert var in HTTP_METHODS\n mytest.method = var\n elif configelement == u'group': #Test group\n assert isinstance(configvalue,str) or isinstance(configvalue,unicode) or isinstance(configvalue,int)\n mytest.group = unicode(configvalue,'UTF-8')\n elif configelement == u'name': #Test name\n assert isinstance(configvalue,str) or isinstance(configvalue,unicode) or isinstance(configvalue,int)\n mytest.name = unicode(configvalue,'UTF-8')\n elif configelement == u'validators':\n #TODO implement more validators: regex, file/schema match, etc\n if isinstance(configvalue, list):\n for var in configvalue:\n myquery = var.get(u'query')\n myoperator = var.get(u'operator')\n myexpected = var.get(u'expected')\n myexportas = var.get(u'export_as')\n\n # NOTE structure is checked by use of validator, do not verify attributes here\n # create validator and add to list of validators\n if mytest.validators is None:\n mytest.validators = list()\n validator = Validator()\n validator.query = myquery\n validator.expected = myexpected\n validator.operator = myoperator if myoperator is not None else validator.operator\n validator.export_as = myexportas if myexportas is not None else validator.export_as\n mytest.validators.append(validator)\n else:\n raise Exception('Misconfigured validator, requires type property')\n elif configelement == u'body': #Read request body, either as inline input or from file\n #Body is either {'file':'myFilePath'} or inline string with 
file contents\n if isinstance(configvalue, dict) and u'file' in lowercase_keys(configvalue):\n var = lowercase_keys(configvalue)\n assert isinstance(var[u'file'],str) or isinstance(var[u'file'],unicode)\n mytest.body = os.path.expandvars(read_file(var[u'file'])) #TODO change me to pass in a file handle, rather than reading all bodies into RAM\n elif isinstance(configvalue, str):\n mytest.body = configvalue\n else:\n # TODO add ability to handle input of directories or file lists with wildcards to test against multiple bodies\n raise Exception('Illegal input to HTTP request body: must be string or map of file -> path')\n\n elif configelement == 'headers': #HTTP headers to use, flattened to a single string-string dictionary\n mytest.headers = flatten_dictionaries(configvalue)\n elif configelement == 'expected_status': #List of accepted HTTP response codes, as integers\n expected = list()\n #If item is a single item, convert to integer and make a list of 1\n #Otherwise, assume item is a list and convert to a list of integers\n if isinstance(configvalue,list):\n for item in configvalue:\n expected.append(int(item))\n else:\n expected.append(int(configvalue))\n mytest.expected_status = expected\n elif configelement == 'stop_on_failure':\n mytest.stop_on_failure = safe_to_bool(configvalue)\n\n #Next, we adjust defaults to be reasonable, if the user does not specify them\n\n #For non-GET requests, accept additional response codes indicating success\n # (but only if not expected statuses are not explicitly specified)\n # this is per HTTP spec: http://www.w3.org/Protocols/rfc2616/rfc2616-sec9.html#sec9.5\n if 'expected_status' not in node.keys():\n if mytest.method == 'POST':\n mytest.expected_status = [200,201,204]\n elif mytest.method == 'PUT':\n mytest.expected_status = [200,201,204]\n elif mytest.method == 'DELETE':\n mytest.expected_status = [200,202,204]\n\n return mytest", "def _add_input(config, input_file, folder, scale_factor, weight, nick, nick_suffix=\"\", proxy_prefix=\"\"):\n\t\tconfig.setdefault(\"files\", []).append(input_file)\n\t\tconfig.setdefault(\"folders\", []).append(folder)\n\t\tconfig.setdefault(\"scale_factors\", []).append(scale_factor)\n\t\tconfig.setdefault(\"weights\", []).append(weight)\n\t\tconfig.setdefault(\"nicks\", []).append(nick+nick_suffix)\n\t\tconfig.setdefault(\"tree_draw_options\", []).append(\"proxy\" if len(proxy_prefix)>0 else \"\")\n\t\tconfig.setdefault(\"proxy_prefixes\", []).append(proxy_prefix)\n\t\t\n\t\treturn config", "def test_init_empty_node():\n from dll import Node\n new_node = Node()\n assert new_node.value is None", "def __init__(self):\n self.inputs = {}", "def add_new_node(self):\n\n\n new_node = str(self.form.newnode_text.toPlainText())\n if not new_node:\n self.form.newnode_text.clear()\n self.show_dialog(\"Empty argument.\")\n return\n \n self.form.newnode_text.clear()\n \n if self.G.has_node(new_node):\n self.show_dialog(f\"{new_node} is already constructed.\")\n \n else:\n self.G.add_node(new_node)\n self.form.plot_canvas.plot(self.G)", "def _create_node(\n self,\n name,\n ):\n pass", "def __init__(self,name,value,*args,**kargs):\n \n kargs['text'] = '' # Force no label\n self.input = value\n InputItem.__init__(self,name,*args,**kargs)\n self.layout().insertWidget(1,self.input)", "def input(self, input):\n\n self._input = input", "def test_not_enough_inputs(self):\n n = Node('a')\n with self.assertRaises(ValidationError):\n n.validate()\n n.validate(False)", "def add(self):\n self.inp.inputs.add(self)\n self.out.outputs.add(self)", 
"def __init__(self):\r\n super(AppendNode, self).__init__()", "def test_form_inputs(self):\n self.assertContains(self.response, '<input', 4)\n self.assertContains(self.response, 'type=\"text\"', 1)\n self.assertContains(self.response, 'type=\"password\"', 2)", "def create_basic_op_node(op_name, node, kwargs):\n name, input_nodes, _ = get_inputs(node, kwargs)\n\n node = onnx.helper.make_node(\n op_name,\n input_nodes,\n [name],\n name=name\n )\n return [node]", "def test_good_node():\n node_a = Node({'A':['B','C']})\n assert node_a.name == 'A'\n assert node_a.connections == ['B','C']", "def validate(self, node):", "def install_inputs():\n dest = os.path.join(safe_dir, \"input\")\n sys.stdout.write(\"Moving directory %r to %r...\\n\" % (\"input\", dest))\n try:\n shutil.move(\"input\", dest)\n except (OSError, shutil.Error), exc:\n sys.sdterr.write(\"Failed to move %r to %r\\n\" % (\"input\", dest))\n sys.sdterr.write(\" %s\\n\" % exc)\n return 1\n undo_actions.append(restore_inputs)\n\n source = os.path.join(ref_test_data.test_data_dir, \"input\")\n sys.stdout.write(\"Copying directory %r to %r...\\n\" % (source, \"input\"))\n try:\n shutil.copytree(source, \"input\")\n except (OSError, shutil.Error), exc:\n sys.sdterr.write(\"Failed to move %r to %r\\n\" % (source, \"input\"))\n sys.sdterr.write(\" %s\\n\" % exc)\n return 1\n undo_actions.append(remove_test_input)\n\n return 0", "def test_node_instantiation(create_empty_node):\n from linked_list import Node\n assert create_empty_node.value is None", "def test_instantiate_six_nodes():\n input = [13, 42, 7, 3, 9, 99]\n six = BinaryTree(input)\n assert isinstance(six, BinaryTree)", "def test_gre_input_node(self):\n pkt = (\n Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac)\n / IP(src=self.pg0.remote_ip4, dst=self.pg0.local_ip4)\n / GRE()\n )\n\n self.pg0.add_stream(pkt)\n self.pg_start()\n # no tunnel created, gre-input not registered\n err = self.statistics.get_counter(\"/err/ip4-local/unknown_protocol\")[0]\n self.assertEqual(err, 1)\n err_count = err\n\n # create gre tunnel\n gre_if = VppGreInterface(self, self.pg0.local_ip4, \"1.1.1.2\")\n gre_if.add_vpp_config()\n\n self.pg0.add_stream(pkt)\n self.pg_start()\n # tunnel created, gre-input registered\n err = self.statistics.get_counter(\"/err/ip4-local/unknown_protocol\")[0]\n # expect no new errors\n self.assertEqual(err, err_count)", "def check(self, input, ast):\n assert False # Must be redefined", "def test_node_exists():\n assert Node", "def test_node_exists():\n assert Node", "def add_node(self, name, state):\n if self.has_node(name):\n raise ValueError('Node {} already exists'.format(name))\n self.source_net.add_node(name, attr_dict=state)", "def test_input_type_errors(self):\n\n def net_func():\n input_value = paddle.to_tensor([1.0, 2.0, 3.0, 4.0, 5.0])\n paddle.bincount(input_value)\n\n with self.assertRaises(TypeError):\n self.run_network(net_func)", "def add_node(self, id, document, source):\n raise NotImplementedError()", "def _expected_inputs():\n return 1", "def test_add_question(self):\n model = get_model()\n note = add_question(question='Test Ques', answer='Ans', curr_model=model)\n self.assertEqual(genanki.Note, type(note))", "def get_input(self):\n pass", "def test_append_left_head_is_new_node(dq_1):\n dq_1.append_left('threve')\n assert dq_1._dll.head.data == 'threve'", "def testAppendChildBadType(self):\n self.assertRaises(\n TypeError,\n self.node.append_child,\n 'I ama a banana'\n )", "def createViewerInput():\n if 'VIEWER_INPUT' not in [node.name() for node in 
nuke.allNodes()]:\n for node in nuke.allNodes():\n node['selected'].setValue(False)\n nuke.createNode(\"dmpViewerInput\")\n node = nuke.toNode('VIEWER_INPUT')\n node.showControlPanel()\n node['selected'].setValue(False)\n else:\n nuke.toNode('VIEWER_INPUT').showControlPanel()\n #nuke.delete(nuke.toNode('VIEWER_INPUT'))" ]
[ "0.6525281", "0.6475737", "0.6424497", "0.6410956", "0.6354569", "0.63182724", "0.62617815", "0.62125105", "0.61966574", "0.60417753", "0.6039684", "0.60300416", "0.59842473", "0.59750694", "0.5963595", "0.59632045", "0.59500754", "0.59398043", "0.58992374", "0.58532447", "0.5806817", "0.57755876", "0.5768129", "0.5764643", "0.57644176", "0.5759815", "0.5733708", "0.5696297", "0.5690267", "0.56800103", "0.56757814", "0.56731546", "0.56672555", "0.56672555", "0.56657356", "0.5663458", "0.566199", "0.5647347", "0.5638524", "0.56382775", "0.5629824", "0.5620299", "0.5608919", "0.5608828", "0.55762106", "0.5572631", "0.55682653", "0.55678564", "0.55474657", "0.5542201", "0.5541649", "0.55349964", "0.5530941", "0.55309117", "0.55191207", "0.5513321", "0.550232", "0.54959476", "0.5488232", "0.54829556", "0.54695964", "0.54672205", "0.54636246", "0.5443809", "0.5437528", "0.5431726", "0.54260963", "0.5400113", "0.5389695", "0.5387325", "0.5383004", "0.53807044", "0.5367638", "0.53602797", "0.53530836", "0.53527594", "0.53324366", "0.5332039", "0.53296965", "0.5327484", "0.5320498", "0.53123176", "0.5302635", "0.52942735", "0.52890056", "0.5278289", "0.52711666", "0.52706516", "0.52658415", "0.5241867", "0.5241867", "0.5238238", "0.5236528", "0.5231276", "0.5230733", "0.52231055", "0.52166176", "0.5209706", "0.5204149", "0.5197603" ]
0.6362094
4
Test adding basic Deterministic InnerNode.
def test_addInner(self):
    print("\nTest 1: Adding InnerNode")
    try:
      builder = StaticBuilder()
      builder.addInput(10, name="In")
      enc_name = builder.addInner(3, name="In")
    except AttributeError:
      print("\nCAUGHT! Trying to assign the same name to two nodes! "
            "AttributeError exception\n")

    builder = StaticBuilder()
    builder.addInput(10, name="In")
    enc_name = builder.addInner(3, name="Det")

    enc1 = builder.nodes[enc_name]
    print('\nNode keys in builder:', list(builder.nodes.keys()))
    print("This node's key:", enc_name)
    self.assertEqual(enc1.label, 1, "The label has not been assigned correctly")
    self.assertEqual(builder.num_nodes, 2, "The number of nodes has not been "
                     "assigned correctly")
    self.assertEqual(enc1.num_declared_outputs, 0, "The number of outputs of the "
                     "DeterministicNode has not been assigned correctly")
    self.assertEqual(enc1.num_declared_inputs, 0, "The number of inputs of the "
                     "DeterministicNode has not been assigned correctly")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_add_znode(self):\n z = self.test_start_empty()\n self.test_start_one_value(z)", "def test_add_new_child(self):\n root = netapp_api.NaElement('root')\n self.mock_object(netapp_api.NaElement,\n '_convert_entity_refs',\n return_value=zapi_fakes.FAKE_INVOKE_DATA)\n\n root.add_new_child('options', zapi_fakes.FAKE_INVOKE_DATA)\n\n self.assertEqual(zapi_fakes.FAKE_XML2, root.to_string())", "def test_Tree():", "def _gen_test_tree_1():\n tree = BinaryNode(5)\n tree.left = BinaryNode(5)\n return tree", "def _gen_test_tree_5():\n tree = BinaryNode(30)\n tree.right = BinaryNode(30)\n return tree", "def test_add_node_with_children(self):\n root = netapp_api.NaElement('root')\n self.mock_object(netapp_api.NaElement,\n 'create_node_with_children',\n return_value=zapi_fakes.FAKE_INVOKE_DATA)\n mock_invoke = self.mock_object(root, 'add_child_elem')\n\n root.add_node_with_children('options')\n\n mock_invoke.assert_called_with(zapi_fakes.FAKE_INVOKE_DATA)", "def test_add_network(self):\n pass", "def create_test_node():\n node = cmds.createNode(\"unknown\")\n _add_test_attrs_to_node(node)\n return node", "def _gen_test_tree_6():\n tree = BinaryNode(20)\n tree.left = BinaryNode(10)\n tree.right = BinaryNode(30)\n tree.left.right = BinaryNode(25)\n return tree", "def test_tree_two_nodes_right(one_t):\n one_t.insert(5)\n assert one_t.depth() == 1", "def test_instantiate_leaf_node(self):\n try:\n LeafNode('my_label')\n except Exception:\n message = \"LeafNode instantiation failed\"\n self.fail(message)", "def test_insert_node(self):\r\n myObj = DLinkedList()\r\n myObj.append(120)\r\n myObj.append(100)\r\n self.assertEqual(myObj.insert_node(Node(1000), myObj.head), [120, 1000, 100])", "def testAppendChildDecision(self):\n self.node.append_child(self.color_decisions[0])\n\n self.assertEqual(\n [self.color_decisions[0]],\n self.node.color_decisions\n )", "def add_node(self, node):", "def test_init_node():\n from dll import Node\n new_node = Node(5)\n assert new_node.value == 5", "def test_create_node_with_children(self):\n root = netapp_api.NaElement('root')\n self.mock_object(root, 'add_new_child', return_value='abc')\n\n result_xml = str(root.create_node_with_children(\n 'options', test1=zapi_fakes.FAKE_XML_STR,\n test2=zapi_fakes.FAKE_XML_STR))\n\n # No ordering is guaranteed for elements in this XML.\n self.assertTrue(result_xml.startswith(\"<options>\"), result_xml)\n self.assertIn(\"<test1>abc</test1>\", result_xml)\n self.assertIn(\"<test2>abc</test2>\", result_xml)\n self.assertTrue(result_xml.rstrip().endswith(\"</options>\"), result_xml)", "def test_tree_binary_tree() -> None:\n t = generate_binary_tree_resources(4, 3)\n field(t, (\"root\", \"ds\", \"f1\")).identity = \"email\"\n field(t, (\"root.0.1.0\", \"ds.0.1.0\", \"f1\")).identity = \"ssn\"\n field(t, (\"root.1.1\", \"ds.1.1\", \"f1\")).identity = \"user_id\"\n assert generate_traversal({\"email\": \"X\"}, *t)\n assert generate_traversal({\"ssn\": \"X\"}, *t)\n assert generate_traversal({\"user_id\": \"X\"}, *t)", "def test_append_left_head_is_new_node(dq_1):\n dq_1.append_left('threve')\n assert dq_1._dll.head.data == 'threve'", "def test_add_node(num_mutations):\n net = WeightAgnosticNetwork(10, 2, 0.5)\n for _ in range(num_mutations):\n net.mutate()\n\n num_connections_pre, num_neurons_pre, num_layers_pre = get_network_stats(net)\n net.add_node()\n assert net.get_num_connections() == num_connections_pre + 1\n assert net.num_neurons == num_neurons_pre + 1\n assert len(net.neurons_in_layer) == num_layers_pre or len(\n 
net.neurons_in_layer) == num_layers_pre + 1", "def _gen_test_tree_2():\n tree = BinaryNode(5)\n tree.left = BinaryNode(3)\n tree.left.left = BinaryNode(2)\n tree.left.left.left = BinaryNode(1)\n tree.left.right = BinaryNode(4)\n tree.right = BinaryNode(7)\n tree.right.left = BinaryNode(6)\n tree.right.right = BinaryNode(8)\n tree.right.right.right = BinaryNode(9)\n return tree", "def test_dummy():\n dummyblock = DummyBlockNode(\n name=\"None\",\n parameters=(),\n ancestor=None,\n dirty=False,\n filepath=\"/some/random/path\"\n )\n dummydirective = DummyDirectiveNode(\n name=\"Name\",\n ancestor=None,\n filepath=\"/another/path\"\n )\n dummycomment = DummyCommentNode(\n comment=\"Comment\",\n ancestor=dummyblock,\n filepath=\"/some/file\"\n )", "def test_tree_two_nodes_left_has_depth_one(one_t):\n one_t.insert(5)\n assert one_t.depth() == 1", "def addChild(node):", "def test_add():\n # Test for addition with scalar Rnode object and float value\n x = Rnode(0.11)\n z = x**2 + x\n z.grad_value = 1.0\n\n try:\n assert z.value == x.value **2 + x.value\n assert x.grad() == sum(weight * var.grad()\n for weight, var in x.children)\n except AssertionError as e:\n print(e)", "def testAppendChildren(self):\n self.node.append_children(\n self.color_corrections + self.color_decisions\n )\n\n self.assertEqual(\n self.color_corrections,\n self.node.color_corrections\n )\n\n self.assertEqual(\n self.color_decisions,\n self.node.color_decisions\n )", "def _gen_test_tree_4():\n tree = BinaryNode(5)\n tree.left = BinaryNode(3)\n tree.left.left = BinaryNode(2)\n tree.left.right = BinaryNode(10)\n tree.right = BinaryNode(9)\n tree.right.left = BinaryNode(6)\n tree.right.right = BinaryNode(8)\n return tree", "def test_node_neighbors(graph_no_edges):\n graph_no_edges.add_edge('BB', 82, 5)\n assert graph_no_edges.neighbors('BB') == {82: 5}", "def test_imbalanced_addition(self):\n successes = 0\n failures = 0\n iterations = NUM_CALLS\n\n for _ in range(iterations):\n\n failure_callback = False\n handler = self.new_handler()\n new_vals = [randint(1, POINT_CAP) for _ in range(randint(HEIGHT[0], HEIGHT[1]))]\n for val in new_vals:\n handler.addNewNode(val, b=False)\n true_bal = check_balance(handler.root)\n if handler.balanced is not true_bal:\n failures += 1\n failure_callback = True\n break\n\n if failure_callback:\n break\n state = handler.get_gamestate()\n for val in new_vals:\n if val not in state['node_points'].values():\n failures += 1\n break\n\n successes += 1\n\n self.assertEqual(failures, 0,\n msg=f'{BColors.FAIL}\\n\\t[-]\\tModification: Failed to correctly add new nodes (non-balancing addition) ' +\n f'{failures}/{iterations} failures! 
{BColors.ENDC}')\n print(f\"{BColors.OKGREEN}\\t[+]\\tModification: Validated adding nodes in non-balancing mode in {successes} trees.{BColors.ENDC}\")", "def test_node_instantiation(create_empty_node):\n from linked_list import Node\n assert create_empty_node.value is None", "def test_create_trienode():\n node = TrieNode(\"h\")\n assert node.value == \"h\"\n assert node.children == {}", "def test_addOutput(self):\n print(\"\\nTest 2: Adding OutputNode\")\n builder = StaticBuilder()\n builder.addInput(10, name=\"In\")\n builder.addInner(3, name=\"Det\")\n o_name = builder.addOutput(name=\"Out\")\n \n o1 = builder.nodes[o_name]\n print(\"\\nNode keys in builder:\", list(builder.nodes.keys()))\n print(\"This node's key:\", o_name)\n self.assertEqual(o1.label, 2, \"The label has not been assigned correctly\")\n self.assertEqual(builder.num_nodes, 3, \"The number of nodes has not been \"\n \"assigned correctly\")\n self.assertEqual(o1.num_declared_outputs, 0, \"The number of outputs of the \"\n \"OutputNode has not been assigned correctly\")\n self.assertEqual(o1.num_declared_inputs, 0, \"The number of inputs of the \"\n \"OutputNode has not been assigned correctly\")", "def test_add_nested_class(self):\n test_node = class_dependency.JavaClass(self.TEST_PKG, self.TEST_CLS)\n test_node.add_nested_class(self.UNIQUE_KEY_1)\n self.assertEqual(test_node.nested_classes, {self.UNIQUE_KEY_1})", "def test_artefact_add() -> None:\n options()\n server = MockServer()\n db, store = server.new_connection()\n\n a = _graph.constant_artefact(db, store, b\"bla bla\")\n b = _graph.Artefact[bytes].grab(db, a.hash)\n c = _graph.get_data(db, store, a)\n assert b is not None\n assert a == b\n assert c == b\"bla bla\"", "def test_node_exists():\n assert Node", "def test_node_exists():\n assert Node", "def test01(self):\n\n t = tree(\"a\", [tree(\"b\"), tree(\"c\")]);\n self.assertTrue(self.isTree(t))", "def test_insert_node_singleton_content_2():\n first = 0\n second = first\n chain = N.Node(second)\n node = N.Node(first)\n\n result = A8.insert_node(node, chain)\n\n assert result.data == first, \"insert_node returned incorrect data value first given a node and singleton chain\"\n assert result.next.data == second, \"insert_node returned incorrect data value second given a node and singleton chain\"", "def test_insert_node_singleton_content_1():\n first = 0\n second = 1\n chain = N.Node(second)\n node = N.Node(first)\n\n result = A8.insert_node(node, chain)\n\n assert result.data == first, \"insert_node returned incorrect data value first given a node and singleton chain\"\n assert result.next.data == second, \"insert_node returned incorrect data value second given a node and singleton chain\"", "def test_graph_adds_nodes_and_edges(graph_no_edges):\n graph_no_edges.add_edge('Louisiana Crawfish', 'WA Invasive Species', 3)\n assert graph_no_edges.edges() == [(\n 'Louisiana Crawfish', 'WA Invasive Species', 3)]", "def test_insert_node_singleton_content_3():\n first = 0\n second = 1\n chain = N.Node(first)\n node = N.Node(second)\n\n result = A8.insert_node(node, chain)\n\n assert result.data == first, \"insert_node returned incorrect data value first given a node and singleton chain\"\n assert result.next.data == second, \"insert_node returned incorrect data value second given a node and singleton chain\"", "def test_balanced_addition(self):\n successes = 0\n failures = 0\n iterations = NUM_CALLS\n\n for _ in range(iterations):\n\n failure_callback = False\n handler = self.new_handler(balance=True)\n new_vals = 
[randint(1, POINT_CAP) for _ in range(randint(HEIGHT[0], HEIGHT[1]))]\n for val in new_vals:\n handler.addNewNode(val, b=True)\n if handler.balanced is False:\n handler.debug_wrapper()\n failures += 1\n failure_callback = True\n break\n\n if failure_callback:\n break\n state = handler.get_gamestate()\n for val in new_vals:\n if val not in state['node_points'].values():\n failures += 1\n break\n\n successes += 1\n\n self.assertEqual(failures, 0,\n msg=f'{BColors.FAIL}\\n\\t[-]\\tModification: Failed to correctly add new nodes (balancing addition) ' +\n f'{failures}/{iterations} failures! {BColors.ENDC}')\n print(\n f\"{BColors.OKGREEN}\\t[+]\\tModification: Validated adding nodes in balancing mode in {successes} trees.{BColors.ENDC}\")", "def test_tree_2_nodes_left_unbalanced(one_t):\n one_t.insert(9)\n assert one_t.balance() == 1", "def _gen_test_tree_3():\n tree = BinaryNode(5)\n tree.left = BinaryNode(1)\n tree.left.left = BinaryNode(2)\n tree.left.right = BinaryNode(3)\n tree.right = BinaryNode(7)\n tree.right.left = BinaryNode(8)\n tree.right.right = BinaryNode(9)\n return tree", "def test02(self):\n\n t = tree(\"a\", [tree(\"b\"), tree(\"c\")]);\n self.assertEqual(len(t), 3)", "def test_good_node():\n node_a = Node({'A':['B','C']})\n assert node_a.name == 'A'\n assert node_a.connections == ['B','C']", "def test_get_related_nodes(self):\n pass", "def test_init_empty_node():\n from dll import Node\n new_node = Node()\n assert new_node.value is None", "def set_numerical_test_node(self, feature_id, opname, threshold,\n default_left, left_child_key, right_child_key):\n if not self.empty:\n try:\n node_key = self.node_key\n except AttributeError:\n node_key = '_'\n raise ValueError(\n 'Cannot modify a non-empty node. ' + \\\n 'If you meant to change type of node {}, '.format(node_key) + \\\n 'delete it first and then add an empty node with ' + \\\n 'the same key.')\n try:\n # automatically create child nodes that don't exist yet\n if left_child_key not in self.tree:\n self.tree[left_child_key] = ModelBuilder.Node()\n if right_child_key not in self.tree:\n self.tree[right_child_key] = ModelBuilder.Node()\n _check_call(_LIB.TreeliteTreeBuilderSetNumericalTestNode(\n self.tree.handle,\n ctypes.c_int(self.node_key),\n ctypes.c_uint(feature_id), c_str(opname),\n ctypes.c_double(threshold),\n ctypes.c_int(1 if default_left else 0),\n ctypes.c_int(left_child_key),\n ctypes.c_int(right_child_key)))\n self.empty = False\n except AttributeError:\n raise TreeliteError('This node has never been inserted into a tree; '\\\n + 'a node must be inserted before it can be a test node')", "def test_tree_2_nodes_right_unbalanced(one_t):\n one_t.insert(11)\n assert one_t.balance() == -1", "def add_node (self, node):\n raise NotImplementedError", "def mutate_add_edge(child):\n # TODO: can maybe be improved by sparqling\n nodes = list(child.nodes)\n if len(nodes) < 2:\n return child\n node1, node2 = random.sample(nodes, 2)\n var_edge = gen_random_var()\n new_triple = (node1, var_edge, node2)\n return child + (new_triple,)", "def test_node_operation_integers():\n # Checking the Conditions on here\n assert o.delete_node(-1) == \"No nodes are delete purpose\"\n assert o.display_node() == \"we don't have any nodes yet for Display Purpose\"\n assert o.create_node(10,-1) == 10\n assert o.create_node(20,-1) == 20\n assert o.create_node(30,-1) == 30\n assert o.create_node(40,-1) == 40\n assert o.create_node(5,0) == 5\n assert o.create_node(0,0) == 0\n assert o.display_node() == [0,5,10,20,30,40]\n assert o.delete_node(-1) 
== True \n assert o.display_node() == [0,5,10,20,30]\n assert o.delete_node(0) == True\n assert o.display_node() == [5,10,20,30]\n assert o.create_node(50,8) == \" you can assign the 0 or -1th position \"# int type of Value\n assert o.create_node(44,-6) == \" you can assign the 0 or -1th position \"# int type of Value\n assert o.display_node() == [5,10,20,30]", "def __init__(self):\n self.root = SimpleNode()", "def test_only_nodes(neuron_instance):\n\n h, rxd, data, save_path = neuron_instance\n\n dend = h.Section(\"dend\")\n r = rxd.Region(h.allsec())\n hydrogen = rxd.Species(r, initial=1)\n water = rxd.Species(r, initial=1)\n\n h.finitialize(-65)\n\n nodelist = hydrogen.nodes\n\n # test that should not work, so an append that succeeds is an error\n\n try:\n nodelist.append(water.nodes) # append nodelist\n raise Exception(\"should not get here\")\n except TypeError:\n ...\n\n try:\n nodelist.extend([1, 2, 3, water.nodes[0]]) # extend with non-nodes\n raise Exception(\"should not get here\")\n except TypeError:\n ...\n\n try:\n nodelist[0] = 17\n raise Exception(\"should not get here\")\n except TypeError:\n ...\n\n try:\n nl = rxd.nodelist.NodeList(\n [1, 2, 3, water.nodes[0]]\n ) # create NodeList with non-nodes\n raise Exception(\"should not get here\")\n except TypeError:\n ...\n\n try:\n nodelist.insert(1, \"llama\") # insert non-node into nodelist\n raise Exception(\"should not get here\")\n except TypeError:\n ...\n\n # test that should work, so getting in the except is an error\n try:\n nodelist.append(water.nodes[0]) # append node\n except TypeError:\n raise Exception(\"should not get here\")\n\n try:\n original_length = len(nodelist) # extend nodes\n nodelist.extend(item for item in water.nodes)\n assert len(nodelist) == original_length + len(water.nodes)\n except TypeError:\n raise Exception(\"should not get here\")\n\n try:\n nodelist[0] = water.nodes[0]\n except TypeError:\n raise Exception(\"should not get here\")\n\n try:\n nl = rxd.nodelist.NodeList(\n [water.nodes[0], water.nodes[0]]\n ) # create nodelist with nodes\n except TypeError:\n raise Exception(\"should not get here\")\n\n try:\n nodelist.insert(1, water.nodes[0]) # insert node into nodelist\n except TypeError:\n raise Exception(\"should not get here\")\n\n try:\n nl = rxd.nodelist.NodeList([]) # create empty nodelist\n except TypeError:\n raise Exception(\"should not get here\")\n\n try:\n nl = rxd.nodelist.NodeList(\n item for item in [water.nodes[0], water.nodes[0]]\n ) # create nodelist with nodes generator\n assert len(nl) == 2\n except TypeError:\n raise Exception(\"should not get here\")", "def test_root_value(small_tree):\n assert small_tree.root.value == 3", "def test_setter_na_element(self):\n root = netapp_api.NaElement('root')\n root['e1'] = netapp_api.NaElement('nested')\n self.assertEqual(len(root.get_children()), 1)\n e1 = root.get_child_by_name('e1')\n self.assertIsInstance(e1, netapp_api.NaElement)\n self.assertIsInstance(e1.get_child_by_name('nested'),\n netapp_api.NaElement)", "def test_insert_node_multiple_content_2():\n first = 0\n second = 1\n third = 3\n chain = N.Node(first, N.Node(third))\n node = N.Node(second)\n\n result = A8.insert_node(node, chain)\n\n assert result.data == first, \"insert_node returned incorrect data value first given a node and chain length 2 (insert at mid)\"\n assert result.next.data == second, \"insert_node returned incorrect data value second given a node and chain length 2 (insert at middle)\"\n assert result.next.next.data == third, \"insert_node returned 
incorrect data value second given a node and chain length 2 (insert at middle)\"", "def test_setter_na_element(self):\n root = netapp_api.NaElement('root')\n root['e1'] = netapp_api.NaElement('nested')\n self.assertEqual(1, len(root.get_children()))\n e1 = root.get_child_by_name('e1')\n self.assertIsInstance(e1, netapp_api.NaElement)\n self.assertIsInstance(e1.get_child_by_name('nested'),\n netapp_api.NaElement)", "def test_small_tree_has_right_child_child(small_tree):\n assert small_tree.root.right.right.value == 27", "def create_test_node_tag(**kw):\n tag = get_test_node_tag(**kw)\n dbapi = db_api.get_instance()\n return dbapi.add_node_tag(tag['node_id'], tag['tag'])", "def test_create_edge(self):\n n1, n2 = Node('a'), Node('b')\n n1 | n2\n self.assertEqual(n1.eout, [Edge(n1, n2)])\n self.assertEqual(n1.ein, [])\n self.assertEqual(n2.ein, [Edge(n1, n2)])\n self.assertEqual(n2.eout, [])", "def test_minimal_tree_creation():\n t = Tree(None)\n\n assert t.data is None\n assert t.parent is None\n assert len(t) == 0", "def testAppendAdditional(self):\n\n self.node.desc = 'first description'\n\n self.assertEqual(\n ['first description', ],\n self.node.desc\n )\n\n self.node.desc = 'second description'\n\n self.assertEqual(\n ['first description', 'second description'],\n self.node.desc\n )", "def test_binarytree_post_order_exists():\n assert BinaryTree.post_order", "def create_nodes(self):", "def test_node_builder(patch, os_info, dummy_server):\n NOVA.servers.find = mock.MagicMock(return_value=dummy_server)\n nb = NodeBuilder(CONFIG, os_info)\n nodes = nb.get_nodes()\n list(map(lambda x: setattr(x, \"exists\", False), nodes))\n assert isinstance(nodes[0], koris.cloud.openstack.Instance)\n assert nodes[0].name == 'node-1-test'\n\n certs = create_certs(CONFIG, ['node-1-test'], ['192.168.1.103'],\n write=False)\n\n lb_ip = '212.58.134.78'\n node_tasks = nb.create_initial_nodes(CloudConfig(), certs['ca'], lb_ip,\n \"6443\",\n \"123456.abcdefg12345678\",\n \"discovery_hash\",\n )\n\n coro_server_create = node_tasks[1]\n\n call_args = coro_server_create.get_stack()[0].f_locals\n # we go a long way to check that nb.creat_node_tasks\n # will create a future with the correct user data\n assert call_args['keypair'] == 'otiram'\n assert call_args['self'].name == 'node-1-test'\n assert isinstance(call_args['flavor'], Flavor)", "def test_binarytree_exists():\n assert BinaryTree", "def test_left_sided_tree_with_two_nodes_root_has_child(empty_t):\n empty_t.insert(10)\n empty_t.insert(5)\n assert empty_t.root.left\n assert not empty_t.root.right", "def test04(self):\n\n s = \"a;\"\n t = parse_newick(s);\n self.assertTrue(self.isTree(t) and t.label == \"a\" and t.isLeaf())", "def test_node_except_integer():\n # Checking the Conditions on here\n assert o.create_node(50.0,1) == False# float type of Value\n assert o.create_node(\"bha\",8) == False # string type of Value\n assert o.create_node([1,2,3],0) == False# list type of Value\n assert o.create_node((1,2,3),-1) == False # tuple type of Value\n assert o.create_node({'x':1,'y':2},-5) ==False # dict type of Value\n assert o.create_node({1,2,3},0) == False # set type of Value\n assert o.create_node(True,1) == False # bool type of Value\n assert o.display_node() == [5,10,20,30] # display result\n assert o.delete_node(0) == True \n assert o.display_node() == [10,20,30] # display result\n assert o.delete_node(-1) == True \n assert o.display_node() == [10,20] # display result", "def test_insert_node_multiple_content_1():\n first = 0\n second = 1\n third = 3\n chain = 
N.Node(second, N.Node(third))\n node = N.Node(first)\n\n result = A8.insert_node(node, chain)\n\n assert result.data == first, \"insert_node returned incorrect data value first given a node and chain length 2 (insert at start)\"\n assert result.next.data == second, \"insert_node returned incorrect data value second given a node and chain length 2 (insert at start)\"\n assert result.next.next.data == third, \"insert_node returned incorrect data value second given a node and chain length 2 (insert at start)\"", "def test_add_01():\n\n a_shape = (1, 2, 3, 4)\n b_shape = (1, 1, 3, 1)\n out_shape = (1, 2, 3, 4)\n\n class AddTester(Base):\n def create_onnx(self) -> onnx.ModelProto:\n node = make_node(\"Add\", inputs=[\"A\", \"B\"], outputs=[\"C\"])\n inputs = [info(\"A\", TensorProto.FLOAT, a_shape)]\n outputs = [info(\"C\", TensorProto.FLOAT, out_shape)]\n\n B = np.random.rand(*b_shape).astype(np.float32)\n\n b_init = from_array(B, \"B\")\n graph = make_graph([node], \"add_graph\", inputs, outputs, initializer=[b_init])\n model = make_model(graph)\n return model\n\n a = np.random.rand(*a_shape).astype(np.float32)\n\n outputs = [\"C\"]\n AddTester({\"A\": a}, outputs).run()", "def test_tree_with_one_node_has_correct_value(one_t):\n assert one_t.root.value == 10", "def test_add_to_graph():\n node_list = []\n node_list.append(Node({'A':['B','C']}))\n node_list.append(Node({'B':['C','D']}))\n node_list.append(Node({'C':['D']}))\n node_list.append(Node({'D':['C']}))\n g = Graph()\n for node in node_list:\n g.add(node)\n assert len(g.nodes) == len(node_list)", "def test_graph(self):\n with Graph('g') as graph:\n a = Node('a')\n self.assertEqual(graph.nodes, [a])", "def test_instantiate_branch_node(self):\n try:\n BranchNode('my_name')\n except Exception:\n message = \"BranchNode instantiation failed\"\n self.fail(message)", "def test_right_sided_tree_with_two_nodes_root_has_child(empty_t):\n empty_t.insert(10)\n empty_t.insert(15)\n assert empty_t.root.right\n assert not empty_t.root.left", "def _dummy_node(self) -> CFNode:\n node = CFNode()\n self._graph.add_node(node)\n return node", "def add_node(graph, node, parent, label):\n neg = node['neg']\n pos = node['pos']\n total = str(neg + pos)\n neg = str(neg)\n pos = str(pos)\n samples_info = total + ' samples\\n' + neg + ' of class 0, ' + pos + ' of class 1'\n if 'final_class' in node:\n legend = str(node['id']) + '. final class is ' + str(node['final_class'])\n new_node = pydot.Node(legend)\n else:\n legend = str(node['id']) + '. 
' + node['split_attr'] + \\\n ' < ' + str(node['split_value']) + '\\n' + samples_info\n new_node = pydot.Node(legend)\n graph.add_node(new_node)\n if parent:\n graph.add_edge(pydot.Edge(parent, new_node, label=str(label),labelfontcolor=\"#009933\", fontsize=\"10.0\", color=\"blue\"))\n if 'left_child' in node:\n add_node(graph, node['left_child'], new_node, True)\n if 'right_child' in node:\n add_node(graph, node['right_child'], new_node, False)", "def test_add_02():\n\n a_shape = (1, 1, 3, 4)\n b_shape = (1, 2, 3, 1)\n out_shape = (1, 2, 3, 4)\n\n class AddTester(Base):\n def create_onnx(self) -> onnx.ModelProto:\n node = make_node(\"Add\", inputs=[\"A\", \"B\"], outputs=[\"C\"])\n inputs = [info(\"A\", TensorProto.FLOAT, a_shape)]\n outputs = [info(\"C\", TensorProto.FLOAT, out_shape)]\n\n B = np.random.rand(*b_shape).astype(np.float32)\n\n b_init = from_array(B, \"B\")\n graph = make_graph([node], \"add_graph\", inputs, outputs, initializer=[b_init])\n model = make_model(graph)\n return model\n\n a = np.random.rand(*a_shape).astype(np.float32)\n\n outputs = [\"C\"]\n AddTester({\"A\": a}, outputs).run()", "def test_topo_add_herbviore():\n instance = topo.Topography()\n instance.add_animal(animals.Herbivores())\n assert len(instance.herbivore_list) == 1", "def test_add_03():\n\n a_shape = (1, 2, 3, 4)\n b_shape = (3, 4)\n out_shape = (1, 2, 3, 4)\n\n class AddTester(Base):\n def create_onnx(self) -> onnx.ModelProto:\n node = make_node(\"Add\", inputs=[\"A\", \"B\"], outputs=[\"C\"])\n inputs = [info(\"A\", TensorProto.FLOAT, a_shape)]\n outputs = [info(\"C\", TensorProto.FLOAT, out_shape)]\n\n B = np.random.rand(*b_shape).astype(np.float32)\n\n b_init = from_array(B, \"B\")\n graph = make_graph([node], \"add_graph\", inputs, outputs, initializer=[b_init])\n model = make_model(graph)\n return model\n\n a = np.random.rand(*a_shape).astype(np.float32)\n\n outputs = [\"C\"]\n AddTester({\"A\": a}, outputs).run()", "def test_add_00():\n\n a_shape = (1, 1, 3, 4)\n b_shape = (1, 2, 3, 1)\n out_shape = (1, 2, 3, 4)\n\n class AddTester(Base):\n def create_onnx(self) -> onnx.ModelProto:\n node = make_node(\"Add\", inputs=[\"A\", \"B\"], outputs=[\"C\"])\n inputs = [\n info(\"A\", TensorProto.FLOAT, a_shape),\n info(\"B\", TensorProto.FLOAT, b_shape),\n ]\n outputs = [info(\"C\", TensorProto.FLOAT, out_shape)]\n\n graph = make_graph([node], \"add_graph\", inputs, outputs)\n model = make_model(graph)\n return model\n\n a = np.random.rand(*a_shape).astype(np.float32)\n b = np.random.rand(*b_shape).astype(np.float32)\n\n outputs = [\"C\"]\n AddTester({\"A\": a, \"B\": b}, outputs).run()", "def setUp(self):\n self.complete = nx.Graph()\n self.complete.add_edge(1, 2)\n self.complete.add_edge(2, 3)\n self.complete.add_edge(1, 3)\n\n self.small_tree = nx.Graph()\n self.small_tree.add_edge(1, 2)\n self.small_tree.add_edge(2, 3)\n self.small_tree.add_edge(3, 4)\n self.small_tree.add_edge(1, 4)\n self.small_tree.add_edge(2, 4)\n self.small_tree.add_edge(4, 5)\n self.small_tree.add_edge(5, 6)\n self.small_tree.add_edge(5, 7)\n self.small_tree.add_edge(6, 7)\n\n self.deterministic_graph = nx.Graph()\n self.deterministic_graph.add_edge(1, 2)\n self.deterministic_graph.add_edge(1, 3)\n self.deterministic_graph.add_edge(3, 4)\n self.deterministic_graph.add_edge(2, 4)\n self.deterministic_graph.add_edge(3, 5)\n self.deterministic_graph.add_edge(4, 5)\n self.deterministic_graph.add_edge(3, 6)\n self.deterministic_graph.add_edge(5, 6)", "def test_add(self):\n a = Vector(1, 2)\n b = Vector(3, 4)\n c = a + b\n assert 
c.x == 4\n assert c.y == 6", "def test_empty_node():\n try:\n Node({})\n except Exception as e:\n assert str(e) == 'input info has more than 1 entry!'\n # create node with empty connection\n try:\n node_b = Node({'A':[]})\n except Exception:\n assert False\n assert node_b.name == 'A'", "def setUp(self):\r\n\r\n self.tree = DndParser(StringIO(TEST_TREE), constructor=PhyloNode)", "def add_node(self, node) -> None:\n\t\tnode.nested = True\n\t\tsuper(Node, self).add_node(node)", "def test_insert_node_multiple_content_3():\n first = 0\n second = 1\n third = 3\n chain = N.Node(first, N.Node(second))\n node = N.Node(third)\n\n result = A8.insert_node(node, chain)\n\n assert result.data == first, \"insert_node returned incorrect data value first given a node and chain length 2 (insert at end)\"\n assert result.next.data == second, \"insert_node returned incorrect data value second given a node and chain length 2 (insert at end)\"\n assert result.next.next.data == third, \"insert_node returned incorrect data value second given a node and chain length 2 (insert at end)\"", "def inner_test():\n pass", "def inner_test():\n pass", "def test_tree_one_node_has_depth_zero(one_t):\n assert one_t.depth() == 0", "def testInit(self):\n\n self.assertEqual(\n [],\n self.node.desc\n )", "def test_edge_cases(doctest):", "def test_trie_node_init_no_values_two():\n from trie import Node\n test_case = Node()\n assert test_case.end is False", "def test_create_cluster_network(self):\n pass", "def test_insert_node_multiple_structure_2():\n chain = N.Node(1, N.Node(3))\n node = N.Node(2)\n\n result = A8.insert_node(node, chain)\n\n assert result is not None, \"insert_node returned empty chain given a node and chain length 2 (insert between)\"\n assert result.next is not None, \"insert_node returned short chain given a node and chain length 2 (insert between)\"\n assert result.next.next is not None, \"insert_node returned short chain given a node and chain length 2 (insert between)\"\n assert result.next.next.next is None, \"insert_node returned badly formed chain given a node and chain length 2 (insert between)\"", "def test_bst_root(bst_ten_values_random):\n assert bst_ten_values_random.root.val == 5", "def test_small_tree_has_root_value(small_tree):\n assert small_tree.root.right.value == 11", "def test_simple(self):\n exp = [{'type': NewickEvents.OPEN_SUBTREE, 'comments': []},\n {'type': NewickEvents.OPEN_SUBTREE, 'comments': []},\n {'edge_info': None, 'type': NewickEvents.TIP, 'comments': [], 'label': 'h'},\n {'edge_info': None, 'type': NewickEvents.TIP, 'comments': [], 'label': 'p'},\n {'edge_info': '1', 'type': NewickEvents.CLOSE_SUBTREE,\n 'comments': [], 'label': 'hp'},\n {'edge_info': None, 'type': NewickEvents.TIP, 'comments': [], 'label': 'g'},\n {'edge_info': None, 'type': NewickEvents.CLOSE_SUBTREE,\n 'comments': [], 'label': 'hpg'}\n ]\n content = '((h,p)hp:1,g)hpg;'\n self._do_test(content, exp)\n content = '((h,[pretest]p[test][posttest])hp,g)hpg;'\n exp = [{'type': NewickEvents.OPEN_SUBTREE, 'comments': []},\n {'type': NewickEvents.OPEN_SUBTREE, 'comments': []},\n {'edge_info': None, 'type': NewickEvents.TIP, 'comments': [], 'label': 'h'},\n {'edge_info': None, 'type': NewickEvents.TIP,\n 'comments': ['pretest', 'test', 'posttest'], 'label': 'p'},\n {'edge_info': None, 'type': NewickEvents.CLOSE_SUBTREE,\n 'comments': [], 'label': 'hp'},\n {'edge_info': None, 'type': NewickEvents.TIP, 'comments': [], 'label': 'g'},\n {'edge_info': None, 'type': NewickEvents.CLOSE_SUBTREE,\n 'comments': [], 'label': 
'hpg'}\n ]\n self._do_test(content, exp)" ]
[ "0.634626", "0.61077243", "0.60818183", "0.6013245", "0.5986549", "0.597251", "0.595647", "0.58965737", "0.5876589", "0.5845326", "0.5809255", "0.5755622", "0.5737071", "0.5720968", "0.5700372", "0.5690431", "0.56798387", "0.56767136", "0.56564647", "0.5654355", "0.56480926", "0.564055", "0.5635012", "0.56203693", "0.5614329", "0.559403", "0.5586549", "0.55817217", "0.55782926", "0.5563603", "0.55548126", "0.5545397", "0.5538528", "0.55311334", "0.55311334", "0.5518234", "0.551499", "0.549735", "0.5496943", "0.54769564", "0.5472111", "0.54689425", "0.54601103", "0.5455874", "0.5447568", "0.54400545", "0.5422628", "0.5420975", "0.54074025", "0.53959554", "0.5382332", "0.5377653", "0.53637385", "0.5360634", "0.5352539", "0.53478205", "0.53358215", "0.53351265", "0.53302526", "0.53302115", "0.53242207", "0.5321832", "0.5320564", "0.53139764", "0.5310955", "0.5305383", "0.52985805", "0.52970684", "0.5290114", "0.5284411", "0.528104", "0.52805775", "0.52708256", "0.5269764", "0.5269451", "0.5259601", "0.52544767", "0.5240662", "0.52406347", "0.52377814", "0.52365637", "0.5231766", "0.5227115", "0.52154416", "0.52116585", "0.52053124", "0.51979077", "0.51944965", "0.51928145", "0.51855093", "0.51855093", "0.51838946", "0.51825887", "0.5176513", "0.51725984", "0.51720965", "0.51714164", "0.51714116", "0.51707023", "0.5169623" ]
0.751408
0
Test adding basic OutputNode
def test_addOutput(self):
    print("\nTest 2: Adding OutputNode")
    builder = StaticBuilder()
    builder.addInput(10, name="In")
    builder.addInner(3, name="Det")
    o_name = builder.addOutput(name="Out")

    o1 = builder.nodes[o_name]
    print("\nNode keys in builder:", list(builder.nodes.keys()))
    print("This node's key:", o_name)
    self.assertEqual(o1.label, 2, "The label has not been assigned correctly")
    self.assertEqual(builder.num_nodes, 3, "The number of nodes has not been "
                     "assigned correctly")
    self.assertEqual(o1.num_declared_outputs, 0, "The number of outputs of the "
                     "OutputNode has not been assigned correctly")
    self.assertEqual(o1.num_declared_inputs, 0, "The number of inputs of the "
                     "OutputNode has not been assigned correctly")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_node_outputs(self):\n pass", "def addOutputsNode():\n return render_template(\"addOutputsNode.html\")", "def testNewOutputModule(self):\n manager.OutputManager.RegisterOutput(TestOutput)\n\n output_module = manager.OutputManager.NewOutputModule('test_output')\n self.assertIsInstance(output_module, TestOutput)\n\n with self.assertRaises(ValueError):\n manager.OutputManager.NewOutputModule(1)\n\n with self.assertRaises(KeyError):\n manager.OutputManager.NewOutputModule('bogus')\n\n manager.OutputManager.DeregisterOutput(TestOutput)", "def add_output_ops(self, graph, output):\n return output", "def test_output(self):\n new_route = self.route.output(\"test data\", transform=\"transformed\")\n assert new_route != self.route\n assert new_route.route[\"output\"] == \"test data\"\n assert new_route.route[\"transform\"] == \"transformed\"", "def write_output(self):", "def writeOutput(self, output):", "def _generate_output(self):\n raise NotImplementedError()", "def testHasOutputClass(self):\n manager.OutputManager.RegisterOutput(TestOutput)\n\n self.assertTrue(manager.OutputManager.HasOutputClass('test_output'))\n self.assertFalse(manager.OutputManager.HasOutputClass('bogus'))\n self.assertFalse(manager.OutputManager.HasOutputClass(1))\n\n manager.OutputManager.DeregisterOutput(TestOutput)", "def test_node_write_to_output_buffer(graph):\n a = sf.Node()\n graph.render_subgraph(a)\n assert a.output_buffer[0][3] == 0.0\n a.output_buffer[0][3] = 1.0\n assert a.output_buffer[0][3] == 1.0\n\n #--------------------------------------------------------------------------------\n # Why is the output buffer of length 256 (SIGNALFLOW_DEFAULT_BLOCK_SIZE)\n # rather than 2048 (SIGNALFLOW_NODE_BUFFER_SIZE)? Because the output buffer's\n # length is reported by the Python bindings as `last_num_frames`.\n # Whether this is a good idea is open to debate.\n #\n # Better would be to have a precise and rigorous block size throughout, which\n # would mean adding a block buffer between the audio I/O and the Graph.\n #--------------------------------------------------------------------------------\n assert a.output_buffer.shape == (32, 256)\n a.output_buffer[31][255] = 1.0\n assert a.output_buffer[31][255] == 1.0\n with pytest.raises(IndexError):\n a.output_buffer[32][255] == 1.0\n with pytest.raises(IndexError):\n a.output_buffer[31][256] == 1.0", "def test_create_named_output_edge(self):\n n1, n2 = Node('a'), Node('b')\n result = n1 * 'foo' | n2\n self.assertEqual(result, n2)\n self.assertEqual(n1.eout, [Edge(n1, n2, 'foo')])\n self.assertEqual(n1.ein, [])\n self.assertEqual(n2.ein, [Edge(n1, n2, 'foo')])\n self.assertEqual(n2.eout, [])", "def testGetOutput(self):\n #f = open(\"src_output.root\", 'w')\n #f.close()\n\n #1) missing required -d option (the other required option, -r, is ignored)\n go = getoutput(self.logger, self.maplistopt)\n res = go()\n expRes = CommandResult(2001, 'ERROR: Task option is required')\n self.assertEquals(expRes, res)\n\n #2) -d option is present but -r is missing\n analysisDir = self.reqarea\n go = getoutput(self.logger, self.maplistopt + [\"-d\", analysisDir])\n res = go()\n expRes = CommandResult(2002, 'ERROR: Range option is required')\n self.assertEquals(expRes, res)\n\n #3) request passed with the -d option does not exist\n #res = go([\"-d\", analysisDir + \"asdf\"])\n #TODO we expect an appropriate answer from the server.\n #By now, the server just answer an empty list\n\n #4) check correct behaviour without specifying output directory\n #N.B.: -p options is required 
for tests to skip proxy creation and delegation\n destDir = os.path.join(analysisDir, 'results')\n go = getoutput(self.logger, self.maplistopt + [\"-d\", analysisDir, \"-r\", \"1\", \"-p\"])\n res = go()\n expRes = CommandResult(0, '\\n')\n #check if the result directory has been created\n self.assertTrue(os.path.isdir(destDir))\n self.assertTrue(os.path.isfile(os.path.join(destDir, '1.root')))\n #Remove the directory\n shutil.rmtree(destDir)\n self.assertFalse(os.path.isdir(destDir))\n self.assertEquals(expRes, res)\n\n #5) correct behavior and output directory specified which exists\n go = getoutput(self.logger, self.maplistopt + [\"-d\", analysisDir, \"-r\", \"1\", \"-o\", \"/tmp\", \"-p\"])\n res = go()\n expRes = CommandResult(0, '\\n')\n #check if the result directory has been created\n self.assertTrue(os.path.isdir('/tmp'))\n destFile = os.path.join('/tmp', '1.root')\n self.assertTrue(os.path.isfile(destFile))\n os.remove(destFile)\n self.assertFalse(os.path.isfile(destFile))\n self.assertEquals(expRes, res)\n\n #6) correct behavior and output directory specified which does not exists\n go = getoutput(self.logger, self.maplistopt + [\"-d\", analysisDir, \"-r\", \"1\", \"-o\", \"/tmp/asdf/qwerty\", \"-p\"])\n res = go()\n expRes = CommandResult(0, '\\n')\n #check if the result directory has been created\n self.assertTrue(os.path.isdir('/tmp/asdf/qwerty'))\n #Remove the directory\n shutil.rmtree('/tmp/asdf/qwerty')\n self.assertEquals(expRes, res)\n\n #7) correct behavior and output directory specified which does not exists (relative path)\n go = getoutput(self.logger, self.maplistopt + [\"-d\", analysisDir, \"-r\", \"1\", \"-o\", \"qwerty\", \"-p\"])\n res = go()\n expRes = CommandResult(0, '\\n')\n #check if the result directory has been created\n self.assertTrue(os.path.isdir('qwerty'))\n #Remove the directory\n shutil.rmtree('qwerty')\n self.assertEquals(expRes, res)", "def create_test_node():\n node = cmds.createNode(\"unknown\")\n _add_test_attrs_to_node(node)\n return node", "def add_output(self, output, number, logid='default-log'):\n cell = self.get_cell(number, logid)\n out_element = ET.SubElement(cell, 'output')\n out_element.text = output", "def _add_output(self, node_entries):\n\n for node_entry in node_entries:\n for node_type, output_name in zip(node_entry[\"types\"], node_entry[\"output_names\"]):\n dtype = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[numpy.dtype(node_type.dtype)]\n output = onnx.helper.make_tensor_value_info(\n output_name, dtype, shape=get_node_shape(node_type)\n )\n self._mc.add_outputs([output])", "def save_output_node(out):\n out_wc = out.clone()\n return out_wc", "def test_5(self):\r\n r1, r2, r3, r4 = MyVariable(1), MyVariable(2), MyVariable(3), MyVariable(4)\r\n o0 = MyOp.make_node(r1, r2)\r\n o1 = MyOp.make_node(o0.outputs[0], r4)\r\n all = io_toposort([], o0.outputs)\r\n assert all == [o0]", "def test_set_output_implicitly(self):\n self.command.output = \"\"\n self.command.package = self.input_ovf\n self.assertEqual(self.command.output, \"\")\n self.command.run()\n self.assertEqual(self.command.output, self.input_ovf)", "def test_debug_output(self):\n assert output(self.msg) is not None", "def add_output_param(self, name, ptype, default_value=NULL_VALUE): \n param_name = self._get_unique_param_name(name, NodeParam.OUTPUT)\n p = NodeParam(self, param_name, ptype, NodeParam.OUTPUT, \n default_value=default_value, user_param=self._params_created) \n self._output_params[param_name] = p\n return p", "def __init__(self, name, node):\n super(OutputPlug, 
self).__init__(name, node, (InputPlug, ))\n self.node.outputs[self.name] = self", "def output(self):\r\n self.logic ( )\r\n return self.output", "def _create_outputs(self) -> ComponentOutputs:\n raise NotImplementedError", "def handle_output(self, workunit, label, s):\r\n pass", "def handle_output(self, workunit, label, s):\r\n pass", "def addOutput(self, *args):\n return _libsbml.Transition_addOutput(self, *args)", "def test_default_output(self):\n env = pike.Environment()\n output = pike.Graph('output')\n output.sink = pike.noop()\n with patch.object(output, 'run') as run:\n run.return_value = []\n env.set_default_output(output)\n with pike.Graph('g') as graph:\n pike.glob('.', '*')\n env.add(graph)\n env.run_all()\n run.assert_called_with([])", "def add_node(self, node):", "def test_add_znode(self):\n z = self.test_start_empty()\n self.test_start_one_value(z)", "def test_as_output(self):\n self.assertEqual(render('{% output_as 1 %}-{{ out }}'), '1-')\n self.assertEqual(render('{% output_as 1 as out %}-{{ out }}'),\n 'yes_as-1')", "def _populate_output(self):\n pass", "def add_output(self, result):\n if not isinstance(result, six.integer_types):\n raise TypeError('add_output must be called with an integer '\n '(LoomResult id.) Did you forget to call constant?')\n if not self._weaver.AddOutput(result):\n raise AssertionError('Weaver AddOutput failed: %s' %\n self._weaver.error_string())", "def test_html_output(self):\n pass", "def setUp(self):\n\t\tself.output = self.switchstdout()", "def test_output_vs_expectations(self):\n inventory = Inventory()\n inventory.add_host('superhost', hostvars={'ansible_connection':'local'})\n inventory.add_host('superhost2', hostvars={'ansible_connection':'local'})\n inventory.add_group('awesome')\n inventory.add_group('awesome2')\n inventory.groups['awesome'].add_host(inventory.hosts['superhost'])\n inventory.groups['awesome'].add_host(inventory.hosts['superhost2'])\n output = inventory.write_output_json()\n assert len(output['_meta']['hostvars']) == 2\n output.pop('_meta')\n assert len(output) == 4 #awesome, awesome2, all, ungrouped", "def testGetOutputClasses(self):\n manager.OutputManager.RegisterOutput(TestOutput)\n\n names = []\n output_classes = []\n\n for name, output_class in manager.OutputManager.GetOutputClasses():\n names.append(name)\n output_classes.append(output_class)\n\n self.assertIn('test_output', names)\n self.assertIn(TestOutput, output_classes)\n\n manager.OutputManager.DeregisterOutput(TestOutput)", "def add_output(self, variable):\n self.outputs.append(variable)", "def __init__(self):\n self.output = []", "def test_normal_goes_normal(self):\n eq_(self.msg, output(self.msg,\"OUTPUT\"))", "def set_ouput(self, q_ouput):\n self.n_outputs = len(q_ouput) # It is also the quantity of required neurons \n self._output = q_ouput\n try: \n self._q_neuron.add_register(self._output)\n except exceptions.QiskitError:\n pass", "def testgen(self):\n self.parse()\n self.generate()", "def add_output(self):\r\n if self.slots[self.length-1].item is not Item.E:\r\n self.outputs.append(self.slots[self.length-1].item)", "def test_outputs(self):\n # Add\n script, script_len = make_cbuffer('00')\n for args in [\n (None, 1, script, script_len, 0), # Invalid tx\n (wally_tx(), -1, script, script_len, 0), # Invalid amount\n (wally_tx(), MAX_SATOSHI+1, script, script_len, 0), # Invalid amount\n (self.tx_deserialize_hex(TX_HEX), MAX_SATOSHI, script, script_len, 0), # Invalid total amount\n (wally_tx(), 1, None, script_len, 0), # Empty script\n (wally_tx(), 1, 
script, 0, 0), # Invalid script length\n (wally_tx(), 1, script, script_len, 1), # Invalid flag\n ]:\n self.assertEqual(WALLY_EINVAL, wally_tx_add_raw_output(*args))\n # Testing only wally_tx_add_raw_output, because it calls wally_tx_add_output and\n # wally_tx_get_total_output_satoshi\n\n # Remove\n for args in [\n (None, 0), # Invalid tx\n (wally_tx(), 0), # Remove from empty tx\n (self.tx_deserialize_hex(TX_FAKE_HEX), 1), # Invalid index\n ]:\n self.assertEqual(WALLY_EINVAL, wally_tx_remove_output(*args))\n\n # Add and remove inputs and outputs, test that serialization remains the same\n script2, script2_len = make_cbuffer('77' * 16)\n tx = self.tx_deserialize_hex(TX_FAKE_HEX)\n self.assertEqual(WALLY_OK, wally_tx_add_raw_output(tx, 55, script2, script2_len, 0))\n before_hex = self.tx_serialize_hex(tx)\n num_outputs = tx.num_outputs\n\n def remove_and_test(idx):\n self.assertNotEqual(before_hex, self.tx_serialize_hex(tx))\n self.assertEqual(WALLY_OK, wally_tx_remove_output(tx, idx))\n self.assertEqual(before_hex, self.tx_serialize_hex(tx))\n\n self.assertEqual(WALLY_OK, wally_tx_add_raw_output(tx, 1, script, script_len, 0))\n remove_and_test(num_outputs)\n for idx in range(0, num_outputs + 1):\n ret = wally_tx_add_raw_output_at(tx, idx, 1, script, script_len, 0)\n self.assertEqual(ret, WALLY_OK)\n remove_and_test(idx)\n\n ret = wally_tx_add_raw_output_at(tx, num_outputs + 1, 1, script, script_len, 0)\n self.assertEqual(ret, WALLY_EINVAL) # Invalid index", "def testGetOutputClass(self):\n manager.OutputManager.RegisterOutput(TestOutput)\n\n output_class = manager.OutputManager.GetOutputClass('test_output')\n self.assertEqual(output_class, TestOutput)\n\n with self.assertRaises(ValueError):\n _ = manager.OutputManager.GetOutputClass(1)\n\n with self.assertRaises(KeyError):\n _ = manager.OutputManager.GetOutputClass('bogus')\n\n manager.OutputManager.DeregisterOutput(TestOutput)", "def print_output(tree):\n print_value(tree)\n print_tree(tree)", "def add(self, script, inputs, outputs):", "def _add_output(self, name, input_layer):\n self.model.add_output(name=name, input=input_layer)\n self.output = name", "def __init__(self):\r\n\r\n super(Node, self).__init__()\r\n self.inputs = []\r\n self.outputs = []\r\n self._active_outputs = []\r\n self.description = None\r\n\r\n # Experimental: dictionary to be used to retype output fields\r\n # Currently used only in CSV source node.\r\n self._retype_dictionary = {}", "def testing(text, output):\n extractor = Extractor(text, output)\n extractor.parse()\n extractor.extract()\n extractor.write()", "def printOutput(self):\n pass", "def test_core_agent_basic(agent):\n # iterate plugin test. 
if no output plugin exists, last plugin is\n # default output.\n plugins = [plugin for plugin in agent.pluginpod.iter_plugins()]\n assert isinstance(plugins[-1], Output)", "def __set_outputs__(self):\n self.__set_in_out_var__(None, 1)", "def fixture_output_block():\n return Mock()", "def test_2(self):\r\n r1, r2, r5 = MyVariable(1), MyVariable(2), MyVariable(5)\r\n o = MyOp.make_node(r1, r1)\r\n r2b = o.outputs[0]\r\n o2 = MyOp.make_node(r2b, r2b)\r\n all = io_toposort([r2b], o2.outputs)\r\n assert all == [o2]\r\n\r\n o2 = MyOp.make_node(r2b, r5)\r\n all = io_toposort([r2b], o2.outputs)\r\n assert all == [o2]", "def _update_output_after_create_node(self):\n # Constants and parameter should not exist for input and output.\n filtered_node = {NodeTypeEnum.CONST.value, NodeTypeEnum.PARAMETER.value}\n for node in self._normal_node_map.values():\n for src_name, input_attr in node.inputs.items():\n src_node = self._get_normal_node(node_name=src_name)\n if src_node.type in filtered_node:\n continue\n\n src_node.add_outputs(node.name, input_attr)", "def add_output(self, id: str, type: str, initial_value: float=None, **kwargs):\n output = Output(id=id, type=type, initial_value=initial_value)\n for key, value in kwargs.items():\n output.add_parameter(key, value)\n self.outputs.append(output)", "def test_node_repr_return():\n input = 42\n expected = f'<Node | Val: {input} | Next: None>'\n actual = repr(Node(input))\n assert expected == actual", "def debug_cntk_outputnodes():\n\tz = load_model(MODEL)\n\tprint (\"Load complete.\");\n\tfor index in range(len(z.outputs)):\n\t\tprint(\"Index {} for output: {}.\".format(index, z.outputs[index].name))", "def add_node_output_locations(self, xy,epsgIN,start,end,step): \n nodeIds = self.grid.get_node_output_locations(xy,epsgIN)\n if(elementIds != []):\n self.run_nc.add_node_output_locations(nodeIds,start,end,step)", "def test_init_node():\n from dll import Node\n new_node = Node(5)\n assert new_node.value == 5", "def recordStdout(self, test, output):\n if output:\n test = proto_test(test)\n self.stdout_output[test] = output", "def __init__(self, name, node):\n self.accepted_plugs = (InputPlug, InputPlugGroup)\n super(OutputPlug, self).__init__(name, node)\n if not isinstance(self, SubPlug):\n self.node.outputs[self.name] = self", "def testNoOutput(self):\n output_mediator = self._CreateOutputMediator()\n output_writer = cli_test_lib.TestOutputWriter()\n output_module = null.NullOutputModule(output_mediator)\n\n output_module.WriteHeader()\n\n event, event_data, event_data_stream = (\n containers_test_lib.CreateEventFromValues(self._TEST_EVENTS[0]))\n output_module.WriteEventBody(\n event, event_data, event_data_stream, None)\n\n output_module.WriteFooter()\n\n output = output_writer.ReadOutput()\n self.assertEqual('', output)", "def IsOutputNode(self) -> bool:\n return self._isoutput", "def addNode(cTag, nTag, pkg, exe, args='', name='', namespace=''): #@NoSelf", "def output(self, output):\n self._output = output", "def _is_function_output(node):\r\n return node.clients == [('output', 1)]", "def store_output(output):\n\n pass", "def test_add_00():\n\n a_shape = (1, 1, 3, 4)\n b_shape = (1, 2, 3, 1)\n out_shape = (1, 2, 3, 4)\n\n class AddTester(Base):\n def create_onnx(self) -> onnx.ModelProto:\n node = make_node(\"Add\", inputs=[\"A\", \"B\"], outputs=[\"C\"])\n inputs = [\n info(\"A\", TensorProto.FLOAT, a_shape),\n info(\"B\", TensorProto.FLOAT, b_shape),\n ]\n outputs = [info(\"C\", TensorProto.FLOAT, out_shape)]\n\n graph = make_graph([node], 
\"add_graph\", inputs, outputs)\n model = make_model(graph)\n return model\n\n a = np.random.rand(*a_shape).astype(np.float32)\n b = np.random.rand(*b_shape).astype(np.float32)\n\n outputs = [\"C\"]\n AddTester({\"A\": a, \"B\": b}, outputs).run()", "def __init__(self, output_mediator_object):\n super(TestOutputModule, self).__init__(output_mediator_object)\n self.events = []\n self.macb_groups = []", "def add_node (self, node):\n raise NotImplementedError", "def outputs(self):\n pass", "def __init__(self):\r\n super(AppendNode, self).__init__()", "def testRegistration(self):\n # pylint: disable=protected-access\n number_of_parsers = len(manager.OutputManager._output_classes)\n\n manager.OutputManager.RegisterOutput(TestOutput)\n\n with self.assertRaises(KeyError):\n manager.OutputManager.RegisterOutput(TestOutput)\n\n self.assertEqual(\n len(manager.OutputManager._output_classes),\n number_of_parsers + 1)\n\n with self.assertRaises(KeyError):\n manager.OutputManager.RegisterOutput(TestOutput)\n\n manager.OutputManager.DeregisterOutput(TestOutput)\n self.assertEqual(\n len(manager.OutputManager._output_classes),\n number_of_parsers)", "def create_nodes(self):", "def test_init_empty_node():\n from dll import Node\n new_node = Node()\n assert new_node.value is None", "def _add_node(self, input_tensors, output_tensors):\n raise NotImplementedError", "def test_get_task_output(self):\n pass", "def nodeOutliner(*args, addCommand: Union[Script, bool]=None, addObject: name=None, annotation:\n Union[AnyStr, bool]=\"\", attrAlphaOrder: Union[AnyStr, bool]=\"\",\n backgroundColor: Union[List[float, float, float], bool]=None, connectivity:\n Union[name, bool]=None, currentSelection: bool=True, defineTemplate: AnyStr=\"\",\n docTag: Union[AnyStr, bool]=\"\", dragCallback: Script=None, dropCallback:\n Script=None, enable: bool=True, enableBackground: bool=True,\n enableKeyboardFocus: bool=True, exists: bool=True, fullPathName: bool=True,\n height: Union[int, bool]=0, highlightColor: Union[List[float, float, float],\n bool]=None, isObscured: bool=True, lastClickedNode: bool=True, lastMenuChoice:\n Union[AnyStr, bool]=\"\", longNames: bool=True, manage: bool=True, menuCommand:\n Script=None, menuMultiOption: bool=True, multiSelect: bool=True, niceNames:\n bool=True, noBackground: bool=True, noConnectivity: bool=True, nodesDisplayed:\n bool=True, numberOfPopupMenus: bool=True, parent: Union[AnyStr, bool]=\"\",\n popupMenuArray: bool=True, pressHighlightsUnconnected: bool=True,\n preventOverride: bool=True, redraw: bool=True, redrawRow: bool=True, remove:\n Union[AnyStr, List[AnyStr]]=\"\", removeAll: bool=True, replace: Union[name,\n bool]=None, selectCommand: Union[Script, bool]=None, showConnectedOnly:\n bool=True, showHidden: bool=True, showInputs: bool=True, showNonConnectable:\n bool=True, showNonKeyable: bool=True, showOutputs: bool=True, showPublished:\n bool=True, showReadOnly: bool=True, statusBarMessage: AnyStr=\"\", useTemplate:\n AnyStr=\"\", visible: bool=True, visibleChangeCommand: Union[Script, bool]=None,\n width: Union[int, bool]=0, q=True, query=True, e=True, edit=True,\n **kwargs)->Union[None, Any]:\n pass", "def with_output(self, output, output_format):\n\t\tself.variables['output'] = output\n\t\tself.variables['output_format'] = output_format\n\t\treturn self", "def writetif(self,outputname,):\n pass", "def test_4(self):\r\n r1, r2, r3, r4 = MyVariable(1), MyVariable(2), MyVariable(3), MyVariable(4)\r\n o0 = MyOp.make_node(r1, r2)\r\n o1 = MyOp.make_node(o0.outputs[0], r1)\r\n all = 
io_toposort([r1, o0.outputs[0]], [o0.outputs[0], o1.outputs[0]])\r\n assert all == [o1]", "def addChild(node):", "def test_output(self):\n inst = Amenity()\n out = \"[Amenity] ({}) {}\".format(inst.id, inst.__dict__)\n self.assertEqual(out, str(inst))", "def test_has_output(self):\n irc_message = 'This message goes to IRC'\n\n only_irc_response = ActionResponse(irc_message)\n self.assertFalse(only_irc_response.has_output)\n\n no_output_response = ActionResponse(irc_message, value=1.0)\n self.assertFalse(no_output_response.has_output)\n\n no_value_response = ActionResponse(\n irc_message,\n output_params={'osc': {'address': '/brightness/'}},\n )\n self.assertFalse(no_value_response.has_output)\n\n full_response = ActionResponse(\n 'This message goes to IRC',\n 0.5,\n {'osc': {'address': '/this/is/an/osc/address'}},\n )\n self.assertTrue(full_response.has_output)", "def get_output(self):\n raise NotImplementedError", "def testAppendAdditional(self):\n\n self.node.desc = 'first description'\n\n self.assertEqual(\n ['first description', ],\n self.node.desc\n )\n\n self.node.desc = 'second description'\n\n self.assertEqual(\n ['first description', 'second description'],\n self.node.desc\n )", "def lsOneNode(self, output):\n libxml2mod.xmlLsOneNode(output, self._o)", "def __init__(self, output, encoding='utf-8'):\n document = XMLGenerator(output, encoding)\n document.startDocument()\n self._document = document\n self._output = output\n return", "def output_test():\n\toutput_comparison_page(TEST_EVENT_LIST, TEST_COMPARISON_PAGE_FILEPATH)", "def _writeNode (self, node, parent=None):\n\t\t## Main:\n\t\tif (self._src_tree.is_node_tip (node)):\n\t\t\t# a simple (terminal) node\n\t\t\tname = node.get ('title') or node.get ('name')\n\t\t\t# if the name is not quoted and contains spaces, quote it\n\t\t\tif (not _quotedNameRegex.search (name)):\n\t\t\t\tif (_spacesInNameRegex.search (name)):\n\t\t\t\t\tname = \"'%s'\" % name\n\t\t\tself._dest_strm.write (name)\n\t\telse:\n\t\t\t# complex (internal) node\n\t\t\tself._dest_strm.write ('(')\n\t\t\tchildren = self._src_tree.node_children(node)\n\t\t\tfirst_node = True\n\t\t\tfor child in children:\n\t\t\t\tif (first_node):\n\t\t\t\t\tfirst_node = False\n\t\t\t\telse:\n\t\t\t\t\tself._dest_strm.write (', ')\n\t\t\t\tself._writeNode (child, node)\n\t\t\tself._dest_strm.write (')')\n\t\t\t# do support value\n\t\t\tsupval = node.get ('support', None)\n\t\t\tif (supval is not None):\n\t\t\t\tself._dest_strm.write (self._support_format % supval)\n\t\t# do the distance\n\t\tif parent:\n\t\t\tbr = self._src_tree.get_branch (node, parent)\n\t\t\t#dist = self._src_tree.get_distance (node, parent)\n\t\t\tdist = br.distance\n\t\telse:\n\t\t\tdist = node.get ('distance', None)\n\t\tif (dist is not None):\n\t\t\tself._dest_strm.write (':' + self._dist_format % dist)", "def print_out():\n pass", "def __init__(self, output, encoding='utf-8', short_empty_elements=True):\n document = XMLGenerator(output, encoding) # Python 3.2 : short_empty_elements\n document.startDocument()\n self._document = document\n self._output = output\n self._encoding = encoding\n self._short_empty_elements = short_empty_elements\n self._open_elements = []\n return", "def test_repr(self):\n\n node = Node(\n {\n 'healthchecks': [],\n 'host': 'iwa-ait.org',\n 'port': 22,\n 'user': 'adm-technical',\n 'password': 'oh-some-secret'\n },\n {},\n mock.Mock()\n )\n\n self.assertIn('iwa-ait.org', str(node))\n self.assertIn('22', str(node))\n self.assertIn('adm-technical', str(node))\n 
self.assertNotIn('oh-some-secret', str(node))", "def add_out(self, *outputs: 'Output') -> None:\n self.outputs.extend(outputs)", "def get_output_node(self) -> WillumpGraphNode:\n return self.output_node", "def output(self, output):\n\n self._output = output", "def output(self, output):\n\n self._output = output", "def output(self, output):\n\n self._output = output", "def test_0(self):\r\n r1, r2, r5 = MyVariable(1), MyVariable(2), MyVariable(5)\r\n o = MyOp.make_node(r1, r2)\r\n o2 = MyOp.make_node(o.outputs[0], r5)\r\n\r\n all = general_toposort(o2.outputs, prenode)\r\n assert all == [r5, r2, r1, o, o.outputs[0], o2, o2.outputs[0]]\r\n\r\n all = io_toposort([r5], o2.outputs)\r\n assert all == [o, o2]" ]
[ "0.75490946", "0.666691", "0.63000286", "0.6119677", "0.6104275", "0.6039189", "0.59738773", "0.5940442", "0.5934479", "0.5925732", "0.5917337", "0.5912333", "0.5910766", "0.58941495", "0.5879698", "0.5866728", "0.5860062", "0.5858629", "0.58093554", "0.5797671", "0.57790154", "0.57656026", "0.57635814", "0.57453835", "0.57453835", "0.57427186", "0.5732115", "0.57124406", "0.5703054", "0.5690593", "0.56773716", "0.56682795", "0.566644", "0.56639975", "0.5619473", "0.56171626", "0.56150424", "0.560486", "0.55795324", "0.55755544", "0.5572162", "0.55620325", "0.5560451", "0.5526315", "0.5520771", "0.5517954", "0.55058885", "0.54886407", "0.5483997", "0.54834425", "0.54804665", "0.545563", "0.5451938", "0.54491264", "0.54445946", "0.5443256", "0.54379237", "0.54360926", "0.54268557", "0.54232293", "0.54227847", "0.5422473", "0.5419184", "0.54128456", "0.5412118", "0.5411788", "0.5400499", "0.53969777", "0.5392518", "0.5385396", "0.5382208", "0.53695995", "0.53638184", "0.5359718", "0.53541243", "0.5341168", "0.533917", "0.53360415", "0.5335818", "0.53329873", "0.53253603", "0.5322068", "0.5321976", "0.5310935", "0.5307361", "0.5297672", "0.5296744", "0.5294222", "0.5278043", "0.52686095", "0.5257527", "0.52570695", "0.5254315", "0.5251288", "0.5250691", "0.52458775", "0.52404565", "0.52404565", "0.52404565", "0.52375114" ]
0.78752226
0
Test building the simplest model possible.
def test_BuildModel0(self): print("\nTest 4: Building a Basic Model") builder = StaticBuilder(scope="Basic") in_name = builder.addInput(10) enc_name = builder.addInner(3) out_name = builder.addOutput() builder.addDirectedLink(in_name, enc_name) builder.addDirectedLink(enc_name, out_name) self.assertEqual(builder.num_nodes, 3, "The number of nodes has not been " "assigned correctly") builder.build() inn, enc, out = ( builder.nodes[in_name], builder.nodes[enc_name], builder.nodes[out_name] ) self.assertEqual(inn._oslot_to_otensor[0].shape.as_list()[-1], enc._islot_to_itensor[0].shape.as_list()[-1], "The input tensors have not been assigned correctly") self.assertEqual(enc._oslot_to_otensor[0].shape.as_list()[-1], out._islot_to_itensor[0].shape.as_list()[-1], "The input tensors have not been assigned correctly")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_build_model(arguments):\n ...", "def test_quick_build(self):\n pass", "def test_quick_build1(self):\n pass", "def test_simple_creation():\n # Get model file\n create.main(\"mlp\", \"10:12:8\", \"model_test.tar\")", "def test_model():\n pass", "def test_BuildModel3(self):\n print(\"\\nTest 7: Building a more complicated Model\")\n builder = StaticBuilder(\"BreakIt\")\n in1 = builder.addInput(10)\n in2 = builder.addInput(20)\n enc1 = builder.addInner(3)\n enc2 = builder.addInner(5, num_islots=2)\n out1 = builder.addOutput()\n out2 = builder.addOutput()\n \n builder.addDirectedLink(in1, enc1)\n builder.addDirectedLink(in2, enc2, islot=0)\n builder.addDirectedLink(enc1, enc2, islot=1)\n builder.addDirectedLink(enc1, out1)\n builder.addDirectedLink(enc2, out2)\n \n builder.build()", "def build_model():", "def test_model_found(arguments):\n ...", "def test_STLModelBuilder1(self):\n\n self.delayDisplay(\"Starting the test\")\n #\n # first, get some data\n #\n import SampleData\n SampleData.downloadFromURL(\n nodeNames='FA',\n fileNames='FA.nrrd',\n uris='http://slicer.kitware.com/midas3/download?items=5767')\n self.delayDisplay('Finished with download and loading')\n\n volumeNode = slicer.util.getNode(pattern=\"FA\")\n logic = STLModelBuilderLogic()\n # self.assertIsNotNone(logic.hasImageData(volumeNode))\n self.delayDisplay('Test passed!')", "def test_get_model(self) -> None:\n get_model()", "def test_model_initialization():\n MyModel(\"model\", SkillContext())", "def runTest(self):\n self.setUp()\n self.test_STLModelBuilder1()", "def test_build(self):\n with model.BuildAntimony(self.copasi_file1) as loader:\n self.mod1 = loader.load(\n \"\"\"\n model model1\n compartment cell = 1.0\n var A in cell\n var B in cell\n\n vAProd = 0.1\n kADeg = 0.2\n kBProd = 0.3\n kBDeg = 0.4\n A = 0\n B = 0\n\n AProd: => A; cell*vAProd\n ADeg: A =>; cell*kADeg*A*B\n BProd: => B; cell*kBProd*A\n BDeg: B => ; cell*kBDeg*B\n end\n \"\"\"\n )\n self.assertTrue(os.path.isfile(self.copasi_file1))", "def test_models(self) -> None:\n full_name = \"owenstranathan/kupy-test\"\n self.assertEqual(full_name, self.project.full_name)\n self.assertEqual(sha1(full_name), self.project.id)\n self.assertEqual('12345', self.project.secrets['SECRET_TOKEN'])\n self.assertIn(self.build, list(self.project.builds))\n self.assertIn(self.deploy, list(self.project.deploys))\n\n build_id = sha1(f\"{self.project.id}/{self.commit_id}\")\n self.assertEqual(self.build.id, build_id)\n self.assertEqual(self.build.branch, \"develop\")\n self.assertEqual(self.build.commit_id, self.commit_id)\n self.assertEqual(self.build.project, self.project)\n self.assertIn(self.deploy, list(self.build.deploys))\n\n deploy_id = sha1(f\"{self.project.id}/{self.build.id}\")\n self.assertEqual(deploy_id, self.deploy.id)\n self.assertEqual(self.project, self.deploy.project)\n self.assertEqual(self.build, self.deploy.build)", "def create_model(self):\n self.skipTest(\"Base module should not be tested.\")", "def test_model_info_basic():\n model = ModelInfo('test description', {'f1': 0.9},\n BaseLocation('protoc://something:8080/thingy'))\n assert 'test description' in model.items['description']\n assert model.items['metrics']['f1'] == 0.9\n assert model.items['location'].get_host() == 'something:8080'\n assert model.items['hash'] is not None", "def test_BuildModel1(self):\n print(\"\\nTest 5: Building a Model with cloning\")\n builder = StaticBuilder(\"Clone\")\n in1 = builder.addInput(10)\n enc1 = builder.addInner(3)\n out1 = 
builder.addOutput(name=\"Out1\")\n out2 = builder.addOutput(name=\"Out2\")\n \n builder.addDirectedLink(in1, enc1)\n builder.addDirectedLink(enc1, out1)\n builder.addDirectedLink(enc1, out2)\n \n builder.build()", "def test_build_default_model(self):\n cfg = get_cfg_defaults()\n cfg.SYSTEM.NUM_GPUS = self.num_gpu\n model = build_model(cfg, self.device)\n self.assertTrue(isinstance(model, (torch.nn.Module,\n torch.nn.DataParallel,\n torch.nn.parallel.DistributedDataParallel)))", "def testModel( self, classTest, classPred):", "def test_settingmodel_init():\n SettingsModel()", "def test_training(self):\n\t\tpass", "def testGetReigsteredModel(self):\n from soc.models.student import Student\n model = models_logic.getModel('soc.models.student.Student')\n self.assertEqual(model, Student)", "def test_machine_learning():", "def test_pregenerated_model(sub_test, case):\n\n if case.startswith(\"sensi2\"):\n model_name = sub_test + \"_o2\"\n else:\n model_name = sub_test\n\n model_swig_folder = str(\n Path(__file__).parents[2]\n / \"build\"\n / \"tests\"\n / \"cpp\"\n / f\"external_{model_name}-prefix\"\n / \"src\"\n / f\"external_{model_name}-build\"\n / \"swig\"\n )\n\n test_model_module = amici.import_model_module(\n module_name=model_name, module_path=model_swig_folder\n )\n model = test_model_module.getModel()\n solver = model.getSolver()\n amici.readModelDataFromHDF5(\n options_file, model.get(), f\"/{sub_test}/{case}/options\"\n )\n amici.readSolverSettingsFromHDF5(\n options_file, solver.get(), f\"/{sub_test}/{case}/options\"\n )\n\n edata = None\n if \"data\" in expected_results[sub_test][case].keys():\n edata = amici.readSimulationExpData(\n str(expected_results_file), f\"/{sub_test}/{case}/data\", model.get()\n )\n rdata = amici.runAmiciSimulation(model, solver, edata)\n\n check_derivative_opts = dict()\n\n if model_name == \"model_nested_events\":\n check_derivative_opts[\"rtol\"] = 1e-2\n elif model_name == \"model_events\":\n check_derivative_opts[\"atol\"] = 1e-3\n\n if (\n edata\n and solver.getSensitivityMethod()\n and solver.getSensitivityOrder()\n and len(model.getParameterList())\n and not model_name.startswith(\"model_neuron\")\n and not case.endswith(\"byhandpreeq\")\n ):\n check_derivatives(model, solver, edata, **check_derivative_opts)\n\n verify_simulation_opts = dict()\n\n if model_name.startswith(\"model_neuron\"):\n verify_simulation_opts[\"atol\"] = 1e-5\n verify_simulation_opts[\"rtol\"] = 1e-2\n\n if model_name.startswith(\"model_robertson\") and case == \"sensiforwardSPBCG\":\n verify_simulation_opts[\"atol\"] = 1e-3\n verify_simulation_opts[\"rtol\"] = 1e-3\n\n verify_simulation_results(\n rdata, expected_results[sub_test][case][\"results\"], **verify_simulation_opts\n )\n\n if model_name == \"model_steadystate\" and case == \"sensiforwarderrorint\":\n edata = amici.amici.ExpData(model.get())\n\n # Test runAmiciSimulations: ensure running twice\n # with same ExpData yields same results\n if (\n edata\n and model_name != \"model_neuron_o2\"\n and not (model_name == \"model_robertson\" and case == \"sensiforwardSPBCG\")\n ):\n if isinstance(edata, amici.amici.ExpData):\n edatas = [edata, edata]\n else:\n edatas = [edata.get(), edata.get()]\n\n rdatas = amici.runAmiciSimulations(\n model, solver, edatas, num_threads=2, failfast=False\n )\n verify_simulation_results(\n rdatas[0],\n expected_results[sub_test][case][\"results\"],\n **verify_simulation_opts,\n )\n verify_simulation_results(\n rdatas[1],\n expected_results[sub_test][case][\"results\"],\n 
**verify_simulation_opts,\n )\n\n # test residuals mode\n if solver.getSensitivityMethod() == amici.SensitivityMethod.adjoint:\n with pytest.raises(RuntimeError):\n solver.setReturnDataReportingMode(amici.RDataReporting.residuals)\n else:\n solver.setReturnDataReportingMode(amici.RDataReporting.residuals)\n rdata = amici.runAmiciSimulation(model, solver, edata)\n verify_simulation_results(\n rdata,\n expected_results[sub_test][case][\"results\"],\n fields=[\"t\", \"res\", \"sres\", \"y\", \"sy\", \"sigmay\", \"ssigmay\"],\n **verify_simulation_opts,\n )\n with pytest.raises(RuntimeError):\n solver.setSensitivityMethod(amici.SensitivityMethod.adjoint)\n\n chi2_ref = rdata.chi2\n\n # test likelihood mode\n solver.setReturnDataReportingMode(amici.RDataReporting.likelihood)\n rdata = amici.runAmiciSimulation(model, solver, edata)\n verify_simulation_results(\n rdata,\n expected_results[sub_test][case][\"results\"],\n fields=[\"t\", \"llh\", \"sllh\", \"s2llh\", \"FIM\"],\n **verify_simulation_opts,\n )\n\n # test sigma residuals\n\n if (\n model_name == \"model_jakstat_adjoint\"\n and solver.getSensitivityMethod() != amici.SensitivityMethod.adjoint\n ):\n model.setAddSigmaResiduals(True)\n solver.setReturnDataReportingMode(amici.RDataReporting.full)\n rdata = amici.runAmiciSimulation(model, solver, edata)\n # check whether activation changes chi2\n assert chi2_ref != rdata.chi2\n\n if (\n edata\n and solver.getSensitivityMethod()\n and solver.getSensitivityOrder()\n and len(model.getParameterList())\n ):\n check_derivatives(model, solver, edata, **check_derivative_opts)\n\n chi2_ref = rdata.chi2\n res_ref = rdata.res\n\n model.setMinimumSigmaResiduals(100)\n rdata = amici.runAmiciSimulation(model, solver, edata)\n # check whether changing the minimum changes res but not chi2\n assert np.isclose(chi2_ref, rdata.chi2)\n assert not np.allclose(res_ref, rdata.res)\n\n model.setMinimumSigmaResiduals(-10)\n rdata = amici.runAmiciSimulation(model, solver, edata)\n # check whether having a bad minimum results in nan chi2\n assert np.isnan(rdata.chi2)\n\n with pytest.raises(RuntimeError):\n model.getParameterByName(\"thisParameterDoesNotExist\")", "def test_training(self):\n warnings.filterwarnings('ignore')\n example_args = example_args_parser()\n example_args.unittest = True\n # prepare data\n example_args.stage = 'prepare'\n example_wrapper(example_args)\n # train goalDNN model\n example_args.stage = 'train'\n example_args.model = 'goalDNN'\n example_wrapper(example_args)\n # train cVAE model\n example_args.model = 'cVAE'\n example_wrapper(example_args)\n # train gcVAE model\n example_args.model = 'gcVAE'\n example_wrapper(example_args)\n # cVAE harmonization\n example_args.stage = 'predict'\n example_args.model = 'cVAE'\n example_wrapper(example_args)\n # gcVAE harmonization\n example_args.model = 'gcVAE'\n example_wrapper(example_args)\n # goalDNN prediction\n example_args.model = 'goalDNN'\n example_wrapper(example_args)\n # XGBoost\n example_args.stage = 'train'\n example_args.model = 'XGBoost'\n example_wrapper(example_args)\n # compare with reference results\n check_args = check_results_args_parser()\n check_args.unittest = True\n check_reference_results(check_args)", "def test_build_network(self):\n # Check build does not raise errors\n dataset = KDDCupDataset()\n dataset.create_fixed_samples(\n *self.data, samples_num=1, partition_sizes=self.partition_sizes)\n dataset.set_current_sample(0)\n model = self.MODEL(dataset, **self.model_arguments)\n model.fit(close_session=True, 
training_epochs=50)", "def test_proof_model():\n m = build_proof_model()\n with m:\n trace = pm.sample(50)\n assert trace.report.ok", "def build_models():\n train_models()\n return build_response.sent_ok()", "def test_prepare_model_medium_objective(raw_model, expected_model, config):\n essential.configure_model(raw_model, config)\n raw_id = find_objective_function(raw_model)[0].id\n expected_id = find_objective_function(expected_model)[0].id\n assert raw_id == expected_id", "def test02(self):\n\t\tself.model = DomainModel('Test Domain Model',\n\t\t\tActor('Driver',\n\t\t\t\t\"\"\"Person who drives a car\"\"\",\n\t\t\t\tisA('Person')\n\t\t\t),\n\t\t\tActor('Passenger',\n\t\t\t\t\"\"\"Person who rides in a car\"\"\",\n\t\t\t\tisA('Person')\n\t\t\t),\n\t\t\tActor('Person',\n\t\t\t\t\"\"\"A human being, capable of driving and being driven\"\"\"\n\t\t\t),\n\t\t\tNoun('Car',\n\t\t\t\t\"\"\"A type of vehicle\"\"\",\n\t\t\t\thas('Driver','Passengers')\n\t\t\t)\n\t\t)", "def test_create_nontar_model():\n current_folder = os.path.dirname(os.path.realpath(__file__))\n misc_folder = os.path.join(current_folder, \"misc\")\n model_file = os.path.join(misc_folder, \"model-nonexistent.bla\")\n create.main(\"mlp\", \"10:12:8\", model_file)\n # TODO: Check if error was logged", "def test_model_obj():\n # Get the ambient profile data\n profile = get_profile()\n\n # Initialize a Model object\n sbm = single_bubble_model.Model(profile)\n\n # Check the model attributes\n assert_approx_equal(sbm.p.rho_r, 1031.035855535142, significant=6)\n (T, S, P) = profile.get_values(1000., ['temperature', 'salinity',\n 'pressure'])\n (Tp, Sp, Pp) = sbm.profile.get_values(1000., ['temperature', 'salinity',\n 'pressure'])\n assert Tp == T\n assert Sp == S\n assert Pp == P", "def test_constructor(self):\n # Record the model types of all the models to be created\n all_model_types = model_type_to_display_name.keys()\n\n # Record the attribute / value pairs that are common to all models.\n common_attr_value_dict = {\"data\": self.fake_df,\n \"name_spec\": self.fake_names,\n \"design\": self.fake_design,\n \"ind_var_names\": self.fake_names[\"x\"],\n \"alt_id_col\": self.alt_id_col,\n \"obs_id_col\": self.obs_id_col,\n \"choice_col\": self.choice_col,\n \"specification\": self.fake_specification,\n \"alt_IDs\": self.fake_df[\"alt_id\"].values,\n \"choices\": self.fake_df[\"choice\"].values}\n\n # Create a shape name dictionary to relate the various models to the\n # names of their shape parameters.\n shape_name_dict = {\"MNL\": None,\n \"Asym\": self.fake_shape_names[:2],\n \"Cloglog\": None,\n \"Scobit\": self.fake_shape_names,\n \"Uneven\": self.fake_shape_names,\n \"Nested Logit\": None,\n \"Mixed Logit\": None}\n\n # Create a shape reference position dictionary to relate the various\n # models to their shape reference positions.\n shape_ref_dict = {}\n for key in shape_name_dict:\n shape_ref_dict[key] = (None if key != \"Asym\" else\n self.fake_shape_ref_pos)\n\n # Create an intercept_names and intercept_ref_position dictionary to\n # relate the various models to their respective kwargs.\n intercept_names_dict = {}\n intercept_ref_dict = {}\n for key in shape_name_dict:\n if key in [\"MNL\", \"Nested Logit\", \"Mixed Logit\"]:\n intercept_names_dict[key] = None\n intercept_ref_dict[key] = None\n else:\n intercept_names_dict[key] = self.fake_intercept_names\n intercept_ref_dict[key] = self.fake_intercept_ref_pos\n\n # Create a nest_names dictionary to relate the various models to their\n # nest_name attributes\n nest_name_dict = 
{}\n nest_spec_dict = {}\n for key in shape_name_dict:\n if key != \"Nested Logit\":\n nest_name_dict[key] = None\n nest_spec_dict[key] = None\n else:\n nest_name_dict[key] = list(self.fake_nest_spec.keys())\n nest_spec_dict[key] = self.fake_nest_spec\n\n # Create dictionaries for the mixing_id_col, mixing_vars, and\n # mixing_pos attributes\n mixing_id_col_dict = {}\n mixing_vars_dict = {}\n mixing_pos_dict = {}\n\n for key in shape_name_dict:\n if key != \"Mixed Logit\":\n mixing_id_col_dict[key] = None\n mixing_vars_dict[key] = None\n mixing_pos_dict[key] = None\n else:\n mixing_id_col_dict[key] = self.obs_id_col\n mixing_vars_dict[key] = self.fake_names[\"x\"]\n mixing_pos_dict[key] = [0]\n\n # Record the attribute / value pairs that vary across models\n varying_attr_value_dict = {\"model_type\": model_type_to_display_name,\n \"intercept_names\": intercept_names_dict,\n \"intercept_ref_position\":\n intercept_ref_dict,\n \"shape_names\": shape_name_dict,\n \"shape_ref_position\": shape_ref_dict,\n \"nest_names\": nest_name_dict,\n \"nest_spec\": nest_spec_dict,\n \"mixing_id_col\": mixing_id_col_dict,\n \"mixing_vars\": mixing_vars_dict,\n \"mixing_pos\": mixing_pos_dict}\n\n # Set up the keyword arguments that are needed for each of the model\n # types\n variable_kwargs = {}\n for model_name in all_model_types:\n variable_kwargs[model_name] = {}\n variable_kwargs[model_name][\"intercept_names\"] =\\\n intercept_names_dict[model_name]\n variable_kwargs[model_name][\"intercept_ref_pos\"] =\\\n intercept_ref_dict[model_name]\n variable_kwargs[model_name][\"shape_ref_pos\"] =\\\n shape_ref_dict[model_name]\n variable_kwargs[model_name][\"shape_names\"] =\\\n shape_name_dict[model_name]\n variable_kwargs[model_name][\"nest_spec\"] =\\\n nest_spec_dict[model_name]\n variable_kwargs[model_name][\"mixing_id_col\"] =\\\n mixing_id_col_dict[model_name]\n variable_kwargs[model_name][\"mixing_vars\"] =\\\n mixing_vars_dict[model_name]\n\n # Execute the test for each model type\n for model_name in all_model_types:\n # Update the model type in the list of constructor args\n self.constructor_args[-1] = model_name\n\n # Use this specific model's keyword arguments\n self.constructor_kwargs.update(variable_kwargs[model_name])\n\n # Construct the model object\n model_obj = pylogit.create_choice_model(*self.constructor_args,\n **self.constructor_kwargs)\n\n # Make sure that the constructor has all of the required attributes\n for attr in common_attr_value_dict:\n value = common_attr_value_dict[attr]\n if isinstance(value, pd.DataFrame):\n self.assertTrue(value.equals(model_obj.data))\n elif isinstance(value, np.ndarray):\n npt.assert_allclose(value,\n model_obj.__getattribute__(attr))\n else:\n self.assertEqual(value,\n model_obj.__getattribute__(attr))\n\n for attr in varying_attr_value_dict:\n value = varying_attr_value_dict[attr][model_name]\n\n self.assertEqual(value,\n model_obj.__getattribute__(attr))\n\n return None", "def test_simple5(self):\n api = self.load_api_description('simple5.json')\n self.assertEqual(api.name, 'Starbucks')\n self.assertEqual(api.license, 'apache2')\n self.assertEqual(api.community, 'http://community.test.com')\n\n self.assertEqual(len(api.ownership), 2)\n self.assertEqual(api.ownership[0].name, 'Peter Parker')\n self.assertEqual(api.ownership[1].name, 'Bruce Wayne')\n self.assertEqual(api.ownership[1].email, 'bat@jleague.com')\n self.assertEqual(api.ownership[1].ownerType, 'tech')\n self.assertEqual(len(api.sla), 3)\n\n sla = api.sla[2]\n self.assertEqual(sla.name, 
'GOLD')\n self.assertEqual(sla.availability, 99.9)\n self.assertEqual(sla.rateLimit, 1000)\n self.assertEqual(sla.timeUnit, 'second')\n cost = sla.costModel\n self.assertEqual(cost.currency, 'USD')\n self.assertEqual(cost.unitPrice, 0.1)\n self.assertEqual(cost.requestsPerUnit, 1000)\n\n sla = api.sla[0]\n self.assertEqual(sla.name, 'FREE')\n self.assertIsNone(sla.costModel)", "def test_make_form():", "def test_build(self):\n self.app.build()", "def test():\n return _make_modules(is_train=False)", "def test_default_model(self):\n\n try:\n pp = Lexpp()\n except Exception:\n self.fail(\"initialize was failed\")", "def test_model_runs(self):\n\n for m in self.models:\n self.assertTrue(m is not None)\n self.assertTrue(isinstance(m, topic_model.TopicModel))", "def test_simple(self):\n # Get components for a simple network\n cdata = random_data(num_features=2, num_samples=100, labels=[1 if x < 50 else 0 for x in range(100)])\n encoder = DenseAngleEncoding(cdata, angle_simple_linear, nearest_neighbor(2, 1))\n layer = ProductAnsatz(1)\n measure = Measurement(1, range(1))\n computer = \"1q-qvm\"\n\n # make the network\n qnn = Network([encoder, layer, measure], computer)\n\n # check some basics\n self.assertEqual(type(qnn.computer), QuantumComputer)", "def test_optimize_basic(name, builder):\n\n model = Model(name)\n dirty = True\n printing = True\n counter = 1\n stats = list()\n\n with model.build():\n builder()\n\n if printing:\n print_graphs(f'opt_{name}_init', model)\n\n while dirty:\n\n print()\n\n dirty, new_model = model.run_algebra(\n OptimizeAlg(name=name,\n counter=counter,\n stats=stats,\n num_steps=1))\n \n if printing: \n print_graphs(f'opt_{name}_post({counter})', new_model)\n\n model = new_model\n counter += 1\n\n if printing:\n print_stats(f'opt_{name}', stats)", "def test_build_basic(self):\n # Get the components for a network\n data = array([[0, 1], [1, 0]])\n cdata = CData(data)\n encoder = BinaryEncoding(cdata)\n layer = ProductAnsatz(2)\n measure = Measurement(2, [0])\n\n # Make the network\n qnn = Network([encoder, layer, measure], computer=\"2q-qvm\")\n\n # Build each circuit for the network\n net0 = qnn._build(0)\n net1 = qnn._build(1)\n\n # Check that each circuit is a BaseAnsatz\n self.assertEqual(type(net0), BaseAnsatz)\n self.assertEqual(type(net1), BaseAnsatz)", "def test_generate_data_model():\n params = dict(name=\"test\", type_=str, is_required=True)\n\n data_model = DataModel(\"test\", [Attribute(**params)])\n\n assert generate_data_model(\"test\", {\"test\": \"str\"}) == data_model", "def test_active_inference_SPM_1b(self):", "def test01_name(self):\n model = self.setup_model01(\"m1\")\n model2 = self.setup_model01(\"m2\")\n\n model2.b[1].a = 0.11\n model2.b[1].b = 0.11\n model2.x = False\n to_json(model, fname=self.fname, human_read=True)\n from_json(model2, fname=self.fname)\n # make sure they are right\n assert pytest.approx(20) == value(model2.b[1].b)\n assert pytest.approx(2) == value(model2.b[1].a)\n assert value(model2.x) == True", "def test_valid_basic_build():\n config = load_json_fixture(\"basic-build-config.json\")\n\n vd.SCHEMA_BUILD_CONFIG(config)", "def test_valid_model(self):\n model_cls = ModelContainer(APP_LABEL, TestModel2._meta.db_table).model_cls\n self.assertTrue(model_cls.__class__.__name__ is models.Model.__class__.__name__)", "def test_build_feature_base(self):\n data = pd.DataFrame(pd.read_csv(\"tests/in_data/pro1_sub.csv\"))\n\n X = data.ix[:,1]\n Y = data.ix[:,0]\n model_sample = Model([],\"presence\")\n\n feature_base = 
model_sample.build_feature_base(X,Y)\n feature_evaluation =\n assert_equal(len(feature_base) > 10, True)", "def test_generate_all_testing(self):\n pass", "def testGetModelsData(self):\n models = models_logic._getModelsData()\n self.assertTrue(models)", "def test_predictor():", "def test_model_to_regular(self):\n r = RNA.make_array_seq(\"AAA\", name=\"x\")\n s = RNA.make_seq(r)\n self.assertEqual(str(s), \"AAA\")\n self.assertEqual(s.moltype, RNA)\n self.assertEqual(s.name, \"x\")", "def run_tests():\n source1 = TextModel('prep')\n source1.add_file('source_model_1.txt')\n \n source2 = TextModel('athletes')\n source2.add_file('source_model_2.txt')\n\n new1 = TextModel('my_writing')\n new1.add_file('my_writing.txt')\n new1.classify(source1, source2)\n\n # Add code for three other new models below.", "def test_model_info_Generator():\n model = ModelInfo('test description', {'f1': 0.9},\n BaseLocation('protoc://something'), TestGenerator())\n assert 'test description' in model.items['description']\n assert model.items['metrics']['f1'] == 0.9\n assert model.items['location'].get_host() == 'something'\n assert model.items['hash'] is 5", "def test_create_hyperflex_server_model(self):\n pass", "def test_create_a_car_make(car_make):\n car_makes = models.CarMake.objects.all()\n\n assert car_make\n assert car_make.name == \"Volkswagen\"\n assert len(car_makes) == 1", "def test_build_model(self):\n with tempfile.TemporaryDirectory() as tempdir:\n eval_file = os.path.join(tempdir, \"en_test.dev.txt\")\n with open(eval_file, \"w\", encoding=\"utf-8\") as fout:\n fout.write(fake_text_1)\n train_file = os.path.join(tempdir, \"en_test.train.txt\")\n with open(train_file, \"w\", encoding=\"utf-8\") as fout:\n for i in range(1000):\n fout.write(fake_text_1)\n fout.write(\"\\n\")\n fout.write(fake_text_2)\n fout.write(\"\\n\")\n save_name = 'en_test.forward.pt'\n vocab_save_name = 'en_text.vocab.pt'\n checkpoint_save_name = 'en_text.checkpoint.pt'\n args = ['--train_file', train_file,\n '--eval_file', eval_file,\n '--eval_steps', '0', # eval once per opoch\n '--epochs', '2',\n '--cutoff', '1',\n '--batch_size', '%d' % len(fake_text_1),\n '--lang', 'en',\n '--shorthand', 'en_test',\n '--save_dir', tempdir,\n '--save_name', save_name,\n '--vocab_save_name', vocab_save_name,\n '--checkpoint_save_name', checkpoint_save_name]\n args = charlm.parse_args(args)\n charlm.train(args)\n\n assert os.path.exists(os.path.join(tempdir, vocab_save_name))\n\n # test that saving & loading of the model worked\n assert os.path.exists(os.path.join(tempdir, save_name))\n model = char_model.CharacterLanguageModel.load(os.path.join(tempdir, save_name))\n\n # test that saving & loading of the checkpoint worked\n assert os.path.exists(os.path.join(tempdir, checkpoint_save_name))\n model = char_model.CharacterLanguageModel.load(os.path.join(tempdir, checkpoint_save_name))\n trainer = char_model.CharacterLanguageModelTrainer.load(args, os.path.join(tempdir, checkpoint_save_name))\n\n assert trainer.global_step > 0\n assert trainer.epoch == 2\n\n # quick test to verify this method works with a trained model\n charlm.get_current_lr(trainer, args)\n\n # test loading a vocab built by the training method...\n vocab = charlm.load_char_vocab(os.path.join(tempdir, vocab_save_name))\n trainer = char_model.CharacterLanguageModelTrainer.from_new_model(args, vocab)\n # ... 
and test the get_current_lr for an untrained model as well\n # this test is super \"eager\"\n assert charlm.get_current_lr(trainer, args) == args['lr0']", "def test_full_model(model, documents, request):\n topic_model = copy.deepcopy(request.getfixturevalue(model))\n if model == \"base_topic_model\":\n topic_model.save(\"model_dir\", serialization=\"pytorch\", save_ctfidf=True, save_embedding_model=\"sentence-transformers/all-MiniLM-L6-v2\")\n topic_model = BERTopic.load(\"model_dir\")\n topics = topic_model.topics_\n\n for topic in set(topics):\n words = topic_model.get_topic(topic)[:10]\n assert len(words) == 10\n\n for topic in topic_model.get_topic_freq().Topic:\n words = topic_model.get_topic(topic)[:10]\n assert len(words) == 10\n\n assert len(topic_model.get_topic_freq()) > 2\n assert len(topic_model.get_topics()) == len(topic_model.get_topic_freq())\n\n # Test extraction of document info\n document_info = topic_model.get_document_info(documents)\n assert len(document_info) == len(documents)\n\n # Test transform\n doc = \"This is a new document to predict.\"\n topics_test, probs_test = topic_model.transform([doc, doc])\n\n assert len(topics_test) == 2\n\n # Test topics over time\n timestamps = [i % 10 for i in range(len(documents))]\n topics_over_time = topic_model.topics_over_time(documents, timestamps)\n\n assert topics_over_time.Frequency.sum() == len(documents)\n assert len(topics_over_time.Topic.unique()) == len(set(topics))\n\n # Test hierarchical topics\n hier_topics = topic_model.hierarchical_topics(documents)\n\n assert len(hier_topics) > 0\n assert hier_topics.Parent_ID.astype(int).min() > max(topics)\n\n # Test creation of topic tree\n tree = topic_model.get_topic_tree(hier_topics, tight_layout=False)\n assert isinstance(tree, str)\n assert len(tree) > 10\n\n # Test find topic\n similar_topics, similarity = topic_model.find_topics(\"query\", top_n=2)\n assert len(similar_topics) == 2\n assert len(similarity) == 2\n assert max(similarity) <= 1\n\n # Test topic reduction\n nr_topics = len(set(topics))\n nr_topics = 2 if nr_topics < 2 else nr_topics - 1\n topic_model.reduce_topics(documents, nr_topics=nr_topics)\n\n assert len(topic_model.get_topic_freq()) == nr_topics\n assert len(topic_model.topics_) == len(topics)\n\n # Test update topics\n topic = topic_model.get_topic(1)[:10]\n vectorizer_model = topic_model.vectorizer_model\n topic_model.update_topics(documents, n_gram_range=(2, 2))\n\n updated_topic = topic_model.get_topic(1)[:10]\n\n topic_model.update_topics(documents, vectorizer_model=vectorizer_model)\n original_topic = topic_model.get_topic(1)[:10]\n\n assert topic != updated_topic\n if topic_model.representation_model is not None:\n assert topic != original_topic\n\n # Test updating topic labels\n topic_labels = topic_model.generate_topic_labels(nr_words=3, topic_prefix=False, word_length=10, separator=\", \")\n assert len(topic_labels) == len(set(topic_model.topics_))\n\n # Test setting topic labels\n topic_model.set_topic_labels(topic_labels)\n assert topic_model.custom_labels_ == topic_labels\n\n # Test merging topics\n freq = topic_model.get_topic_freq(0)\n topics_to_merge = [0, 1]\n topic_model.merge_topics(documents, topics_to_merge)\n assert freq < topic_model.get_topic_freq(0)\n\n # Test reduction of outliers\n if -1 in topics:\n new_topics = topic_model.reduce_outliers(documents, topics, threshold=0.0)\n nr_outliers_topic_model = sum([1 for topic in topic_model.topics_ if topic == -1])\n nr_outliers_new_topics = sum([1 for topic in new_topics if topic 
== -1])\n\n if topic_model._outliers == 1:\n assert nr_outliers_topic_model > nr_outliers_new_topics\n\n # # Save and load model\n # if topic_model.topic_embeddings_ is not None:\n # topic_model.save(\"model_dir\", serialization=\"pytorch\", save_ctfidf=True)\n # loaded_model = BERTopic.load(\"model_dir\")", "def test_train_house_price_model():\n with app.test_client()as c:\n response = c.get('/REST/api/v1.0/train')\n assert response.status_code == 201", "def test_minimal_machine(self):\n Machine.objects.create(name=\"Linac Name 03\",\n visible_name=\"Linac 03\")", "def test_deep_learning_models():\n atom = ATOMClassifier(*mnist, n_rows=0.1, random_state=1)\n pytest.raises(PermissionError, atom.clean)\n atom.run(KerasClassifier(neural_network, epochs=1, batch_size=512, verbose=0))", "def test_test_client_model(self):\n pass", "def test_creating_simple_feature():\n # given & when\n feature = Feature(1, \"Feature\", \"I am a feature\", \"foo.feature\", 1, tags=None)\n\n # then\n assert feature.id == 1\n assert feature.keyword == \"Feature\"\n assert feature.sentence == \"I am a feature\"\n assert feature.path == \"foo.feature\"\n assert feature.line == 1\n assert feature.tags == []", "def test_all_components(self):\n model_name = 'BCZModel'\n pose_components = [\n ('xyz', 3, True, 100.),\n ('quaternion', 4, False, 10.),\n ('axis_angle', 3, True, 10.),\n ('arm_joints', 7, True, 1.),\n ('target_close', 1, False, 1.),\n ]\n gin.bind_parameter(\n 'BCZModel.action_components', pose_components)\n gin.parse_config('BCZPreprocessor.mock_subtask = True')\n gin.parse_config(\n 'resnet_film_network.film_generator_fn = @linear_film_generator')\n self._fixture.random_train(model, model_name)", "def test_valid_model_code(self) -> None:\n model_code = 101\n expected = {\n \"code\": \"GR1\",\n \"number\": 101,\n \"name\": \"Short, sparse, dry climate grass\",\n \"description\": \"Short, sparse dry climate grass is short, naturally or heavy grazing, \"\n \"predicted rate of fire spread and flame length low.\",\n \"fuel_load\": [0.100, 0.00, 0.00, 0.3, 0.00],\n \"type\": \"Dynamic\",\n \"sav_ratio\": [2200.0, 2000.0, 0.0],\n \"fuel_bed_depth\": 0.4,\n \"dead_fuel_moisture_of_extinction\": 0.15,\n \"characteristic_sav\": 2054.0,\n \"bulk_density\": 0.05,\n \"relative_packing_ratio\": 0.22\n }\n res = self.app.get('/model-parameters', query_string={\"number\": model_code})\n self.assertEqual(200, res.status_code)\n self.assertEqual(expected, json.loads(res.data.decode('utf-8')))", "def test_create_a_car_model(car_model):\n car_models = models.CarMake.objects.all()\n\n assert car_model\n assert car_model.name == \"Golf\"\n assert car_model.car_make.name == \"Volkswagen\"\n assert len(car_models) == 1", "def test_create10(self):\n pass", "def test_model_processor():\n global model_processor_called\n\n model_str = 'first 34 45 7 A 45 65 B true C \"dfdf\"'\n\n metamodel = metamodel_from_str(grammar)\n metamodel.register_model_processor(model_processor)\n\n metamodel.model_from_str(model_str)\n\n assert model_processor_called", "def pywemo_model_fixture():\n return \"LightSwitch\"", "def test_intro_model():\n k = 100 # number of latent topics\n prep = DataPrep(filepath='/home/ubuntu/ca_bills_project/data/extra/topic_intro_data_05-23-17-08-23.csv')\n prep.prepare()\n\n features = [u'days_since_start', u'session_type', u'party_ALL_DEM', u'party_ALL_REP',\n u'party_BOTH', 'party_COM', u'urgency_No', u'urgency_Yes',\n u'taxlevy_No',\n u'taxlevy_Yes']\n topic_features = [\"topic_\"+str(x) for x in range(k)]\n features += 
topic_features\n X_train, y_train = prep.subset(features)\n\n baseline = DummyClassifier(strategy='stratified')\n\n rf = RandomForestClassifier(max_features=0.1, n_estimators=1000, max_depth=8, n_jobs=-1)\n ada = AdaBoostClassifier(n_estimators=100, learning_rate=0.05)\n gb = GradientBoostingClassifier(n_estimators=100, learning_rate=0.05, max_depth=4)\n\n mc = ModelChooser([gb])\n\n mc.train(X_train, y_train)\n\n save_model(mc.list_of_models[0], \"intro_model_100_topics_gb.pkl\")", "def test_run_simplega():\n WRFga_winner = run_simplega(pop_size=100, n_generations=1, testing=True)\n assert WRFga_winner.Fitness >= 0", "def unitary_test():", "def test_perfectModelEnsemble_init(PM_ds_initialized_1d):\n pm = PerfectModelEnsemble(PM_ds_initialized_1d)\n print(PerfectModelEnsemble)\n assert pm", "def test_example(self):\n self.assertEqual(self.example.get_example(), True)", "def test_coupledmodels_get(self):\n pass", "def test_intro_model_n_amd():\n prep = DataPrep(filepath='/home/ubuntu/ca_bills_project/data/extra/intro_data_w_content_5_22.csv')\n n=100\n prep.prepare(n_components=n, use_cached_tfidf='/home/ubuntu/ca_bills_project/data/extra/cached_tfidf_real_05-23-17-05-28.pkl')\n features = [\n\n u'days_since_start',\n u'vote_required',\n u'nterms', u'success_rate',\n u'n_amd', u'session_type',\n u'party_ALL_DEM', u'party_ALL_REP',\n u'party_BOTH', u'party_COM',\n u'urgency_No', u'urgency_Yes',\n u'appropriation_No', u'appropriation_Yes',\n u'taxlevy_No', u'taxlevy_Yes',\n u'fiscal_committee_No', u'fiscal_committee_Yes']\n topic_features = [\"topic_\"+str(k) for k in range(n)]\n features += topic_features\n X_train, y_train = prep.subset(features, dep_var='n_amd')\n\n baseline = DummyRegressor()\n\n gb = GradientBoostingRegressor()\n\n mc = ModelChooser([baseline, gb])\n mc.fit_predict(X_train, y_train, regressor=True)\n mc.print_results(regressor=True)", "def run_tests():\r\n source1 = TextModel('50 Shades of Gray')\r\n source1.add_file('50.txt')\r\n \r\n print()\r\n \r\n source2 = TextModel('King James Version of the Bible')\r\n source2.add_file('kjv.txt')\r\n\r\n print()\r\n\r\n new1 = TextModel('Shakespeare')\r\n new1.add_file('shake.txt')\r\n new1.classify(source1, source2)\r\n \r\n print()\r\n \r\n new2 = TextModel('JK Rowling')\r\n new2.add_file('hp.txt')\r\n new2.classify(source1, source2)\r\n \r\n print()\r\n \r\n new3 = TextModel('Breitbart News Network')\r\n new3.add_file('bnn.txt')\r\n new3.classify(source1, source2)\r\n \r\n print()\r\n \r\n new4 = TextModel('Chaucer')\r\n new4.add_file('tct.txt')\r\n new4.classify(source1, source2)", "def test_constructor(self):\n pass", "def _build_model(self):\n raise NotImplementedError()", "def build_model(self):\n pass", "def build_model(self):\n pass", "def test_load_model():\n model = BERTopic(language=\"Dutch\", embedding_model=None, n_components=12)\n model.save(\"test\")\n loaded_model = BERTopic.load(\"test\")\n assert type(model) == type(loaded_model)\n assert model.language == loaded_model.language\n assert model.embedding_model == loaded_model.embedding_model\n assert model.top_n_words == loaded_model.top_n_words\n assert model.n_neighbors == loaded_model.n_neighbors\n assert model.n_components == loaded_model.n_components", "def test(self):\n pass", "def test_model_todo(self):\n mommy.make('Todo',user=self.user,title=recipe.seq('hello'), _quantity=5)\n todos = Todo.objects.all()\n self.assertEqual(len(todos), 5)\n self.assertIn('hello', todos.__str__())", "def test_dummy():", "def test_train_model_type_good():\n\tdf = 
pd.read_csv(\"test/sample_features.csv\")\n\n\ty_train = df['price']\n\tX_train = df.loc[:, df.columns != 'price']\n\n\tparams = {'n_estimators': 5, 'random_state': 2}\n\trf_test = train(X_train, y_train, params)\n\n\t# test model type\n\ttest_model_type = str(type(rf_test))\n\ttrue_model_type = \"<class 'sklearn.ensemble._forest.RandomForestRegressor'>\"\n\tassert test_model_type == true_model_type", "def test_tests():\n submission = SubmissionBuilder(\"t\", \"b\", [\"anything\"]).build()\n assert submission.get(\"results\") == [\"anything\"], submission", "def test_noarguments(self):\n self.assertEqual(BaseModel, type(BaseModel()))", "def test_coupledmodels_post(self):\n pass", "def test_model_info():\n with pytest.raises(InvalidDataError):\n ModelInfo('test description', {'f1': 0.9},\n None)", "def model_switch_to_testing(self):\n pass", "def test_add_model_components(self):\n create_abstract_model(\n prereq_modules=IMPORTED_PREREQ_MODULES,\n module_to_test=MODULE_BEING_TESTED,\n test_data_dir=TEST_DATA_DIRECTORY,\n subproblem=\"\",\n stage=\"\",\n )", "def test_add_model_components(self):\n create_abstract_model(\n prereq_modules=IMPORTED_PREREQ_MODULES,\n module_to_test=MODULE_BEING_TESTED,\n test_data_dir=TEST_DATA_DIRECTORY,\n subproblem=\"\",\n stage=\"\",\n )", "def test_home_get(self):\n\n response = self.client().get('/')\n self.assertEqual(response.status_code, 200)\n loaded_model = Model.from_json(response.data.decode())\n self.assertEqual(loaded_model, self.data_manipulation.get_latest_model())", "def test_custom_models(model):\n atom = ATOMRegressor(X_reg, y_reg, random_state=1)\n atom.run(models=model, n_calls=2, n_initial_points=1)\n assert atom.rfr.fullname == \"RandomForestRegressor\"\n assert atom.rfr.estimator.get_params()[\"random_state\"] == 1", "def test_posthoc_check():\n # make a model, train then save it\n model, X, y, Xval, yval = make_small_model()\n loss = tf.keras.losses.CategoricalCrossentropy(\n from_logits=False, reduction=tf.losses.Reduction.NONE\n )\n model.compile(loss=loss, optimizer=None)\n model.fit(X, y, validation_data=(Xval, yval), epochs=1, batch_size=20)\n\n # should be ok\n _, disclosive = model.posthoc_check()\n assert disclosive is False, \"base config in tests should be ok\"\n\n # change optimizer and some other settings\n # in way that stresses lots of routes\n cleanup_file(\"tfsaves/fit_model.tf\")\n model.epochs = 1000\n model.optimizer = tf.keras.optimizers.get(\"SGD\")\n _, disclosive = model.posthoc_check()\n assert disclosive is True, \"should pick up optimizer changed\"\n\n cleanup_file(\"keras_save.tf\")\n cleanup_file(\"tfsaves\")", "def classic_model_testing():\n dataset_path = \"/home/kateryna/Documents\"\n X_train, X_test, y_train, y_test = generate_embeddings_memory(dataset_path, classes=['normal', 'glare_small'])\n contam = 0.08\n models = [XGBOD(), OCSVM(contamination=contam), IForest(contamination=contam, n_estimators=150), XGBOD(learning_rate=0.01, n_estimators=150),\n COPOD(contamination=contam)]\n for model in models:\n model_name = model.__str__().split('(')[0]\n clf = model\n clf.fit(X_train, y_train)\n\n y_train_pred = clf.labels_\n y_train_scores = clf.decision_scores_\n\n # get the prediction on the test data\n # 0 stands for inliers and 1 for outliers.\n y_test_pred = clf.predict(X_test)\n y_test_scores = clf.decision_function(X_test)\n # y_probabilities = clf.predict_proba(X_test)\n print(\"\\nOn Training Data:\")\n evaluate_print(model_name, y_train, y_train_scores)\n print(\"\\nOn Test Data:\")\n 
evaluate_print(model_name, y_test, y_test_scores)\n print('roc auc', roc_auc_score(y_test, y_test_scores))\n\n conf_mtx_test = confusion_matrix(y_test, y_test_pred, labels=[0, 1])\n print(conf_mtx_test)\n conf_mtx_train = confusion_matrix(y_train, y_train_pred, labels=[0, 1])\n print(conf_mtx_train)\n print('~~~')", "def test_utilities(random_params):\n logging.warning('\\n\\n\\nDISTANCE ONLY, AMSTERDAM CASE')\n random_params['case'] = np.random.choice(['amsterdam-income', 'amsterdam-ses'])\n log_params(random_params)\n model = CompassModel(**random_params)\n \n # Initially, no utilities can be zero (theoretically it could be)\n # if they are 0, then it's probably an unfilled initial array.\n assert np.all(Household._household_res_utility[:] > 0)\n assert np.all(Household._household_res_utility[:] <= 1)\n\n if model.params['case'].lower()=='lattice':\n assert np.all(model.normalized_compositions >= 0)\n assert np.all(model.normalized_compositions <= 1)\n model.simulate(res_steps=1, school_steps=2)\n assert np.all(model.normalized_compositions >= 0)\n assert np.all(model.normalized_compositions <= 1)\n else:\n model.simulate(res_steps=0, school_steps=2)\n\n # Now utilities can be 0\n for array in [Household._household_res_utility[:], \n Household._household_school_utility_comp,\n Household._household_school_utility, \n Household._household_distance]:\n assert np.all(array >= 0)\n assert np.all(array <= 1)", "def test_T01():", "def test_setup(self):\n\t\t\n\t\tprint(f\"using self.weights_dir as model = {self.model}\")\n\t\tself.assertTrue(os.path.isdir(self.model))\n\t\tself.assertEquals(self.weights_dir, self.model)\n\n\t\tnum_files = len(glob.glob(f\"{self.model}/*\"))\n\t\tself.assertTrue(num_files > 0)\n\t\t\n\t\treturn" ]
[ "0.74242914", "0.7226327", "0.705865", "0.70076", "0.70002264", "0.6796702", "0.6709102", "0.6660882", "0.656199", "0.6525237", "0.6459824", "0.6387487", "0.63846844", "0.635007", "0.63139594", "0.63126516", "0.630687", "0.62744576", "0.6251773", "0.6231023", "0.62173486", "0.6181008", "0.613728", "0.6128389", "0.6114547", "0.61141896", "0.6107538", "0.6105489", "0.6096621", "0.6085447", "0.6076917", "0.60660136", "0.60426676", "0.60292876", "0.60150814", "0.6008812", "0.5985767", "0.5982461", "0.5978366", "0.59744334", "0.5973444", "0.59655637", "0.5958797", "0.59411275", "0.593012", "0.59252685", "0.5923951", "0.5916209", "0.58991283", "0.58838606", "0.58804095", "0.587431", "0.58588594", "0.5854932", "0.5842246", "0.58377314", "0.5828509", "0.5826974", "0.5822163", "0.5818692", "0.5813957", "0.580534", "0.5804291", "0.5801909", "0.579934", "0.5797675", "0.5796357", "0.57868445", "0.57866883", "0.5781613", "0.5774994", "0.57698625", "0.5767115", "0.57657355", "0.57574624", "0.57553774", "0.5754923", "0.57430196", "0.5739388", "0.57375354", "0.57375354", "0.5734402", "0.5720301", "0.57096535", "0.57085496", "0.57070506", "0.5705859", "0.5701329", "0.5698642", "0.5687896", "0.5684853", "0.56752187", "0.56752187", "0.56721365", "0.5666043", "0.5663256", "0.56600124", "0.5647368", "0.5646925", "0.5642182" ]
0.70569456
3
Test building a model with 2 outputs. Test cloning an output.
def test_BuildModel1(self): print("\nTest 5: Building a Model with cloning") builder = StaticBuilder("Clone") in1 = builder.addInput(10) enc1 = builder.addInner(3) out1 = builder.addOutput(name="Out1") out2 = builder.addOutput(name="Out2") builder.addDirectedLink(in1, enc1) builder.addDirectedLink(enc1, out1) builder.addDirectedLink(enc1, out2) builder.build()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_default_output_2():\n #input_file = os.path.join('.', 'test_files', 'test.input')\n actual = os.path.join('.', 'test_files', 'rc_actual.out')\n times = list(range(0, 30, 5))\n inputs = {\"names\": ['V'],\n \"values\": [\n [1],\n [0],\n [-1],\n [0],\n [1]\n ]\n }\n params=None\n\n default_model_2 = ModelBCMD('rc',\n inputs,\n params,\n times,\n input_file = None,\n input_required=True,\n testing=True,\n workdir=os.path.join('.','test_files'),\n debug=True,\n basedir=BASEDIR)\n\n default_model_2.write_default_input_2()\n print(default_model_2.input_file.encode())\n print(default_model_2.input_file.encode().decode())\n default_model_2.run_2()\n assert_true(filecmp.cmp(default_model_2.output_coarse, actual), msg='Coarse output files do not match actual.')\n os.remove(default_model_2.output_coarse)\n os.remove(default_model_2.output_detail)", "def test_BuildModel0(self):\n print(\"\\nTest 4: Building a Basic Model\")\n builder = StaticBuilder(scope=\"Basic\")\n in_name = builder.addInput(10)\n enc_name = builder.addInner(3)\n out_name = builder.addOutput()\n builder.addDirectedLink(in_name, enc_name)\n builder.addDirectedLink(enc_name, out_name)\n \n self.assertEqual(builder.num_nodes, 3, \"The number of nodes has not been \"\n \"assigned correctly\")\n \n builder.build()\n inn, enc, out = ( builder.nodes[in_name], builder.nodes[enc_name],\n builder.nodes[out_name] )\n self.assertEqual(inn._oslot_to_otensor[0].shape.as_list()[-1],\n enc._islot_to_itensor[0].shape.as_list()[-1], \n \"The input tensors have not been assigned correctly\")\n self.assertEqual(enc._oslot_to_otensor[0].shape.as_list()[-1],\n out._islot_to_itensor[0].shape.as_list()[-1], \n \"The input tensors have not been assigned correctly\")", "def test_part_1(arguments, output):\n # assert part_1.solution(arguments) == output\n assert part_1.solution(arguments) == output", "def test_BuildModel2(self):\n print(\"\\nTest 6: Building a Model with Concat\")\n builder = StaticBuilder(\"Concat\")\n in1 = builder.addInput(10)\n in2 = builder.addInput(20)\n enc1 = builder.addInner(3, num_islots=2)\n out1 = builder.addOutput()\n\n builder.addDirectedLink(in1, enc1, islot=0)\n builder.addDirectedLink(in2, enc1, islot=1)\n builder.addDirectedLink(enc1, out1)\n \n builder.build()", "def test_simple_merge(self):\n input_tensor = Input(shape=(3,))\n x1 = Dense(4)(input_tensor)\n x2 = Dense(5)(x1)\n x3 = Dense(6)(x1)\n x4 = merge([x2, x3], mode=\"concat\")\n x5 = Dense(7)(x4)\n\n model = Model(input=[input_tensor], output=[x5])\n input_names = [\"data\"]\n output_names = [\"output\"]\n\n spec = keras.convert(model, input_names, output_names).get_spec()\n self.assertIsNotNone(spec)\n\n # Test the model class\n self.assertIsNotNone(spec.description)\n self.assertTrue(spec.HasField(\"neuralNetwork\"))\n\n # Test the inputs and outputs\n self.assertEqual(len(spec.description.input), len(input_names))\n self.assertEqual(\n sorted(input_names), sorted(map(lambda x: x.name, spec.description.input))\n )\n self.assertEqual(len(spec.description.output), len(output_names))\n self.assertEqual(\n sorted(output_names), sorted(map(lambda x: x.name, spec.description.output))\n )", "def test_clone_scenario(self):\n pass", "def test_BuildModel3(self):\n print(\"\\nTest 7: Building a more complicated Model\")\n builder = StaticBuilder(\"BreakIt\")\n in1 = builder.addInput(10)\n in2 = builder.addInput(20)\n enc1 = builder.addInner(3)\n enc2 = builder.addInner(5, num_islots=2)\n out1 = builder.addOutput()\n out2 = builder.addOutput()\n \n 
builder.addDirectedLink(in1, enc1)\n builder.addDirectedLink(in2, enc2, islot=0)\n builder.addDirectedLink(enc1, enc2, islot=1)\n builder.addDirectedLink(enc1, out1)\n builder.addDirectedLink(enc2, out2)\n \n builder.build()", "def _test_output_shapes(model):\n assert model.r == r\n assert model.m == m\n assert model.c_.shape == (r,)\n assert model.A_.shape == (r,r)\n assert model.Hc_.shape == (r,r*(r+1)//2)\n assert model.H_.shape == (r,r**2)\n assert model.Gc_.shape == (r,r*(r+1)*(r+2)//6)\n assert model.G_.shape == (r,r**3)\n assert model.B_.shape == (r,m)\n assert hasattr(model, \"datacond_\")\n assert hasattr(model, \"dataregcond_\")\n assert round(model.dataregcond_, 6) <= round(model.datacond_, 6)\n assert hasattr(model, \"residual_\")\n assert hasattr(model, \"misfit_\")\n assert round(model.misfit_, 6) <= round(model.residual_, 6)", "def test_build_model(arguments):\n ...", "def test(self):\n img_gen, self.loss_reg, self.parsav = self.net_G(self.input_P1, self.input_P2, self.input_BP1, self.input_BP2, self.input_SPL1, self.input_SPL2)\n ## test flow ##\n\n self.save_results(img_gen, data_name='vis')\n if self.opt.save_input or self.opt.phase == 'val':\n self.save_results(self.input_P1, data_name='ref')\n self.save_results(self.input_P2, data_name='gt')\n result = torch.cat([self.input_P1, img_gen, self.input_P2], 3)\n self.save_results(result, data_name='all')", "def test_same_models(self):\n\t\t\n\t\t# TODO: finish\n\t\tpass", "def test_output(data,idx,model):\n x,y = data[idx]\n out = model(x)\n return y.data.cpu().numpy(), out.data.cpu().numpy()", "def test_prepare_sample_to_forward(self):\n sample = [\n {\"src\": \"ola mundo\", \"ref\": \"hi world\", \"mt\": \"hey world!\", \"score\": 0.8},\n {\"src\": \"ola mundo\", \"ref\": \"hi world\", \"mt\": \"hey world!\", \"score\": 0.8},\n ]\n\n model_input, target = self.estimator.prepare_sample(sample)\n model_output = self.estimator(**model_input)\n self.assertTrue(model_output[\"score\"].shape[0] == 2)\n self.assertTrue(model_output[\"score\"].shape[1] == 1)", "def test_simple_creation():\n # Get model file\n create.main(\"mlp\", \"10:12:8\", \"model_test.tar\")", "def test_copied_models_are_equal(dbdiskrepo):\n original = fit_model()\n\n shallow = copy(original)\n assert original.artifact.id == shallow.artifact.id\n assert original.artifact.value_id == shallow.artifact.value_id\n assert hash(original) == hash(shallow)\n\n deep = deepcopy(original)\n assert original.artifact.id == deep.artifact.id\n assert original.artifact.value_id == deep.artifact.value_id\n assert hash(original) == hash(deep)", "def testModel( self, classTest, classPred):", "def test_copying_layout(empty_model):\n assert 1 == 0 # TODO", "def test_reproducible(self):\n model_1 = PoincareModel(self.data_large, seed=1, negative=3, burn_in=1)\n model_1.train(epochs=2)\n\n model_2 = PoincareModel(self.data_large, seed=1, negative=3, burn_in=1)\n model_2.train(epochs=2)\n self.assertTrue(np.allclose(model_1.kv.syn0, model_2.kv.syn0))", "def test():\n parser = argparse.ArgumentParser()\n parser.add_argument('-n', '--name', type=str, help='name of the model',\n default='model_new_o')\n parser.add_argument('-f', '--filename', type=str,\n help='name of the dataset (.h5 file)', default='./dataset.h5')\n parser.add_argument('-bs', '--batch-size', type=int,\n help='size of the batches of the training data', default=256)\n args = parser.parse_args()\n\n name = args.name\n filename = args.filename\n batch_size = args.batch_size\n\n out_channels = 400\n model_path = './model/' 
+ name\n checkpoint_path = model_path + '/checkpoints'\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n\n for k, v in vars(args).items():\n print('{0} = \"{1}\"'.format(k, v))\n print('device = \"' + device + '\"')\n\n if not os.path.exists(checkpoint_path):\n print('Model parameters not found: ' + checkpoint_path)\n exit()\n\n # Dataset\n\n input_cols = ['camera', 'pos_x', 'pos_y', 'theta']\n target_cols = ['target_map']\n train_test_split = 11\n\n dataset = get_dataset(filename, device=device, augment=False,\n input_cols=input_cols, target_cols=target_cols)\n split_index = dataset.cumulative_sizes[train_test_split]\n\n # Model\n\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n model = NN(in_channels=3, out_channels=out_channels).to(device)\n model.load_state_dict(torch.load(checkpoint_path + '/best.pth'))\n summary(model, (3, 64, 80), device=device)\n\n auc_function = MaskedAUROC()\n\n # Testing\n\n aucs = []\n for x, px, py, pt, y in dataset.batches(batch_size, start=split_index, shuffle=False):\n pose = torch.stack([px, py, pt], dim=-1).to(device)\n mask = y > -1\n\n preds = model(x)\n\n aucs.append(auc_function(preds, y, mask).cpu().numpy())\n\n auc = np.nanmean(aucs, axis=0).reshape(20, 20)\n auc = np.rot90(auc, 1)\n auc = np.fliplr(auc) * 100\n\n print('AUC: ' + str(auc.mean().item()))\n\n print(auc)\n\n rounded = (100 * coords).round(2).astype(int)\n fig, ax = plt.subplots(nrows=2, ncols=1, figsize=(7, 5.8))\n sns.distplot(auc, bins=int(np.ceil(auc.max() - auc.min())),\n ax=ax[0], kde=False, rug=False, color='red', hist_kws={'rwidth': 0.75})\n sns.heatmap(auc, cmap='gray', annot=True, cbar_kws={'shrink': .8},\n vmin=50, vmax=100, linewidths=0, ax=ax[1])\n plt.yticks(.5 + np.arange(20), np.unique(rounded[:, 0])[::-1])\n plt.xticks(.5 + np.arange(20), np.unique(rounded[:, 1]))\n plt.xlabel('Y [cm]')\n plt.ylabel('X [cm]')\n plt.setp(ax[1].xaxis.get_majorticklabels(), rotation=0)\n plt.setp(ax[1].yaxis.get_majorticklabels(), rotation=0)\n plt.axis('equal')\n plt.tight_layout()\n plt.show()", "def test09(self):\n model = self.setup_model02()\n\n model.dual[model.g] = 1\n model.ipopt_zL_out[model.x[1]] = 1\n model.ipopt_zL_out[model.x[2]] = 1\n model.ipopt_zU_out[model.x[1]] = 1\n model.ipopt_zU_out[model.x[2]] = 1\n\n wts = StoreSpec.suffix(suffix_filter=(\"dual\",))\n to_json(model, fname=self.fname, wts=wts)\n\n model.dual[model.g] = 10\n model.ipopt_zL_out[model.x[1]] = 10\n model.ipopt_zL_out[model.x[2]] = 10\n model.ipopt_zU_out[model.x[1]] = 10\n model.ipopt_zU_out[model.x[2]] = 10\n\n from_json(model, fname=self.fname, wts=wts)\n assert model.dual[model.g] == 1\n assert model.ipopt_zL_out[model.x[1]] == 10\n assert model.ipopt_zL_out[model.x[2]] == 10\n assert model.ipopt_zU_out[model.x[1]] == 10\n assert model.ipopt_zU_out[model.x[2]] == 10", "def test_set_output_implicitly(self):\n self.command.output = \"\"\n self.command.package = self.input_ovf\n self.assertEqual(self.command.output, \"\")\n self.command.run()\n self.assertEqual(self.command.output, self.input_ovf)", "def test_merge_multiply(self):\n input_tensor = Input(shape=(3,))\n x1 = Dense(4)(input_tensor)\n x2 = Dense(5)(x1)\n x3 = Dense(5)(x1)\n x4 = merge([x2, x3], mode=\"mul\")\n x5 = Dense(7)(x4)\n\n model = Model(input=[input_tensor], output=[x5])\n input_names = [\"data\"]\n output_names = [\"output\"]\n\n spec = keras.convert(model, input_names, output_names).get_spec()\n self.assertIsNotNone(spec)\n\n # Test the model class\n self.assertIsNotNone(spec.description)\n 
self.assertTrue(spec.HasField(\"neuralNetwork\"))\n\n # Test the inputs and outputs\n self.assertEqual(len(spec.description.input), len(input_names))\n self.assertEqual(\n sorted(input_names), sorted(map(lambda x: x.name, spec.description.input))\n )\n self.assertEqual(len(spec.description.output), len(output_names))\n self.assertEqual(\n sorted(output_names), sorted(map(lambda x: x.name, spec.description.output))\n )", "def test10(self):\n model = self.setup_model02()\n\n model.dual[model.g] = 1\n model.ipopt_zL_out[model.x[1]] = 1\n model.ipopt_zL_out[model.x[2]] = 1\n model.ipopt_zU_out[model.x[1]] = 1\n model.ipopt_zU_out[model.x[2]] = 1\n\n wts = StoreSpec.suffix(suffix_filter=(\"dual\",))\n to_json(model, fname=self.fname, wts=wts)\n\n model.dual[model.g] = 10\n model.ipopt_zL_out[model.x[1]] = 10\n model.ipopt_zL_out[model.x[2]] = 10\n model.ipopt_zU_out[model.x[1]] = 10\n model.ipopt_zU_out[model.x[2]] = 10\n\n from_json(model, fname=self.fname, wts=StoreSpec.suffix())\n assert model.dual[model.g] == 1\n assert model.ipopt_zL_out[model.x[1]] == 10\n assert model.ipopt_zL_out[model.x[2]] == 10\n assert model.ipopt_zU_out[model.x[1]] == 10\n assert model.ipopt_zU_out[model.x[2]] == 10", "def testOutputs(self):\n # Remember original (correct) example outputs\n old_files = self.read_outputs()\n\n # Set up and run Xanthos\n ini = 'example/pm_abcd_mrtm.ini'\n xth = Xanthos(ini)\n res = xth.execute()\n\n # Check result dimensions\n self.assertEqual(res.Q.shape, (67420, 372))\n\n # Test that new outputs equal old outputs.\n new_files = self.read_outputs()\n for k in new_files.keys():\n pd.testing.assert_frame_equal(new_files[k], old_files[k])", "def _post_model_build(self):\r\n with tf.variable_scope('copy2test'):\r\n all_variables = tf.global_variables()\r\n train_vars = dict([(v.name, v) for v in all_variables\r\n if not v.name.startswith('test/')])\r\n test_vars = dict([(v.name, v) for v in all_variables\r\n if v.name.startswith('test/')])\r\n self._copy_variables_to_test_model_op = tf.tuple([\r\n test_vars['test/' + k].assign(train_vars[k]) for k in train_vars.keys()\r\n if 'test/' + k in test_vars\r\n ])\r\n\r\n # Begin testing thread\r\n self._coordinator = tf.train.Coordinator()\r\n self._thread = threading.Thread(target=self.test_job,\r\n name='%s_tester' % self.model.identifier)\r\n self._thread.daemon = True\r\n self._thread.start()\r\n\r\n # Pick tensors we need to evaluate\r\n all_tensors = dict(self.model.loss_terms['test'], **self.model.metrics['test'])\r\n self._tensors_to_evaluate = dict([(n, t) for n, t in all_tensors.items()])\r\n loss_terms_to_evaluate = dict([(n, t) for n, t in self.model.loss_terms['test'].items()\r\n if t in self._tensors_to_evaluate.values()])\r\n metrics_to_evaluate = dict([(n, t) for n, t in self.model.metrics['test'].items()\r\n if t in self._tensors_to_evaluate.values()])\r\n\r\n # Placeholders for writing summaries at end of test run\r\n self._placeholders = {}\r\n for type_, tensors in (('loss', loss_terms_to_evaluate),\r\n ('metric', metrics_to_evaluate)):\r\n for name in tensors.keys():\r\n name = '%s/test/%s' % (type_, name)\r\n placeholder = tf.placeholder(dtype=np.float32, name=name + '_placeholder')\r\n self.summary.scalar(name, placeholder)\r\n self._placeholders[name.split('/')[-1]] = placeholder", "def test_output(self):\n new_route = self.route.output(\"test data\", transform=\"transformed\")\n assert new_route != self.route\n assert new_route.route[\"output\"] == \"test data\"\n assert new_route.route[\"transform\"] == 
\"transformed\"", "def test_main_modular_reuse_model(tmpdir_factory: TempdirFactory) -> None:\n\n output_directory = Path(tmpdir_factory.mktemp('output'))\n\n input_filename = OPEN_API_DATA_PATH / 'modular.yaml'\n output_path = output_directory / 'model'\n\n with freeze_time(TIMESTAMP):\n main(\n [\n '--input',\n str(input_filename),\n '--output',\n str(output_path),\n '--reuse-model',\n ]\n )\n main_modular_dir = EXPECTED_MAIN_PATH / 'main_modular_reuse_model'\n for path in main_modular_dir.rglob('*.py'):\n result = output_path.joinpath(path.relative_to(main_modular_dir)).read_text()\n assert result == path.read_text()", "def output_model(output_dir=\"./output\", model_out=None): \n # Find the path of MODEL_INIT via the parameter file\n par_file = os.path.join(output_dir, \"seisflows_paths.json\")\n with open(par_file) as f:\n model_init = json.load(f)[\"MODEL_INIT\"]\n\n assert(os.path.exists(model_init)), \\\n f\"MODEL_INIT does not exist\\n{model_init}\"\n print(f\"MODEL INIT: {model_init}\")\n\n # Determine the model number, only choose numbers, no 'init' or 'true'\n if model_out is None:\n available_models = glob(os.path.join(output_dir, \"model_[0-9]???\"))\n model_out = sorted(available_models)[-1]\n else:\n model_out = os.path.join(output_dir, model_out)\n\n assert(os.path.exists(model_out)), f\"MODEL_OUT does not exist\\n{model_out}\"\n print(f\"MODEL OUT: {model_out}\")\n\n # Quick check to make sure NPROC is the same for each directory\n nproc_check = [0, 0]\n for i, m in enumerate([model_init, model_out]):\n nprocs = [os.path.basename(_) for _ in glob(os.path.join(m, \"*\"))]\n # list comprehension strips string parts, e.g. 'proc000001_vp.bin' -> 1\n nproc_check[i] = max([int(_.split('_')[0][4:]) for _ in nprocs])\n assert(nproc_check[0] == nproc_check[1]), f\"NPROCS differ {nproc_check}\"\n print(f\"NPROC: {nproc_check[0]}\")\n \n # Symlink all available files that don't already exist in model_out\n model_init_files = glob(os.path.join(model_init, \"*\"))\n for src in model_init_files:\n dst = os.path.join(model_out, os.path.basename(src))\n if os.path.exists(dst):\n continue\n else:\n os.symlink(src, dst)", "def main():\n parser = argparse.ArgumentParser(description='Behavioral Cloning Training Program')\n parser.add_argument('-d', help='data directory', dest='data_dir', type=str, default='data')\n parser.add_argument('-t', help='test size fraction', dest='test_size', type=float, default=0.2)\n parser.add_argument('-k', help='drop out probability', dest='keep_prob', type=float, default=0.5)\n parser.add_argument('-n', help='number of epochs', dest='nb_epoch', type=int, default=5)\n parser.add_argument('-c', help='steering correction', dest='correction', type=float, default=0.2)\n parser.add_argument('-b', help='batch size', dest='batch_size', type=int, default=32)\n parser.add_argument('-o', help='save best models only', dest='save_best_only', type=s2b, default='true')\n parser.add_argument('-l', help='learning rate', dest='learning_rate', type=float, default=1.0e-3)\n args = parser.parse_args()\n\n print('-' * 30)\n print('Parameters')\n print('-' * 30)\n for key, value in vars(args).items():\n print('{:<20} := {}'.format(key, value))\n print('-' * 30)\n\n data = load_data(args)\n model = build_model(args)\n train_model(model, args, *data)", "def test_second_keras_model_created():\n X, _, _, _ = get_data()\n tf.random.set_seed(12345)\n initializer = tf.keras.initializers.Zeros()\n input_data = Input(shape=X[0].shape)\n xx = Dense(128, activation=\"relu\", 
kernel_initializer=initializer)(input_data)\n xx = Dense(128, activation=\"relu\", kernel_initializer=initializer)(xx)\n xx = Dense(64, activation=\"relu\", kernel_initializer=initializer)(xx)\n output = Dense(n_classes, activation=\"softmax\", kernel_initializer=initializer)(xx)\n _ = SafeKerasModel(\n inputs=input_data,\n outputs=output,\n name=\"test\",\n num_samples=X.shape[0],\n epochs=EPOCHS,\n )\n model2 = SafeKerasModel(\n inputs=input_data,\n outputs=output,\n name=\"test\",\n num_samples=X.shape[0],\n epochs=EPOCHS,\n )\n rightname = \"KerasModel\"\n assert (\n model2.model_type == rightname\n ), \"failed check for second model type being set in init()\"\n # noise multiplier should have been reset from default to one that matches rules.json\n assert model2.noise_multiplier == 0.7", "def test11(self):\n model = self.setup_model02()\n\n model.dual[model.g] = 1\n model.ipopt_zL_out[model.x[1]] = 1\n model.ipopt_zL_out[model.x[2]] = 1\n model.ipopt_zU_out[model.x[1]] = 1\n model.ipopt_zU_out[model.x[2]] = 1\n\n to_json(model, fname=self.fname, wts=StoreSpec.suffix())\n\n model.dual[model.g] = 10\n model.ipopt_zL_out[model.x[1]] = 10\n model.ipopt_zL_out[model.x[2]] = 10\n model.ipopt_zU_out[model.x[1]] = 10\n model.ipopt_zU_out[model.x[2]] = 10\n\n wts = StoreSpec.suffix(suffix_filter=(\"dual\",))\n from_json(model, fname=self.fname, wts=wts)\n assert model.dual[model.g] == 1\n assert model.ipopt_zL_out[model.x[1]] == 10\n assert model.ipopt_zL_out[model.x[2]] == 10\n assert model.ipopt_zU_out[model.x[1]] == 10\n assert model.ipopt_zU_out[model.x[2]] == 10", "def test_compute_model(self):\n # Set test tolerances (for infinity norm of transfer function\n # difference)\n tf_abs_tol = 1e-6\n tf_rel_tol = 1e-4\n\n # Set time parameters for discrete-time simulation\n dt = 0.1\n num_time_steps = 1000\n\n # Set size of plant and model. For test, don't reduce the system, just\n # check that it comes back close to the original plant. Also, note that\n # using more than 8 states causes poorly conditioned TF coeffs\n # (https://github.com/scipy/scipy/issues/2980)\n num_states_plant = 8\n num_states_model = num_states_plant\n\n # Loop through different numbers of inputs, numbers of outputs, and\n # sampling intervals\n for num_inputs in [1, 3]:\n for num_outputs in [1, 2]:\n for sample_interval in [1, 2, 4]:\n # Define time steps at which to save data. These will be of\n # the form [0, 1, p, p + 1, 2p, 2p + 1, ...] where p is the\n # sample interval.\n time_steps = make_time_steps(\n num_time_steps, sample_interval)\n # # Create a state space system\n # A_plant, B_plant, C_plant = util.drss(\n # num_states_plant, num_inputs, num_outputs)\n A_plant = util.load_array_text(\n join(self.data_dir, 'A_in%d_out%d.txt') % (\n num_inputs, num_outputs))\n B_plant = util.load_array_text(\n join(self.data_dir, 'B_in%d_out%d.txt') % (\n num_inputs, num_outputs))\n C_plant = util.load_array_text(\n join(self.data_dir, 'C_in%d_out%d.txt') % (\n num_inputs, num_outputs))\n\n # Simulate an impulse response using the state space system.\n # This will generate Markov parameters at all timesteps [0,\n # 1, 2, 3, ...]. 
Only keep data at the desired time steps,\n # which are separated by a sampling interval (see above\n # comment).\n Markovs = util.impulse(\n A_plant, B_plant, C_plant,\n time_steps[-1] + 1)[time_steps]\n\n # Compute a model using ERA\n my_ERA = era.ERA(verbosity=0)\n A_model, B_model, C_model = my_ERA.compute_model(\n Markovs, num_states_model)\n\n # Save ERA model to disk\n A_path_computed = join(self.out_dir, 'A_computed.txt')\n B_path_computed = join(self.out_dir, 'B_computed.txt')\n C_path_computed = join(self.out_dir, 'C_computed.txt')\n my_ERA.put_model(\n A_path_computed, B_path_computed, C_path_computed)\n\n # Check normalized Markovs\n rtol = 1e-5 # 1e-6\n atol = 1e-5 # 1e-10\n Markovs_model = util.impulse(\n A_model, B_model, C_model,\n time_steps[-1] + 1)[time_steps]\n max_Markov = np.amax(Markovs)\n eigs_plant = np.linalg.eig(A_plant)[0]\n eigs_model = np.linalg.eig(A_model)[0]\n # print 'markovs shape', Markovs.shape\n # print 'max plant eig', np.abs(eigs_plant).max()\n # print 'max model eig', np.abs(eigs_model).max()\n # print 'max plant markov', max_Markov\n # print 'max model markov', np.amax(Markovs_model)\n # print 'markov diffs', (\n # Markovs - Markovs_model).squeeze().max()\n\n '''\n import matplotlib.pyplot as plt\n plt.figure()\n plt.semilogy(np.abs(Markovs).squeeze(), 'b')\n plt.semilogy(np.abs(Markovs_model).squeeze(), 'r--')\n plt.axis(\n [0, time_steps[-1], Markovs.min(), Markovs.max()])\n '''\n\n np.testing.assert_allclose(\n Markovs_model.squeeze(),\n Markovs.squeeze(),\n rtol=rtol, atol=atol)\n\n\n # plt.show()\n '''\n # Use Scipy to check that transfer function of ERA model is\n # close to transfer function of full model. Do so by\n # computing the infinity norm (H_inf) of the difference\n # between the transfer functions. 
Since Scipy can't handle\n # MIMO transfer functions, loop through each input-output\n # pair individually.\n for input_idx in range(num_inputs):\n for output_idx in range(num_outputs):\n\n # Compute transfer functions\n tf_plant = scipy.signal.StateSpace(\n A_plant, B_plant[:, input_idx:input_idx + 1],\n C_plant[output_idx:output_idx + 1, :],\n 0, dt=dt).to_tf()\n tf_model = scipy.signal.StateSpace(\n A_model,\n B_model[:, input_idx:input_idx + 1],\n C_model[output_idx:output_idx + 1, :],\n 0, dt=dt).to_tf()\n tf_diff = util.sub_transfer_functions(\n tf_plant, tf_model, dt=dt)\n\n # Compute transfer function norms\n tf_plant_inf_norm = util.compute_inf_norm_discrete(\n tf_plant, dt)\n tf_diff_inf_norm = util.compute_inf_norm_discrete(\n tf_diff, dt)\n\n # Test values\n print 'err_frac', (\n tf_diff_inf_norm / tf_plant_inf_norm)\n self.assertTrue(\n tf_diff_inf_norm / tf_plant_inf_norm <\n tf_rel_tol)\n '''\n\n # Also test that saved reduced model mats are equal to those\n # returned in memory\n np.testing.assert_equal(\n util.load_array_text(A_path_computed), A_model)\n np.testing.assert_equal(\n util.load_array_text(B_path_computed), B_model)\n np.testing.assert_equal(\n util.load_array_text(C_path_computed), C_model)", "def test_merge_add(self):\n input_tensor = Input(shape=(3,))\n x1 = Dense(4)(input_tensor)\n x2 = Dense(5)(x1)\n x3 = Dense(5)(x1)\n x4 = merge([x2, x3], mode=\"sum\")\n x5 = Dense(7)(x4)\n\n model = Model(input=[input_tensor], output=[x5])\n input_names = [\"data\"]\n output_names = [\"output\"]\n\n spec = keras.convert(model, input_names, output_names).get_spec()\n self.assertIsNotNone(spec)\n\n # Test the model class\n self.assertIsNotNone(spec.description)\n self.assertTrue(spec.HasField(\"neuralNetwork\"))\n\n # Test the inputs and outputs\n self.assertEqual(len(spec.description.input), len(input_names))\n self.assertEqual(\n sorted(input_names), sorted(map(lambda x: x.name, spec.description.input))\n )\n self.assertEqual(len(spec.description.output), len(output_names))\n self.assertEqual(\n sorted(output_names), sorted(map(lambda x: x.name, spec.description.output))\n )", "def test_output_is_counterfactuals(self):\n\n output = self._gen.generate(\n example=self._example,\n model=self._model,\n dataset=self._dataset,\n config=self._config)\n self.assertGreaterEqual(len(output), 1)\n target_prediction = self._predict_and_return_argmax_label(self._example)\n for cf_example in output:\n cf_prediction = self._predict_and_return_argmax_label(cf_example)\n self.assertNotEqual(cf_prediction, target_prediction)", "def test08(self):\n model = self.setup_model02()\n model.x[1].fix(1)\n\n model.dual[model.g] = 1\n model.ipopt_zL_out[model.x[1]] = 1\n model.ipopt_zL_out[model.x[2]] = 1\n model.ipopt_zU_out[model.x[1]] = 1\n model.ipopt_zU_out[model.x[2]] = 1\n\n to_json(model, fname=self.fname, wts=StoreSpec.suffix())\n\n model.dual[model.g] = 10\n model.ipopt_zL_out[model.x[1]] = 10\n model.ipopt_zL_out[model.x[2]] = 10\n model.ipopt_zU_out[model.x[1]] = 10\n model.ipopt_zU_out[model.x[2]] = 10\n model.g.deactivate()\n model.x[1].setlb(-4)\n model.x[1].unfix()\n model.x[2].fix(6)\n\n from_json(model, fname=self.fname, wts=StoreSpec.suffix())\n assert value(model.x[1]) == 1\n assert value(model.x[2]) == 6\n assert not model.x[1].fixed\n assert model.x[2].fixed\n assert not model.g.active\n assert model.dual[model.g] == 1\n assert model.ipopt_zL_out[model.x[1]] == 1\n assert model.ipopt_zL_out[model.x[2]] == 1\n assert model.ipopt_zU_out[model.x[1]] == 1\n assert 
model.ipopt_zU_out[model.x[2]] == 1\n assert model.x[1].lb == -4", "def run_tests():\n source1 = TextModel('prep')\n source1.add_file('source_model_1.txt')\n \n source2 = TextModel('athletes')\n source2.add_file('source_model_2.txt')\n\n new1 = TextModel('my_writing')\n new1.add_file('my_writing.txt')\n new1.classify(source1, source2)\n\n # Add code for three other new models below.", "def test_addOutput(self):\n print(\"\\nTest 2: Adding OutputNode\")\n builder = StaticBuilder()\n builder.addInput(10, name=\"In\")\n builder.addInner(3, name=\"Det\")\n o_name = builder.addOutput(name=\"Out\")\n \n o1 = builder.nodes[o_name]\n print(\"\\nNode keys in builder:\", list(builder.nodes.keys()))\n print(\"This node's key:\", o_name)\n self.assertEqual(o1.label, 2, \"The label has not been assigned correctly\")\n self.assertEqual(builder.num_nodes, 3, \"The number of nodes has not been \"\n \"assigned correctly\")\n self.assertEqual(o1.num_declared_outputs, 0, \"The number of outputs of the \"\n \"OutputNode has not been assigned correctly\")\n self.assertEqual(o1.num_declared_inputs, 0, \"The number of inputs of the \"\n \"OutputNode has not been assigned correctly\")", "def test_predictor():", "def test_shared_objects_wrapper(self):\n input_ = keras.Input(shape=(1,))\n unwrapped = keras.layers.Layer(name='unwrapped')\n wrapped = keras.layers.Wrapper(unwrapped, name='wrapped')\n model = keras.Model(inputs=input_,\n outputs=[unwrapped(input_), wrapped(input_)])\n\n # Test recreating directly from config\n config = model.get_config()\n loaded = keras.Model.from_config(config)\n self.assertIs(loaded.layers[1], loaded.layers[2].layer)\n\n # Test saving and loading to disk\n save_format = testing_utils.get_save_format()\n saved_model_dir = self._save_model_dir()\n keras.models.save_model(model, saved_model_dir, save_format=save_format)\n loaded = keras.models.load_model(saved_model_dir)\n self.assertIs(loaded.layers[1], loaded.layers[2].layer)", "def test_save_and_load_model_additional_inputs(tmp_path):\n spectrum_binner, test_generator = get_test_binner_and_generator_additional_inputs()\n # generic retrieval of the input shape of additional inputs\n input, _ = test_generator[0]\n\n spectrum_length = len(input[0][0])\n nr_of_additional_input = len(input[1][0])\n\n model = SiameseModel(spectrum_binner, base_dims=(200, 200, 200), embedding_dim=200, dropout_rate=0.2)\n model.compile(loss='mse', optimizer=AdamOptimizer(learning_rate=0.001))\n model.summary()\n \n assert model.base.layers[2].input_shape == [(None, spectrum_length), (None, nr_of_additional_input)], \\\n \"Concatenate Layer has a false input shape\"\n model.fit(test_generator,\n validation_data=test_generator,\n epochs=2)\n\n # Write to test file\n filename = os.path.join(tmp_path, \"model_export_test_additional_inputs.hdf5\")\n model.save(filename)\n\n # Test if file exists\n assert os.path.isfile(filename)\n\n # Test if content is correct\n model_import = load_model(filename)\n weights_original = model.base.layers[4].get_weights()[0]\n weights_imported = model_import.base.layers[4].get_weights()[0]\n assert np.all(weights_original == weights_imported), \\\n \"Imported and original model weights should be the same\"\n assert model.model.summary() == model_import.model.summary(), \\\n \"Expect same architecture for original and imported model\"\n assert model.spectrum_binner.additional_metadata == (StandardScaler(\"precursor_mz\", mean=0, std=1000), StandardScaler(\"precursor_mz\", mean=0, std=100), )", "def test_13_output(self):\n\n # Now 
attempt to receive from a repository.\n self.pkgrepo(\"create {0}\".format(self.tempdir))\n self.pkgrecv(self.dpath1, \"-d {0} -n -v \\*\".format(self.tempdir))\n expected = \"\"\"\\\nRetrieving packages (dry-run) ...\n Packages to add: 9\n Files to retrieve: 17\nEstimated transfer size: 528.00 B\n\"\"\"\n self.assert_(expected in self.output, self.output)\n for s in self.published:\n self.assert_(fmri.PkgFmri(s).get_fmri(anarchy=True,\n include_scheme=False) in self.output)\n\n # Clean up for next test.\n shutil.rmtree(self.tempdir)\n\n # Now attempt to receive from a repository to a package archive.\n self.pkgrecv(self.dpath1, \"-a -d {0} -n -v \\*\".format(self.tempdir))\n expected = \"\"\"\\\nArchiving packages (dry-run) ...\n Packages to add: 9\n Files to retrieve: 17\nEstimated transfer size: 528.00 B\n\"\"\"\n self.assert_(expected in self.output, self.output)\n for s in self.published:\n self.assert_(fmri.PkgFmri(s).get_fmri(anarchy=True,\n include_scheme=False) in self.output)\n\n # Now attempt to clone a repository.\n self.pkgrepo(\"create {0}\".format(self.tempdir))\n self.pkgrecv(self.dpath1, \"--clone -d {0} -p \\* -n -v\" \\\n .format(self.tempdir))\n expected = \"\"\"\\\nRetrieving packages (dry-run) ...\n Packages to add: 9\n Files to retrieve: 17\nEstimated transfer size: 528.00 B\n\"\"\"\n self.assert_(expected in self.output, self.output)\n for s in self.published:\n self.assert_(fmri.PkgFmri(s).get_fmri(anarchy=True,\n include_scheme=False) in self.output)\n\n # Test that output is correct if -n is not specified.\n self.pkgrecv(self.dpath1, \"-d {0} -v \\*\".format(self.tempdir))\n self.assert_(\"dry-run\" not in self.output)", "def test_trainer(testsetting, w2vmodel, tweets, targets, labels, ids, tweets_test, targets_test, labels_test, ids_test, hidden_size, max_epochs, tanhOrSoftmax, dropout, modeltype=\"conditional\", targetInTweet={}, testid = \"test-1\", pretrain = \"pre_cont\", acc_thresh=0.9, sep = False):\n\n # parameters\n learning_rate = 0.0001\n batch_size = 70\n input_size = 100\n\n outfolder = \"_\".join([testid, modeltype, testsetting, \"hidden-\" + str(hidden_size), tanhOrSoftmax])\n\n # real data stance-semeval\n target_size = 3\n max_seq_length = len(tweets[0])\n if modeltype == \"conditional-reverse\":\n data = [np.asarray(targets), np.asarray(tweets), np.asarray(ids), np.asarray(labels)]\n else:\n data = [np.asarray(tweets), np.asarray(targets), np.asarray(ids), np.asarray(labels)]\n\n X = w2vmodel.syn0\n vocab_size = len(w2vmodel.vocab)\n\n if modeltype == \"concat\":\n model, placeholders = get_model_concat(batch_size, max_seq_length, input_size,\n hidden_size, target_size, vocab_size, pretrain, tanhOrSoftmax, dropout)\n elif modeltype == \"tweetonly\":\n model, placeholders = get_model_tweetonly(batch_size, max_seq_length, input_size,\n hidden_size, target_size, vocab_size, pretrain, tanhOrSoftmax, dropout)\n data = [np.asarray(tweets), np.asarray(ids), np.asarray(labels)]\n elif modeltype == \"conditional\" or modeltype == \"conditional-reverse\":\n # output of get_model(): model, [inputs, inputs_cond]\n model, placeholders = get_model_conditional(batch_size, max_seq_length, input_size,\n hidden_size, target_size, vocab_size, pretrain, tanhOrSoftmax, dropout)\n elif modeltype == \"bicond\":\n model, placeholders = get_model_bidirectional_conditioning(batch_size, max_seq_length, input_size, hidden_size, target_size,\n vocab_size, pretrain, tanhOrSoftmax, dropout)\n elif modeltype == \"conditional-target-feed\":\n model, placeholders = 
get_model_conditional_target_feed(batch_size, max_seq_length, input_size, hidden_size,\n target_size,\n vocab_size, pretrain, tanhOrSoftmax, dropout)\n elif modeltype == \"bicond-sepembed\":\n model, placeholders = get_model_bicond_sepembed(batch_size, max_seq_length, input_size, hidden_size,\n target_size,\n vocab_size, pretrain, tanhOrSoftmax, dropout)\n sep = True\n\n ids = tf.placeholder(tf.float32, [batch_size, 1], \"ids\") #ids are so that the dev/test samples can be recovered later since we shuffle\n targets = tf.placeholder(tf.float32, [batch_size, target_size], \"targets\")\n\n\n loss = tf.nn.softmax_cross_entropy_with_logits(model, targets) # targets: labels (e.g. pos/neg/neutral)\n\n optimizer = tf.train.AdamOptimizer(learning_rate)\n\n batcher = BatchBucketSampler(data, batch_size)\n acc_batcher = BatchBucketSampler(data, batch_size)\n\n placeholders += [ids]\n placeholders += [targets]\n\n pad_nr = batch_size - (\n len(labels_test) % batch_size) + 1 # since train/test batches need to be the same size, add padding for test\n\n # prepare the testing data. Needs to be padded to fit the batch size.\n if modeltype == \"tweetonly\":\n data_test = [np.lib.pad(np.asarray(tweets_test), ((0, pad_nr), (0, 0)), 'constant', constant_values=(0)),\n np.lib.pad(np.asarray(ids_test), ((0, pad_nr), (0, 0)), 'constant', constant_values=(0)),\n np.lib.pad(np.asarray(labels_test), ((0, pad_nr), (0, 0)), 'constant', constant_values=(0))\n ]\n elif modeltype == \"conditional-reverse\":\n data_test = [np.lib.pad(np.asarray(targets_test), ((0, pad_nr), (0, 0)), 'constant', constant_values=(0)),\n np.lib.pad(np.asarray(tweets_test), ((0, pad_nr), (0, 0)), 'constant', constant_values=(0)),\n np.lib.pad(np.asarray(ids_test), ((0, pad_nr), (0, 0)), 'constant', constant_values=(0)),\n np.lib.pad(np.asarray(labels_test), ((0, pad_nr), (0, 0)), 'constant', constant_values=(0))\n ]\n else:\n data_test = [np.lib.pad(np.asarray(tweets_test), ((0, pad_nr), (0, 0)), 'constant', constant_values=(0)),\n np.lib.pad(np.asarray(targets_test), ((0, pad_nr), (0, 0)), 'constant', constant_values=(0)),\n np.lib.pad(np.asarray(ids_test), ((0, pad_nr), (0, 0)), 'constant', constant_values=(0)),\n np.lib.pad(np.asarray(labels_test), ((0, pad_nr), (0, 0)), 'constant', constant_values=(0))\n ]\n\n corpus_test_batch = BatchBucketSampler(data_test, batch_size)\n\n\n with tf.Session() as sess:\n summary_writer = tf.train.SummaryWriter(\"./out/save\", graph_def=sess.graph_def)\n\n hooks = [\n SpeedHook(summary_writer, iteration_interval=50, batch_size=batch_size),\n SaveModelHookDev(path=\"../out/save/\" + outfolder, at_every_epoch=1),\n SemEvalHook(corpus_test_batch, placeholders, 1),\n LossHook(summary_writer, iteration_interval=50),\n AccuracyHook(summary_writer, acc_batcher, placeholders, 2),\n AccuracyHookIgnoreNeutral(summary_writer, acc_batcher, placeholders, 2)\n ]\n\n trainer = Trainer(optimizer, max_epochs, hooks)\n epoch = trainer(batcher=batcher, acc_thresh=acc_thresh, pretrain=pretrain, embedd=X, placeholders=placeholders,\n loss=loss, model=model, sep=sep)\n\n print(\"Applying to test data, getting predictions for NONE/AGAINST/FAVOR\")\n\n predictions_detailed_all = []\n predictions_all = []\n ids_all = []\n\n load_model_dev(sess, \"../out/save/\" + outfolder + \"_ep\" + str(epoch), \"model.tf\")\n\n total = 0\n correct = 0\n for values in corpus_test_batch:\n total += len(values[-1])\n feed_dict = {}\n for i in range(0, len(placeholders)):\n feed_dict[placeholders[i]] = values[i]\n truth = np.argmax(values[-1], 1) # 
values[2] is a 3-length one-hot vector containing the labels\n if pretrain == \"pre\" and sep == True: # this is a bit hacky. To do: improve\n vars = tf.all_variables()\n emb_var = vars[0]\n emb_var2 = vars[1]\n sess.run(emb_var.assign(X))\n sess.run(emb_var2.assign(X))\n if pretrain == \"pre\": # this is a bit hacky. To do: improve\n vars = tf.all_variables()\n emb_var = vars[0]\n sess.run(emb_var.assign(X))\n predictions = sess.run(tf.nn.softmax(model), feed_dict=feed_dict)\n predictions_detailed_all.extend(predictions)\n ids_all.extend(values[-2])\n predicted = sess.run(tf.arg_max(tf.nn.softmax(model), 1),\n feed_dict=feed_dict)\n predictions_all.extend(predicted)\n correct += sum(truth == predicted)\n\n print(\"Num testing samples \" + str(total) +\n \"\\tAcc \" + str(float(correct)/total) +\n \"\\tCorrect \" + str(correct) + \"\\tTotal \" + str(total))\n\n\n # postprocessing\n if targetInTweet != {}:\n\n predictions_new = []\n ids_new = []\n it = 0\n for pred_prob in predictions_detailed_all:\n id = ids_all[it]\n if id == 0.0:\n it += 1\n continue\n inTwe = targetInTweet[id.tolist()[0]]\n if inTwe == True: #and (pred_prob[2] > 0.1 or pred_prob[1] > 0.1): #NONE/AGAINST/FAVOUR\n #print(str(id), \"inTwe!\")\n pred = 1\n if pred_prob[2] > pred_prob[1]:\n pred = 2\n predictions_new.append(pred)\n else:\n plist = pred_prob.tolist()\n pred = plist.index(max(plist))\n predictions_new.append(pred)\n it += 1\n ids_new.append(id)\n return predictions_new, predictions_detailed_all, ids_new\n\n return predictions_all, predictions_detailed_all, ids_all", "def test_pregenerated_model(sub_test, case):\n\n if case.startswith(\"sensi2\"):\n model_name = sub_test + \"_o2\"\n else:\n model_name = sub_test\n\n model_swig_folder = str(\n Path(__file__).parents[2]\n / \"build\"\n / \"tests\"\n / \"cpp\"\n / f\"external_{model_name}-prefix\"\n / \"src\"\n / f\"external_{model_name}-build\"\n / \"swig\"\n )\n\n test_model_module = amici.import_model_module(\n module_name=model_name, module_path=model_swig_folder\n )\n model = test_model_module.getModel()\n solver = model.getSolver()\n amici.readModelDataFromHDF5(\n options_file, model.get(), f\"/{sub_test}/{case}/options\"\n )\n amici.readSolverSettingsFromHDF5(\n options_file, solver.get(), f\"/{sub_test}/{case}/options\"\n )\n\n edata = None\n if \"data\" in expected_results[sub_test][case].keys():\n edata = amici.readSimulationExpData(\n str(expected_results_file), f\"/{sub_test}/{case}/data\", model.get()\n )\n rdata = amici.runAmiciSimulation(model, solver, edata)\n\n check_derivative_opts = dict()\n\n if model_name == \"model_nested_events\":\n check_derivative_opts[\"rtol\"] = 1e-2\n elif model_name == \"model_events\":\n check_derivative_opts[\"atol\"] = 1e-3\n\n if (\n edata\n and solver.getSensitivityMethod()\n and solver.getSensitivityOrder()\n and len(model.getParameterList())\n and not model_name.startswith(\"model_neuron\")\n and not case.endswith(\"byhandpreeq\")\n ):\n check_derivatives(model, solver, edata, **check_derivative_opts)\n\n verify_simulation_opts = dict()\n\n if model_name.startswith(\"model_neuron\"):\n verify_simulation_opts[\"atol\"] = 1e-5\n verify_simulation_opts[\"rtol\"] = 1e-2\n\n if model_name.startswith(\"model_robertson\") and case == \"sensiforwardSPBCG\":\n verify_simulation_opts[\"atol\"] = 1e-3\n verify_simulation_opts[\"rtol\"] = 1e-3\n\n verify_simulation_results(\n rdata, expected_results[sub_test][case][\"results\"], **verify_simulation_opts\n )\n\n if model_name == \"model_steadystate\" and case == 
\"sensiforwarderrorint\":\n edata = amici.amici.ExpData(model.get())\n\n # Test runAmiciSimulations: ensure running twice\n # with same ExpData yields same results\n if (\n edata\n and model_name != \"model_neuron_o2\"\n and not (model_name == \"model_robertson\" and case == \"sensiforwardSPBCG\")\n ):\n if isinstance(edata, amici.amici.ExpData):\n edatas = [edata, edata]\n else:\n edatas = [edata.get(), edata.get()]\n\n rdatas = amici.runAmiciSimulations(\n model, solver, edatas, num_threads=2, failfast=False\n )\n verify_simulation_results(\n rdatas[0],\n expected_results[sub_test][case][\"results\"],\n **verify_simulation_opts,\n )\n verify_simulation_results(\n rdatas[1],\n expected_results[sub_test][case][\"results\"],\n **verify_simulation_opts,\n )\n\n # test residuals mode\n if solver.getSensitivityMethod() == amici.SensitivityMethod.adjoint:\n with pytest.raises(RuntimeError):\n solver.setReturnDataReportingMode(amici.RDataReporting.residuals)\n else:\n solver.setReturnDataReportingMode(amici.RDataReporting.residuals)\n rdata = amici.runAmiciSimulation(model, solver, edata)\n verify_simulation_results(\n rdata,\n expected_results[sub_test][case][\"results\"],\n fields=[\"t\", \"res\", \"sres\", \"y\", \"sy\", \"sigmay\", \"ssigmay\"],\n **verify_simulation_opts,\n )\n with pytest.raises(RuntimeError):\n solver.setSensitivityMethod(amici.SensitivityMethod.adjoint)\n\n chi2_ref = rdata.chi2\n\n # test likelihood mode\n solver.setReturnDataReportingMode(amici.RDataReporting.likelihood)\n rdata = amici.runAmiciSimulation(model, solver, edata)\n verify_simulation_results(\n rdata,\n expected_results[sub_test][case][\"results\"],\n fields=[\"t\", \"llh\", \"sllh\", \"s2llh\", \"FIM\"],\n **verify_simulation_opts,\n )\n\n # test sigma residuals\n\n if (\n model_name == \"model_jakstat_adjoint\"\n and solver.getSensitivityMethod() != amici.SensitivityMethod.adjoint\n ):\n model.setAddSigmaResiduals(True)\n solver.setReturnDataReportingMode(amici.RDataReporting.full)\n rdata = amici.runAmiciSimulation(model, solver, edata)\n # check whether activation changes chi2\n assert chi2_ref != rdata.chi2\n\n if (\n edata\n and solver.getSensitivityMethod()\n and solver.getSensitivityOrder()\n and len(model.getParameterList())\n ):\n check_derivatives(model, solver, edata, **check_derivative_opts)\n\n chi2_ref = rdata.chi2\n res_ref = rdata.res\n\n model.setMinimumSigmaResiduals(100)\n rdata = amici.runAmiciSimulation(model, solver, edata)\n # check whether changing the minimum changes res but not chi2\n assert np.isclose(chi2_ref, rdata.chi2)\n assert not np.allclose(res_ref, rdata.res)\n\n model.setMinimumSigmaResiduals(-10)\n rdata = amici.runAmiciSimulation(model, solver, edata)\n # check whether having a bad minimum results in nan chi2\n assert np.isnan(rdata.chi2)\n\n with pytest.raises(RuntimeError):\n model.getParameterByName(\"thisParameterDoesNotExist\")", "def test_modelgroup00101m2_positive(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/MGroupDef/modelGroup/modelGroup00101m/modelGroup00101m2.xsd\",\n instance=\"sunData/MGroupDef/modelGroup/modelGroup00101m/modelGroup00101m2_p.xml\",\n class_name=\"A\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def test_save_load(self):\n features = np.array([[0, 0], [0.1, 0.1], [0.4, 0.4], [1, 1]])\n labels = np.array([0, 0.1, 0.4, 1])\n num_inputs = 2\n qnn = TwoLayerQNN(\n num_inputs,\n feature_map=ZZFeatureMap(num_inputs),\n 
ansatz=RealAmplitudes(num_inputs),\n observable=PauliSumOp.from_list([(\"Z\" * num_inputs, 1)]),\n quantum_instance=self.qasm_quantum_instance,\n )\n regressor = NeuralNetworkRegressor(qnn, optimizer=COBYLA())\n regressor.fit(features, labels)\n\n # predicted labels from the newly trained model\n test_features = np.array([[0.5, 0.5]])\n original_predicts = regressor.predict(test_features)\n\n # save/load, change the quantum instance and check if predicted values are the same\n with tempfile.TemporaryDirectory() as dir_name:\n file_name = os.path.join(dir_name, \"regressor.model\")\n regressor.save(file_name)\n\n regressor_load = NeuralNetworkRegressor.load(file_name)\n loaded_model_predicts = regressor_load.predict(test_features)\n\n np.testing.assert_array_almost_equal(original_predicts, loaded_model_predicts)\n\n # test loading warning\n class FakeModel(SerializableModelMixin):\n \"\"\"Fake model class for test purposes.\"\"\"\n\n pass\n\n with self.assertRaises(TypeError):\n FakeModel.load(file_name)", "def test_same_models_are_equal(dbdiskrepo):\n fit1 = fit_model()\n fit2 = fit_model()\n assert fit1.artifact.id == fit2.artifact.id\n assert fit1.artifact.value_id == fit2.artifact.value_id\n assert hash(fit1) == hash(fit2)", "def model_output(model, t, s, i):\n return 0, 0, 0, 0", "def test01_name(self):\n model = self.setup_model01(\"m1\")\n model2 = self.setup_model01(\"m2\")\n\n model2.b[1].a = 0.11\n model2.b[1].b = 0.11\n model2.x = False\n to_json(model, fname=self.fname, human_read=True)\n from_json(model2, fname=self.fname)\n # make sure they are right\n assert pytest.approx(20) == value(model2.b[1].b)\n assert pytest.approx(2) == value(model2.b[1].a)\n assert value(model2.x) == True", "def generate_outcomes(outcome_model, feature_model, X_train, X_test, sigma_outcome):\n number_dimensions = X_train.shape[1]\n number_training_obeservations = X_train.shape[0]\n number_testing_obeservations = X_test.shape[0]\n\n poly_coeff_control_linear = np.random.uniform(0,1,(number_dimensions + 1,1))\n poly_coeff_treatment_linear = np.random.uniform(0,1,(number_dimensions + 1,1))\n \n ones = np.ones((number_training_obeservations,1))\n mean_train_control = np.dot(np.concatenate((ones, X_train), axis = 1) , poly_coeff_control_linear)\n mean_train_treatment = np.dot(np.concatenate((ones, X_train), axis = 1) , poly_coeff_treatment_linear) \n\n if outcome_model == \"quadratic\":\n mean_train_control_quad = np.zeros((number_training_obeservations, 1))\n mean_train_treatment_quad = np.zeros((number_training_obeservations, 1))\n\n poly_coeff_control_quad = np.random.uniform(0,1,(number_dimensions,number_dimensions))\n poly_coeff_treatment_quad = np.random.uniform(0,1,(number_dimensions,number_dimensions))\n\n for i in range(number_training_obeservations):\n mean_train_control_quad[i,0] = X_train[i,:] @ poly_coeff_control_quad @ X_train[i,:].T\n mean_train_treatment_quad[i,0] = X_train[i,:] @ poly_coeff_treatment_quad @ X_train[i,:].T\n\n mean_train_control += mean_train_control_quad\n mean_train_treatment += mean_train_treatment_quad\n\n\n sigma_train_potential_outcome = sigma_outcome\n train_potential_outcome_control = np.random.normal(mean_train_control, sigma_train_potential_outcome)\n train_potential_outcome_treatment = np.random.normal(mean_train_treatment, sigma_train_potential_outcome)\n train_potential_outcome = np.concatenate((train_potential_outcome_control,train_potential_outcome_treatment),axis=1)\n\n ones = np.ones((number_testing_obeservations,1))\n mean_test_control = 
np.dot(np.concatenate((ones, X_test), axis = 1) , poly_coeff_control_linear)\n mean_test_treatment = np.dot(np.concatenate((ones, X_test), axis = 1) , poly_coeff_treatment_linear)\n\n if outcome_model == \"quadratic\":\n mean_test_control_quad = np.zeros((number_testing_obeservations, 1))\n mean_test_treatment_quad = np.zeros((number_testing_obeservations, 1))\n\n for i in range(number_testing_obeservations):\n mean_test_control_quad[i,0] = X_test[i,:] @ poly_coeff_control_quad @ X_test[i,:].T\n mean_test_treatment_quad[i,0] = X_test[i,:] @ poly_coeff_treatment_quad @ X_test[i,:].T\n\n mean_test_control += mean_test_control_quad\n mean_test_treatment += mean_test_treatment_quad\n\n sigma_test_potential_outcome = sigma_outcome\n test_potential_outcome_control = np.random.normal(mean_test_control, sigma_test_potential_outcome)\n test_potential_outcome_treatment = np.random.normal(mean_test_treatment, sigma_test_potential_outcome)\n test_potential_outcome = np.concatenate((test_potential_outcome_control,test_potential_outcome_treatment),axis=1)\n\n return train_potential_outcome, test_potential_outcome", "def copy_inception(sess, model):\n\n print('Copying first layers ...')\n copy_conv(sess, 'conv', model.conv)\n copy_bn(sess, 'conv/batchnorm', model.bn_conv)\n copy_conv(sess, 'conv_1', model.conv_1)\n copy_bn(sess, 'conv_1/batchnorm', model.bn_conv_1)\n copy_conv(sess, 'conv_2', model.conv_2)\n copy_bn(sess, 'conv_2/batchnorm', model.bn_conv_2)\n copy_conv(sess, 'conv_3', model.conv_3)\n copy_bn(sess, 'conv_3/batchnorm', model.bn_conv_3)\n copy_conv(sess, 'conv_4', model.conv_4)\n copy_bn(sess, 'conv_4/batchnorm', model.bn_conv_4)\n\n for m in ['mixed', 'mixed_1', 'mixed_2']:\n print('Copying ', m, '...')\n copy_conv(sess, '{}/conv'.format(m), getattr(model, m).conv.conv)\n copy_bn(sess, '{}/conv/batchnorm'.format(m), getattr(model, m).conv.bn_conv)\n\n for t in ['tower', 'tower_1', 'tower_2']:\n copy_conv(sess, '{}/{}/conv'.format(m, t), getattr(getattr(model, m), t).conv)\n copy_bn(sess, '{}/{}/conv/batchnorm'.format(m, t), getattr(getattr(model, m), t).bn_conv)\n\n if t == 'tower' or t == 'tower_1':\n copy_conv(sess, '{}/{}/conv_1'.format(m, t), getattr(getattr(model, m), t).conv_1)\n copy_bn(sess, '{}/{}/conv_1/batchnorm'.format(m, t), getattr(getattr(model, m), t).bn_conv_1)\n\n if t == 'tower_1':\n copy_conv(sess, '{}/{}/conv_2'.format(m, t), getattr(getattr(model, m), t).conv_2)\n copy_bn(sess, '{}/{}/conv_2/batchnorm'.format(m, t), getattr(getattr(model, m), t).bn_conv_2)\n\n for m in ['mixed_3']:\n print('Copying ', m, '...')\n copy_conv(sess, '{}/conv'.format(m), getattr(model, m).conv.conv)\n copy_bn(sess, '{}/conv/batchnorm'.format(m), getattr(model, m).conv.bn_conv)\n\n for t in ['tower']:\n copy_conv(sess, '{}/{}/conv'.format(m, t), getattr(getattr(model, m), t).conv)\n copy_bn(sess, '{}/{}/conv/batchnorm'.format(m, t), getattr(getattr(model, m), t).bn_conv)\n copy_conv(sess, '{}/{}/conv_1'.format(m, t), getattr(getattr(model, m), t).conv_1)\n copy_bn(sess, '{}/{}/conv_1/batchnorm'.format(m, t), getattr(getattr(model, m), t).bn_conv_1)\n copy_conv(sess, '{}/{}/conv_2'.format(m, t), getattr(getattr(model, m), t).conv_2)\n copy_bn(sess, '{}/{}/conv_2/batchnorm'.format(m, t), getattr(getattr(model, m), t).bn_conv_2)\n\n for m in ['mixed_4', 'mixed_5', 'mixed_6', 'mixed_7']:\n print('Copying ', m, '...')\n copy_conv(sess, '{}/conv'.format(m), getattr(model, m).conv.conv)\n copy_bn(sess, '{}/conv/batchnorm'.format(m), getattr(model, m).conv.bn_conv)\n\n for t in ['tower', 
'tower_1', 'tower_2']:\n copy_conv(sess, '{}/{}/conv'.format(m, t), getattr(getattr(model, m), t).conv)\n copy_bn(sess, '{}/{}/conv/batchnorm'.format(m, t), getattr(getattr(model, m), t).bn_conv)\n\n if t == 'tower' or t == 'tower_1':\n copy_conv(sess, '{}/{}/conv_1'.format(m, t), getattr(getattr(model, m), t).conv_1)\n copy_bn(sess, '{}/{}/conv_1/batchnorm'.format(m, t), getattr(getattr(model, m), t).bn_conv_1)\n copy_conv(sess, '{}/{}/conv_2'.format(m, t), getattr(getattr(model, m), t).conv_2)\n copy_bn(sess, '{}/{}/conv_2/batchnorm'.format(m, t), getattr(getattr(model, m), t).bn_conv_2)\n\n if t == 'tower_1':\n copy_conv(sess, '{}/{}/conv_3'.format(m, t), getattr(getattr(model, m), t).conv_3)\n copy_bn(sess, '{}/{}/conv_3/batchnorm'.format(m, t), getattr(getattr(model, m), t).bn_conv_3)\n copy_conv(sess, '{}/{}/conv_4'.format(m, t), getattr(getattr(model, m), t).conv_4)\n copy_bn(sess, '{}/{}/conv_4/batchnorm'.format(m, t), getattr(getattr(model, m), t).bn_conv_4)\n\n for m in ['mixed_8']:\n print('Copying ', m, '...')\n for t in ['tower', 'tower_1']:\n copy_conv(sess, '{}/{}/conv'.format(m, t), getattr(getattr(model, m), t).conv)\n copy_bn(sess, '{}/{}/conv/batchnorm'.format(m, t), getattr(getattr(model, m), t).bn_conv)\n copy_conv(sess, '{}/{}/conv_1'.format(m, t), getattr(getattr(model, m), t).conv_1)\n copy_bn(sess, '{}/{}/conv_1/batchnorm'.format(m, t), getattr(getattr(model, m), t).bn_conv_1)\n\n if t == 'tower_1':\n copy_conv(sess, '{}/{}/conv_2'.format(m, t), getattr(getattr(model, m), t).conv_2)\n copy_bn(sess, '{}/{}/conv_2/batchnorm'.format(m, t), getattr(getattr(model, m), t).bn_conv_2)\n copy_conv(sess, '{}/{}/conv_3'.format(m, t), getattr(getattr(model, m), t).conv_3)\n copy_bn(sess, '{}/{}/conv_3/batchnorm'.format(m, t), getattr(getattr(model, m), t).bn_conv_3)\n\n for m in ['mixed_9', 'mixed_10']:\n print('Copying ', m, '...')\n copy_conv(sess, '{}/conv'.format(m), getattr(model, m).conv.conv)\n copy_bn(sess, '{}/conv/batchnorm'.format(m), getattr(model, m).conv.bn_conv)\n\n for t in ['tower', 'tower_1', 'tower_2']:\n copy_conv(sess, '{}/{}/conv'.format(m, t), getattr(getattr(model, m), t).conv)\n copy_bn(sess, '{}/{}/conv/batchnorm'.format(m, t), getattr(getattr(model, m), t).bn_conv)\n\n if t == 'tower' or t == 'tower_1':\n copy_conv(sess, '{}/{}/mixed/conv'.format(m, t), getattr(getattr(model, m), t).mixed.conv.conv)\n copy_bn(sess, '{}/{}/mixed/conv/batchnorm'.format(m, t), getattr(getattr(model, m), t).mixed.conv.bn_conv)\n copy_conv(sess, '{}/{}/mixed/conv_1'.format(m, t), getattr(getattr(model, m), t).mixed.conv_1.conv_1)\n copy_bn(sess, '{}/{}/mixed/conv_1/batchnorm'.format(m, t), getattr(getattr(model, m), t).mixed.conv_1.bn_conv_1)\n\n if t == 'tower_1':\n copy_conv(sess, '{}/{}/conv_1'.format(m, t), getattr(getattr(model, m), t).conv_1)\n copy_bn(sess, '{}/{}/conv_1/batchnorm'.format(m, t), getattr(getattr(model, m), t).bn_conv_1)\n\n print('Copying logit...')\n w = sess.graph.get_operation_by_name(\"softmax/logits/MatMul\").inputs[1].eval()\n b = sess.graph.get_tensor_by_name(\"softmax/biases:0\").eval()\n\n assert w.T.shape == model.logit.W.shape\n assert b.shape == model.logit.b.shape\n\n model.logit.W.data = w.T\n model.logit.b.data = b", "def test_input_mutations(self):\n local_sf = copy.copy(self.sf)\n local_dist = copy.deepcopy(self.distance)\n local_radius = copy.deepcopy(self.radius)\n local_min_core_neighbors = copy.deepcopy(self.min_core_neighbors)\n\n local_model = tc.dbscan.create(\n self.sf,\n distance=self.distance,\n radius=self.radius,\n 
min_core_neighbors=self.min_core_neighbors,\n verbose=False,\n )\n\n assert_sframe_equal(self.sf, local_sf)\n self.assertEqual(self.distance, local_dist)\n self.assertEqual(self.radius, local_radius)\n self.assertEqual(self.min_core_neighbors, local_min_core_neighbors)", "def build_model(model_id1='bert-base-multilingual-cased',\n model_id2='bert-base-multilingual-uncased',\n max_len=192, dropout=0.2,\n **_):\n print(model_id1, model_id2)\n\n transformer1 = TFAutoModel.from_pretrained(model_id1)\n transformer2 = TFAutoModel.from_pretrained(model_id2)\n\n input_word_ids1 = Input(shape=(max_len,), dtype=tf.int32, name=\"input_word_ids1\")\n out1 = transformer1(input_word_ids1)\n\n input_word_ids2 = Input(shape=(max_len,), dtype=tf.int32, name=\"input_word_ids2\")\n out2 = transformer2(input_word_ids2)\n\n sequence_output1 = out1[0]\n sequence_output2 = out2[0]\n cls_token1 = sequence_output1[:, 0, :]\n cls_token2 = sequence_output2[:, 0, :]\n\n x = Dropout(dropout)(cls_token1) + Dropout(dropout)(cls_token2)\n out = Dense(1, activation='sigmoid')(x)\n\n model = Model(inputs=[input_word_ids1, input_word_ids2], outputs=out)\n\n return model", "def build_nn_experimental(dropout: float=0.3, verbosity: int=0):\n # Setting Up Input layer\n input_q1 = Input(shape=(512,))\n input_q2 = Input(shape=(512,))\n \n # Network for 1st input Dense 128 --> Relu --> Dense 264 --> Relu\n input1_layer = Dense(512, activation='relu')(input_q1)\n input1_layer = BatchNormalization()(input1_layer)\n input1_layer = Dropout(dropout)(input1_layer)\n \n input1_layer = Dense(512, activation='relu')(input1_layer)\n input1_layer = BatchNormalization()(input1_layer)\n input1_layer = Dropout(dropout)(input1_layer)\n \n input1_layer = Model(inputs=input_q1, outputs=input1_layer)\n \n # Network for 2st input Dense 128 --> Relu --> Dense 264 --> Relu\n input2_layer = Dense(512, activation='relu')(input_q2)\n input2_layer = BatchNormalization()(input2_layer)\n input2_layer = Dropout(dropout)(input2_layer)\n \n input2_layer = Dense(512, activation='relu')(input2_layer)\n input2_layer = BatchNormalization()(input2_layer)\n input2_layer = Dropout(dropout)(input2_layer)\n \n input2_layer = Model(inputs=input_q2, outputs=input2_layer)\n \n merged = concatenate([input1_layer.output, input2_layer.output])\n\n # Fully connected layer & final prediction layer\n pred_layer = Dense(4096, activation='relu')(merged)\n pred_layer = Dense(1024, activation='relu')(pred_layer)\n pred_layer = Dense(256, activation='relu')(pred_layer)\n pred_layer = Dense(64, activation='relu')(pred_layer)\n pred_layer = Dropout(dropout)(pred_layer)\n \n pred_layer = Dense(1, activation='sigmoid')(pred_layer)\n \n model = Model(inputs=[input1_layer.input, input2_layer.input], outputs=pred_layer)\n if verbosity > 0:\n model.summary()\n return model", "def test_ignored_output(self):\n process_group = self._get_process_group()\n\n class IgnoredOutput(nn.Module):\n def __init__(self):\n super().__init__()\n self.fc1 = nn.Linear(2, 10, bias=False)\n self.fc2 = nn.Linear(10, 4, bias=False)\n self.relu = nn.ReLU()\n\n def forward(self, x):\n x = self.relu(self.fc1(x))\n x = self.relu(self.fc2(x))\n return F.softmax(x, dim=1)\n\n model = DistributedDataParallel(\n IgnoredOutput().float(),\n process_group=process_group,\n )\n\n batch_size = 4\n criterion = nn.CrossEntropyLoss()\n input = torch.rand([batch_size, 2], dtype=torch.float)\n target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)])\n\n # Run a few iterations where we ignore the output.\n for _ 
in range(4):\n output = model(input)\n del output\n\n # Run a few iterations where we use the output.\n for _ in range(4):\n output = model(input)\n loss = criterion(output, target)\n loss.backward()", "def test_part_2(arguments, distance, output):\n assert part_2.solution(arguments, distance) == output", "def test_custom_model_fn(self):\n mock_estimator = tf.test.mock.MagicMock(spec=tf.estimator.Estimator)\n mock_model_fn = mock_estimator.model_fn()\n # Set fake values to DNNLinearCombinedClassifier prediction output\n predict_output_values = {\n 'id_col': tf.constant([[1]]),\n 'class_ids': tf.constant([[1]]),\n 'classes': tf.constant([[1]]),\n 'logistic': tf.constant([[0.3]]),\n 'logits': tf.constant([[0.2]]),\n 'probabilities': tf.constant([[0.5], [1.0]])\n }\n mock_export_output = {\n 'predict': tf.test.mock.ANY,\n 'serving_default': tf.test.mock.ANY\n }\n type(mock_model_fn).predictions = mock.PropertyMock(\n return_value=predict_output_values)\n type(mock_model_fn).export_outputs = mock.PropertyMock(\n return_value=mock_export_output)\n custom_model_fn = model.custom_model_fn(mock_estimator)\n estimator_spec = custom_model_fn(None, None, tf.estimator.ModeKeys.TRAIN)\n\n actual_predict = estimator_spec.export_outputs['predict']\n actual_serving = estimator_spec.export_outputs['serving_default']\n\n # Assert that both exported `predict` and `serving_default` classes\n # are instances of Tensorflow export PredictOutput class\n self.assertIsInstance(actual_predict, tf.estimator.export.PredictOutput)\n self.assertIsInstance(actual_serving, tf.estimator.export.PredictOutput)\n\n # Assert that export_output have correct lengths\n self.assertEqual(len(actual_predict.outputs), len(predict_output_values))\n self.assertEqual(len(actual_serving.outputs), 3)", "def test_make_update_script_for_equal_models(self):\n\n self.setup_model_params()\n self.write_file(self.first_model_path, self.base_source + self.model_source)\n self.write_file(self.second_model_path, self.base_source + self.model_source)\n\n source_script = self.pyscript.make_update_script_for_model(\n engine=self.engine,\n oldmodel=load_model('testmodel_first:meta'),\n model=load_model('testmodel_second:meta'),\n repository=self.repo_path,\n )\n\n self.assertFalse('User.create()' in source_script)\n self.assertFalse('User.drop()' in source_script)", "def test_different_seeds_result_in_different_models(dbdiskrepo):\n fit1 = fit_model(seed=0)\n fit2 = fit_model(seed=1)\n\n assert p.hash(fit1) != p.hash(fit2)\n assert fit1.artifact.id != fit2.artifact.id\n assert fit1.artifact.value_id != fit2.artifact.value_id", "def testsvm_step2(self, models_name, sample_name, output_name):\n args = self.get_file_args(models_name)\n args += self.get_commonlib()\n\n args += \" -outputformat text \"\n\n self.mapreduce_core(sample_name=sample_name,\n output_name=output_name,\n exe_file=self.exe_testsvm2,\n is_cat=False,\n args=args)", "def test_data_manipulation(self):\n target_name = self.project['target']['name']\n self.api_mock.return_value.get_metadata.return_value = [\n {'_id': '0',\n 'pid': '1',\n 'created': datetime.datetime.now(),\n 'name':'universe',\n 'originalName': 'credit-sample-200.csv',\n 'varTypeString': 'NN',\n 'shape': [2, 100],\n 'controls':{},\n 'columns': [[1,target_name,0],[3,\"age\",0]],\n 'files': ['projects/' + str(self.pid) + '/raw/' + self.testdatafile],\n 'typeConvert': {}},\n {'_id': '1',\n 'pid': '1',\n 'name':'test',\n 'originalName': 'credit-sample-200.csv',\n 'created': datetime.datetime.now(),\n 'varTypeString': 'NN',\n 
'shape': [2, 100],\n 'controls':{},\n 'columns': [[1,target_name,0],[3,\"age\",0]],\n 'files': ['projects/' + str(self.pid) + '/raw/' + self.testdatafile],\n 'typeConvert': {}},\n {'_id': '2',\n 'pid': '1',\n 'name':'new',\n 'created': datetime.datetime.now(),\n 'originalName': 'credit-sample-200.csv',\n 'newdata':True,\n 'controls':{},\n 'shape': [2, 100],\n 'varTypeString': 'NN',\n 'columns': [[1,target_name,0],[3,\"age\",0]],\n 'files': ['projects/' + str(self.pid) + '/raw/' + self.testdatafile],\n 'typeConvert': {}}]\n request = WorkerRequest({'pid': '1', 'uid': '1', 'dataset_id': '1',\n 'command': 'fit', 'max_reps': 0,\n 'samplepct': 100})\n\n #target\n #this will map the target values to (0,1) because target type is Binary\n target_vector = self.dataprocessor.target_vector()\n target_series = target_vector['main']\n self.assertItemsEqual(np.unique(target_series), [0,1])\n\n #this will be none because 'holdout_pct' isn't set in the project data\n self.assertIsNone(target_vector['holdout'])\n\n #prediction dataset\n predictors = self.dataprocessor.predictors()\n pred_dataframe = predictors['1']['main']\n self.assertItemsEqual(list(pred_dataframe.columns), [\"age\"])\n self.assertEqual(self.dataprocessor.get_vartypestring_without_target('1'), \"N\")\n\n request = WorkerRequest({'pid': '1', 'uid': '1', 'dataset_id': '1', 'scoring_dataset_id': '2', 'command': 'predict', 'max_reps': 0, 'samplepct':100})\n dp2 = DataProcessor(request)\n data = dp2.request_datasets()\n self.assertEqual(data.keys(), ['1'])\n self.assertEqual(data['1'].keys(), ['scoring', 'vartypes'])\n scoring_data = data['1']['scoring']\n vartypes = data['1']['vartypes']\n self.assertEqual(list(scoring_data.columns), [\"age\"])\n self.assertEqual(vartypes, \"N\")", "def compute_output(api, args):\n source = None\n dataset = None\n model = None\n models = None\n fields = None\n other_label = OTHER\n ensemble_ids = []\n multi_label_data = None\n multi_label_fields = []\n local_ensemble = None\n test_dataset = None\n datasets = None\n\n # variables from command-line options\n resume = args.resume_\n model_ids = args.model_ids_\n output = args.predictions\n dataset_fields = args.dataset_fields_\n\n # It is compulsory to have a description to publish either datasets or\n # models\n if (not args.description_ and\n (args.black_box or args.white_box or args.public_dataset)):\n sys.exit(\"You should provide a description to publish.\")\n\n # When using --max-categories, it is compulsory to specify also the\n # objective_field\n if args.max_categories > 0 and args.objective_field is None:\n sys.exit(\"When --max-categories is used, you must also provide the\"\n \" --objective field name or column number\")\n\n # When using --new-fields, it is compulsory to specify also a dataset\n # id\n if args.new_fields and not args.dataset:\n sys.exit(\"To use --new-fields you must also provide a dataset id\"\n \" to generate the new dataset from it.\")\n\n path = u.check_dir(output)\n session_file = \"%s%s%s\" % (path, os.sep, SESSIONS_LOG)\n csv_properties = {}\n # If logging is required set the file for logging\n log = None\n if args.log_file:\n u.check_dir(args.log_file)\n log = args.log_file\n # If --clear_logs the log files are cleared\n clear_log_files([log])\n\n # labels to be used in multi-label expansion\n labels = (None if args.labels is None else\n [label.strip() for label in\n args.labels.split(args.args_separator)])\n if labels is not None:\n labels = sorted([label for label in labels])\n\n # multi_label file must be preprocessed to 
obtain a new extended file\n if args.multi_label and args.training_set is not None:\n (args.training_set, multi_label_data) = ps.multi_label_expansion(\n args.training_set, args.train_header, args, path,\n labels=labels, session_file=session_file)\n args.train_header = True\n args.objective_field = multi_label_data[\"objective_name\"]\n all_labels = l.get_all_labels(multi_label_data)\n if not labels:\n labels = all_labels\n else:\n all_labels = labels\n if args.source_file:\n # source is retrieved from the contents of the given local JSON file\n source, csv_properties, fields = u.read_local_resource(\n args.source_file,\n csv_properties=csv_properties)\n else:\n # source is retrieved from the remote object\n source, resume, csv_properties, fields = ps.source_processing(\n api, args, resume,\n csv_properties=csv_properties, multi_label_data=multi_label_data,\n session_file=session_file, path=path, log=log)\n if args.multi_label and source:\n multi_label_data = l.get_multi_label_data(source)\n (args.objective_field,\n labels,\n all_labels,\n multi_label_fields) = l.multi_label_sync(args.objective_field,\n labels,\n multi_label_data,\n fields,\n multi_label_fields)\n\n if args.dataset_file:\n # dataset is retrieved from the contents of the given local JSON file\n model_dataset, csv_properties, fields = u.read_local_resource(\n args.dataset_file,\n csv_properties=csv_properties)\n if not args.datasets:\n datasets = [model_dataset]\n dataset = model_dataset\n else:\n datasets = u.read_datasets(args.datasets)\n if not datasets:\n # dataset is retrieved from the remote object\n datasets, resume, csv_properties, fields = pd.dataset_processing(\n source, api, args, resume,\n fields=fields,\n csv_properties=csv_properties,\n multi_label_data=multi_label_data,\n session_file=session_file, path=path, log=log)\n if datasets:\n dataset = datasets[0]\n if args.to_csv is not None:\n resume = pd.export_dataset(dataset, api, args, resume,\n session_file=session_file, path=path)\n\n # Now we have a dataset, let's check if there's an objective_field\n # given by the user and update it in the fields structure\n args.objective_id_ = get_objective_id(args, fields)\n\n # If test_split is used, split the dataset in a training and a test dataset\n # according to the given split\n if args.test_split > 0:\n dataset, test_dataset, resume = pd.split_processing(\n dataset, api, args, resume,\n multi_label_data=multi_label_data,\n session_file=session_file, path=path, log=log)\n datasets[0] = dataset\n \n # Check if the dataset has a categorical objective field and it\n # has a max_categories limit for categories\n if args.max_categories > 0 and len(datasets) == 1:\n if pd.check_max_categories(fields.fields[args.objective_id_]):\n distribution = pd.get_categories_distribution(dataset,\n args.objective_id_)\n if distribution and len(distribution) > args.max_categories:\n categories = [element[0] for element in distribution]\n other_label = pd.create_other_label(categories, other_label)\n datasets, resume = pd.create_categories_datasets(\n dataset, distribution, fields, args,\n api, resume, session_file=session_file, path=path, log=log,\n other_label=other_label)\n else:\n sys.exit(\"The provided objective field is not categorical nor \"\n \"a full terms only text field. 
\"\n \"Only these fields can be used with\"\n \" --max-categories\")\n\n # If multi-dataset flag is on, generate a new dataset from the given\n # list of datasets\n if args.multi_dataset:\n dataset, resume = pd.create_new_dataset(\n datasets, api, args, resume, fields=fields,\n session_file=session_file, path=path, log=log)\n datasets = [dataset]\n\n # Check if the dataset has a generators file associated with it, and\n # generate a new dataset with the specified field structure\n if args.new_fields:\n dataset, resume = pd.create_new_dataset(\n dataset, api, args, resume, fields=fields,\n session_file=session_file, path=path, log=log)\n datasets[0] = dataset\n # rebuild fields structure for new ids and fields\n fields = pd.get_fields_structure(dataset, csv_properties)\n args.objective_id_ = get_objective_id(args, fields)\n if args.multi_label and dataset and multi_label_data is None:\n multi_label_data = l.get_multi_label_data(dataset)\n (args.objective_field,\n labels,\n all_labels,\n multi_label_fields) = l.multi_label_sync(args.objective_field,\n labels,\n multi_label_data,\n fields, multi_label_fields)\n\n if dataset:\n # retrieves max_categories data, if any\n args.max_categories = get_metadata(dataset, 'max_categories',\n args.max_categories)\n other_label = get_metadata(dataset, 'other_label',\n other_label)\n if args.model_file:\n # model is retrieved from the contents of the given local JSON file\n model, csv_properties, fields = u.read_local_resource(\n args.model_file,\n csv_properties=csv_properties)\n models = [model]\n model_ids = [model['resource']]\n ensemble_ids = []\n elif args.ensemble_file:\n # model is retrieved from the contents of the given local JSON file\n ensemble, csv_properties, fields = u.read_local_resource(\n args.ensemble_file,\n csv_properties=csv_properties)\n model_ids = ensemble['object']['models'][:]\n ensemble_ids = [ensemble['resource']]\n models = model_ids[:]\n model = retrieve_resource(bigml.api.BigML(storage='./storage'),\n models[0],\n query_string=r.ALL_FIELDS_QS)\n models[0] = model\n else:\n # model is retrieved from the remote object\n models, model_ids, ensemble_ids, resume = pm.models_processing(\n datasets, models, model_ids,\n api, args, resume, fields=fields,\n session_file=session_file, path=path, log=log, labels=labels,\n multi_label_data=multi_label_data, other_label=other_label)\n\n if models:\n model = models[0]\n single_model = len(models) == 1\n # If multi-label flag is set and no training_set was provided, label\n # info is extracted from the user_metadata. 
If models belong to an\n # ensemble, the ensemble must be retrieved to get the user_metadata.\n if model and args.multi_label and multi_label_data is None:\n if len(ensemble_ids) > 0 and isinstance(ensemble_ids[0], dict):\n resource = ensemble_ids[0]\n elif belongs_to_ensemble(model):\n ensemble_id = get_ensemble_id(model)\n resource = r.get_ensemble(ensemble_id, api=api,\n verbosity=args.verbosity,\n session_file=session_file)\n else:\n resource = model\n multi_label_data = l.get_multi_label_data(resource)\n\n # We update the model's public state if needed\n if model:\n if isinstance(model, basestring):\n if not args.evaluate and not has_test(args):\n query_string = MINIMUM_MODEL\n elif not args.test_header:\n query_string = r.ALL_FIELDS_QS\n else:\n query_string = \"%s;%s\" % (r.ALL_FIELDS_QS, r.FIELDS_QS)\n model = u.check_resource(model, api.get_model,\n query_string=query_string)\n if (args.black_box or args.white_box or\n (args.shared_flag and r.shared_changed(args.shared, model))):\n model_args = {}\n if args.shared_flag and r.shared_changed(args.shared, model):\n model_args.update(shared=args.shared)\n if args.black_box or args.white_box:\n model_args.update(r.set_publish_model_args(args))\n if model_args:\n model = r.update_model(model, model_args, args,\n api=api, path=path,\n session_file=session_file)\n models[0] = model\n\n # We get the fields of the model if we haven't got\n # them yet and need them\n if model and not args.evaluate and args.test_set:\n # If more than one model, use the full field structure\n if (not single_model and not args.multi_label and\n belongs_to_ensemble(model)):\n if len(ensemble_ids) > 0:\n ensemble_id = ensemble_ids[0]\n else:\n ensemble_id = get_ensemble_id(model)\n local_ensemble = Ensemble(ensemble_id, api=api,\n max_models=args.max_batch_models)\n fields = pm.get_model_fields(\n model, csv_properties, args, single_model=single_model,\n multi_label_data=multi_label_data)\n # Free memory after getting fields\n local_ensemble = None\n gc.collect()\n\n # Fills in all_labels from user_metadata\n if args.multi_label and not all_labels:\n (args.objective_field,\n labels,\n all_labels,\n multi_label_fields) = l.multi_label_sync(args.objective_field, labels,\n multi_label_data, fields,\n multi_label_fields)\n if model:\n # retrieves max_categories data, if any\n args.max_categories = get_metadata(model, 'max_categories',\n args.max_categories)\n other_label = get_metadata(model, 'other_label',\n other_label)\n # If predicting\n if (models and (has_test(args) or (test_dataset and args.remote))\n and not args.evaluate):\n models_per_label = 1\n if test_dataset is None:\n test_dataset = get_test_dataset(args)\n\n if args.multi_label:\n # When prediction starts from existing models, the\n # multi_label_fields can be retrieved from the user_metadata\n # in the models\n if args.multi_label_fields is None and multi_label_fields:\n multi_label_field_names = [field[1] for field\n in multi_label_fields]\n args.multi_label_fields = \",\".join(multi_label_field_names)\n test_set = ps.multi_label_expansion(\n args.test_set, args.test_header, args, path,\n labels=labels, session_file=session_file, input_flag=True)[0]\n test_set_header = True\n\n # Remote predictions: predictions are computed as batch predictions\n # in bigml.com except when --no-batch flag is set on or multi-label\n # or max-categories are used\n if (args.remote and not args.no_batch and not args.multi_label\n and not args.method in [THRESHOLD_CODE, COMBINATION]):\n # create test source from file\n 
test_name = \"%s - test\" % args.name\n if args.test_source is None:\n (test_source,\n resume,\n csv_properties,\n test_fields) = ps.test_source_processing(\n api, args, resume, session_file=session_file,\n path=path, log=log)\n else:\n test_source_id = bigml.api.get_source_id(args.test_source)\n test_source = api.check_resource(test_source_id)\n if test_dataset is None:\n # create test dataset from test source\n dataset_args = r.set_basic_dataset_args(args, name=test_name)\n test_dataset, resume = pd.alternative_dataset_processing(\n test_source, \"test\", dataset_args, api, args,\n resume, session_file=session_file, path=path, log=log)\n else:\n test_dataset_id = bigml.api.get_dataset_id(test_dataset)\n test_dataset = api.check_resource(test_dataset_id)\n\n csv_properties.update(objective_field=None,\n objective_field_present=False)\n test_fields = pd.get_fields_structure(test_dataset,\n csv_properties)\n\n batch_prediction_args = r.set_batch_prediction_args(\n args, fields=fields,\n dataset_fields=test_fields)\n\n remote_predict(model, test_dataset, batch_prediction_args, args,\n api, resume, prediction_file=output,\n session_file=session_file, path=path, log=log)\n else:\n models_per_label = args.number_of_models\n if (args.multi_label and len(ensemble_ids) > 0\n and args.number_of_models == 1):\n # use case where ensembles are read from a file\n models_per_label = len(models) / len(ensemble_ids)\n predict(models, fields, args, api=api, log=log,\n resume=resume, session_file=session_file, labels=labels,\n models_per_label=models_per_label, other_label=other_label,\n multi_label_data=multi_label_data)\n\n # When combine_votes flag is used, retrieve the predictions files saved\n # in the comma separated list of directories and combine them\n if args.votes_files_:\n model_id = re.sub(r'.*(model_[a-f0-9]{24})__predictions\\.csv$',\n r'\\1', args.votes_files_[0]).replace(\"_\", \"/\")\n try:\n model = u.check_resource(model_id, api.get_model)\n except ValueError, exception:\n sys.exit(\"Failed to get model %s: %s\" % (model_id, str(exception)))\n\n local_model = Model(model)\n message = u.dated(\"Combining votes.\\n\")\n u.log_message(message, log_file=session_file,\n console=args.verbosity)\n\n combine_votes(args.votes_files_, local_model.to_prediction,\n output, method=args.method)\n\n # If evaluate flag is on, create remote evaluation and save results in\n # json and human-readable format.\n if args.evaluate:\n # When we resume evaluation and models were already completed, we\n # should use the datasets array as test datasets\n if args.dataset_off and not args.test_dataset_ids:\n args.test_dataset_ids = datasets\n if args.test_dataset_ids:\n eval_ensembles = len(ensemble_ids) == len(args.test_dataset_ids)\n models_or_ensembles = (ensemble_ids if eval_ensembles else\n models)\n # Evaluate the models with the corresponding test datasets.\n resume = evaluate(models_or_ensembles, args.test_dataset_ids, api,\n args, resume,\n fields=fields, dataset_fields=dataset_fields,\n session_file=session_file, path=path,\n log=log, labels=labels, all_labels=all_labels,\n objective_field=args.objective_field)\n else:\n if args.multi_label and args.test_set is not None:\n # When evaluation starts from existing models, the\n # multi_label_fields can be retrieved from the user_metadata\n # in the models\n if args.multi_label_fields is None and multi_label_fields:\n args.multi_label_fields = multi_label_fields\n test_set = ps.multi_label_expansion(\n test_set, test_set_header, args, path,\n labels=labels, 
session_file=session_file)[0]\n test_set_header = True\n\n if args.test_split > 0:\n dataset = test_dataset\n dataset_fields = pd.get_fields_structure(dataset, None)\n models_or_ensembles = (ensemble_ids if ensemble_ids != []\n else models)\n resume = evaluate(models_or_ensembles, [dataset], api,\n args, resume,\n fields=fields, dataset_fields=dataset_fields,\n session_file=session_file, path=path,\n log=log, labels=labels, all_labels=all_labels,\n objective_field=args.objective_field)\n\n # If cross_validation_rate is > 0, create remote evaluations and save\n # results in json and human-readable format. Then average the results to\n # issue a cross_validation measure set.\n if args.cross_validation_rate > 0:\n args.sample_rate = 1 - args.cross_validation_rate\n cross_validate(models, dataset, fields, api, args, resume,\n session_file=session_file,\n path=path, log=log)\n\n u.print_generated_files(path, log_file=session_file,\n verbosity=args.verbosity)\n if args.reports:\n clear_reports(path)\n if args.upload:\n upload_reports(args.reports, path)", "def get_model_output(\n model,\n batch_x\n):\n outputs = model(batch_x, training=False)\n return outputs", "def testSampleOutput(self):\n beam_width = 3\n max_decode_length = 2\n\n smart_compose_model = model.create_smart_compose_model(self.embedding_layer_param, self.empty_url, self.min_len, self.max_len,\n beam_width, max_decode_length, self.feature_type_2_name, self.min_seq_prob,\n self.length_norm_power)\n\n # {'exist_prefix': True,\n # 'predicted_scores': [[-2.7357671, -2.7361841, -2.7503903]] (could vary due to random initialization),\n # 'predicted_texts': [[b'[CLS] build is', b'[CLS] build source', b'[CLS] build token']]}\n print(smart_compose_model.prefix_aware_beam_search({\n InputFtrType.TARGET_COLUMN_NAME: tf.constant(['bui'])\n }))\n # {'exist_prefix': True,\n # 'predicted_scores': [[-2.7357671, -2.7361841, -2.7503903]] (could vary due to random initialization),\n # 'predicted_texts': [[b'[CLS] build is', b'[CLS] build source', b'[CLS] build token']]}\n print(smart_compose_model.prefix_aware_beam_search({\n InputFtrType.TARGET_COLUMN_NAME: tf.constant(['build'])\n }))\n # {'exist_prefix': True,\n # 'predicted_scores': [[-2.7357671, -2.7361841, -2.7503903]] (could vary due to random initialization),\n # 'predicted_texts': [[b'build is [PAD]', b'build source [PAD]', b'build token [PAD]']]}\n print(smart_compose_model.prefix_aware_beam_search({\n InputFtrType.TARGET_COLUMN_NAME: tf.constant(['build '])\n }))\n # {'exist_prefix': True,\n # 'predicted_scores': [[-2.711434 , -2.7171993, -2.7329462]] (could vary due to random initialization),\n # 'predicted_texts': [[b'build function token', b'build function test', b'build function is']]\n print(smart_compose_model.prefix_aware_beam_search({\n InputFtrType.TARGET_COLUMN_NAME: tf.constant(['build f'])\n }))", "def out(self, inputs):", "def test_score(self):\n pred_copy_simple = np.copy(self.regression_single.y_pred)\n pred_copy_boston = np.copy(self.regression_boston.y_pred)\n\n self.assertEqual(pred_copy_simple.shape, self.y_test.shape)\n self.assertEqual(pred_copy_boston.shape, self.boston_y_test.shape)", "def test_converts_to_photomodeler_and_back_by_optimization() -> None:\n # fmm must be non-equal\n cam = Camera(\n imgsz=(4288, 2848), fmm=(3100, 3200), cmm=(0.5, -0.4), sensorsz=(35.1, 24.2)\n )\n xcam_initial = PhotoModeler.from_camera(cam, optimize=False)\n residuals_initial = Converter(xcam_initial, cam).residuals()\n xcam = PhotoModeler.from_camera(cam)\n residuals = 
Converter(xcam, cam).residuals()\n assert np.sum(residuals ** 2) < np.sum(residuals_initial ** 2)\n np.testing.assert_allclose(residuals, 0, rtol=0, atol=1e-12)\n # k* or p* must be non-zero (but small)\n cam = Camera(\n imgsz=(4288, 2848),\n fmm=(3200, 3200),\n cmm=(0.5, -0.4),\n sensorsz=(35.1, 24.2),\n k=(0.1, -0.05),\n p=(0.03, 0.04),\n )\n xcam_initial = PhotoModeler.from_camera(cam, optimize=False)\n residuals_initial = Converter(xcam_initial, cam).residuals()\n xcam = PhotoModeler.from_camera(cam)\n residuals = Converter(xcam, cam).residuals()\n assert np.sum(residuals ** 2) < np.sum(residuals_initial ** 2)\n np.testing.assert_allclose(residuals, 0, rtol=0, atol=1e-2)\n cam_initial = xcam.to_camera(optimize=False)\n residuals_initial = Converter(xcam, cam_initial).residuals()\n cam = xcam.to_camera()\n residuals = Converter(xcam, cam).residuals()\n assert np.sum(residuals ** 2) < np.sum(residuals_initial ** 2)\n np.testing.assert_allclose(residuals, 0, rtol=0, atol=1e-2)", "def combineModelsAndExport(builderSpec, nmsSpec, fileName, quantize=False):\n try:\n print(f\"Combine CoreMl model with nms and export model\")\n # Combine models to a single one\n pipeline = ct.models.pipeline.Pipeline(\n input_features=[\n (\"image\", ct.models.datatypes.Array(3, 460, 460)),\n (\"iouThreshold\", ct.models.datatypes.Double()),\n (\"confidenceThreshold\", ct.models.datatypes.Double()),\n ],\n output_features=[\"confidence\", \"coordinates\"],\n )\n\n # Required version (>= ios13) in order for mns to work\n pipeline.spec.specificationVersion = 4\n\n pipeline.add_model(builderSpec)\n pipeline.add_model(nmsSpec)\n\n pipeline.spec.description.input[0].ParseFromString(\n builderSpec.description.input[0].SerializeToString()\n )\n pipeline.spec.description.output[0].ParseFromString(\n nmsSpec.description.output[0].SerializeToString()\n )\n pipeline.spec.description.output[1].ParseFromString(\n nmsSpec.description.output[1].SerializeToString()\n )\n\n # Metadata for the model‚\n pipeline.spec.description.input[\n 1\n ].shortDescription = \"(optional) IOU Threshold override (Default: 0.6)\"\n pipeline.spec.description.input[\n 2\n ].shortDescription = \"(optional) Confidence Threshold override (Default: 0.4)\"\n pipeline.spec.description.output[\n 0\n ].shortDescription = \"Boxes \\xd7 Class confidence\"\n pipeline.spec.description.output[\n 1\n ].shortDescription = \"Boxes \\xd7 [x, y, width, height] (relative to image size)\"\n pipeline.spec.description.metadata.versionString = \"1.0\"\n pipeline.spec.description.metadata.shortDescription = \"yolov5\"\n pipeline.spec.description.metadata.author = \"Leon De Andrade\"\n pipeline.spec.description.metadata.license = \"\"\n\n model = ct.models.MLModel(pipeline.spec)\n model.save(fileName)\n\n if quantize:\n fileName16 = fileName.replace(\".mlmodel\", \"_16.mlmodel\")\n modelFp16 = ct.models.neural_network.quantization_utils.quantize_weights(\n model, nbits=16\n )\n modelFp16.save(fileName16)\n\n fileName8 = fileName.replace(\".mlmodel\", \"_8.mlmodel\")\n modelFp8 = ct.models.neural_network.quantization_utils.quantize_weights(\n model, nbits=8\n )\n modelFp8.save(fileName8)\n\n print(f\"CoreML export success, saved as {fileName}\")\n except Exception as e:\n print(f\"CoreML export failure: {e}\")", "def __init__(self,inputSize,outputSize, *args, **kwds):\n #currently the code is only for 2 hidden layers, apart from in and out\n self._saveFile = kwds.get('saveFile')\n self._inputSize = inputSize\n self._outputSize= outputSize\n self._layer1 = 
keras.layers.Dense(128,activation='relu')\n self._layer2 = keras.layers.Dense(64,activation='relu') \n self._layer3 = keras.layers.Dense(128,activation='relu')\n self._piLayer = keras.layers.Dense(self._outputSize-1,activation='softmax')\n self._zLayer = keras.layers.Dense(1,activation='tanh')\n self._inputs = keras.Input(shape=(self._inputSize,)) #returns placeholder\n x = self._layer1(self._inputs)\n x = self._layer2(x)\n x = self._layer3(x)\n self._outPi = self._piLayer(x)\n self._outZ = self._zLayer(x)\n self._output = keras.layers.concatenate([self._outPi,self._outZ],axis = -1)\n self._model = keras.Model(inputs=self._inputs,outputs=self._outPi)\n# self._model = keras.Model(inputs=self._inputs,outputs=self._outPi)\n self._model.compile(optimizer=keras.optimizers.Adam(lr=0.001, beta_1=0.99, beta_2=0.999, epsilon=1e-10, decay=0.0001),\n loss=\"categorical_crossentropy\",\n metrics=['accuracy'])\n self._epochSize = 256", "def test_two_outputs_to_same_stage(sdc_executor):\n builder = sdc_executor.get_pipeline_builder()\n\n generator = builder.add_stage(label='Dev Data Generator')\n\n selector = builder.add_stage('Stream Selector')\n\n trash = builder.add_stage(label='Trash')\n\n generator >> selector >> trash\n selector >> trash\n\n selector.condition = [dict(outputLane=selector.output_lanes[0], predicate='${1 == 1}'),\n dict(outputLane=selector.output_lanes[1], predicate='default')]\n pipeline = builder.build()\n\n sdc_executor.add_pipeline(pipeline)\n\n with pytest.raises(Exception) as e:\n sdc_executor.validate_pipeline(pipeline)\n\n assert 'VALIDATION_0039' in e.value.issues", "def test_ijones2_out(self):\r\n current = main('test_ijones/ijones2.in', 'test_ijones/ijones2.out')\r\n\r\n self.assertEqual(current, 2)", "def save_predict_results():\n\n ori_lst = []\n for i in range(1, 4):\n ori_df = pd.read_csv('Training_Model'+str(i)+'.csv')\n ori_list = ori_df['SMILES'].tolist()\n ori_lst.append(ori_list)\n frames = []\n gen_mols = []\n gen_fps = []\n for i, group in enumerate(['all', 'class3', 'prom']):\n gen_df = pd.read_csv('novel_sampled_cano_script_'+group+'_until.csv')\n gen_list = gen_df['SMILES'].tolist()\n print('Number of molecules in training for model {} is {}'.format(i+1, len(ori_lst[i])))\n over, num, smi_list = get_smi_list_overlap(ori_lst[i], gen_list)\n smi_mols = get_mols(smi_list)\n smi_fps, failed_mols = get_fingerprints(smi_mols)\n for idx in sorted(failed_mols, reverse=True):\n del smi_list[idx]\n smi_df = pd.Series(data=smi_list, name='SMILES').to_frame()\n smi_df.loc[:,'Group'] = i+1\n frames.append(smi_df)\n\n unique_df = pd.concat(frames)\n gen_smi = unique_df['SMILES'].tolist()\n gen_mols = get_mols(gen_smi)\n gen_fps, _ = get_fingerprints(gen_mols)\n unique_df['Gaps'] = predict_property('gbdt_regessor_gap_regu.joblib', gen_fps)\n unique_df['Dips'] = predict_property('gbdt_regessor_dip_reg.joblib', gen_fps)\n promising_df = unique_df.loc[(unique_df['Gaps'] <= 2.0) & (unique_df['Dips']<=3.66)]\n unique_df.to_csv('Unique_models_15epoch.csv', index=False)\n promising_df.to_csv('Promising_models_15epoch.csv', index=False)", "def test_co2():\n test_path = tempfile.mkdtemp()\n x_train, metadata = co2(test_path)\n try:\n assert x_train.shape == (237, 2)\n except:\n shutil.rmtree(test_path)\n raise()", "def moc_pipeline(argv: argparse.Namespace):\n fem = argv.feManager\n log.debug('Available front ends: {}'.format(\n str(fem.get_available_front_ends())))\n log.debug('Initializing new FE for framework {}'.format(argv.framework))\n fe = 
fem.load_by_framework(argv.framework)\n input_model = fe.load_from_file(argv.input_model)\n\n user_shapes, outputs, freeze_placeholder = fe_user_data_repack(\n input_model, argv.placeholder_shapes, argv.placeholder_data_types,\n argv.output, argv.freeze_placeholder_with_value)\n\n def check_places_are_same(places_original: List[Place], places_new: List[Place]):\n \"\"\"\n Check if set of new places is same as original or not.\n :param places_original: List[Place] Original model places\n :param places_new: List[Place] New list of places\n :return: True if new list of places is same as original\n \"\"\"\n return len(places_original) == len(places_new) and len(\n [item for item in places_original if any(\n [item.is_equal(item2['node']) for item2 in places_new])]) == len(places_original)\n\n inputs_equal = True\n if user_shapes:\n inputs_equal = check_places_are_same(input_model.get_inputs(), user_shapes)\n\n outputs_equal = True\n if outputs:\n outputs_equal = check_places_are_same(input_model.get_outputs(), outputs)\n log.debug('Inputs are same: {}, outputs are same: {}'.format(\n inputs_equal, outputs_equal))\n\n if not inputs_equal and not outputs_equal:\n # Use ExtractSubgraph\n new_input_places = [x['node'] for x in user_shapes]\n new_output_places = [x['node'] for x in outputs]\n log.debug('Using extract subgraph')\n input_model.extract_subgraph(new_input_places, new_output_places)\n elif not inputs_equal:\n new_input_places = [x['node'] for x in user_shapes]\n log.debug('Using override_all_inputs')\n input_model.override_all_inputs(new_input_places)\n elif not outputs_equal:\n new_output_places = [x['node'] for x in outputs]\n log.debug('Using override_all_outputs')\n input_model.override_all_outputs(new_output_places)\n\n if user_shapes:\n for user_shape in user_shapes:\n if user_shape.get('shape') is not None:\n input_model.set_partial_shape(\n user_shape['node'], PartialShape(user_shape['shape']))\n if user_shape.get('data_type') is not None:\n data_type = get_element_type(user_shape['data_type'])\n log.debug('Set data type: {}'.format(data_type))\n input_model.set_element_type(user_shape['node'], data_type)\n\n def shape_to_array(shape: PartialShape):\n return [shape.get_dimension(i) for i in range(shape.rank.get_length())]\n return\n\n # Set batch size\n if argv.batch is not None and argv.batch > 0:\n log.debug('Setting batch size to {}'.format(argv.batch))\n for place in input_model.get_inputs():\n old_partial_shape = input_model.get_partial_shape(place)\n old_shape_array = shape_to_array(old_partial_shape) if old_partial_shape.rank.is_static else []\n joined_name = ' '.join(place.get_names())\n validate_batch_in_shape(old_shape_array, joined_name)\n\n # Assume batch size is always 1-st dimension in shape\n # Keep other dimensions unchanged\n new_shape = [old_partial_shape.get_dimension(i)\n for i in range(old_partial_shape.rank.get_length())]\n new_shape[0] = Dimension(argv.batch)\n\n new_partial_shape = PartialShape(new_shape)\n log.debug('Input: {}, Old shape: {}, New shape: {}'.format(\n joined_name, old_shape_array, new_shape))\n input_model.set_partial_shape(place, new_partial_shape)\n\n ngraph_function = fe.convert(input_model)\n return ngraph_function", "def test_shapes_coupling_out(self):\n\n out_single = self.coupling_net_odd(self.x_single_odd, self.y_single)\n out_batch = self.coupling_net_odd(self.x_batch_odd, self.y_batch)\n\n self.assertEqual(out_single.shape[0], 1,\n 'Batch shape mismatch on single instance in CouplingNet')\n self.assertEqual(out_single.shape[1], 
self.x_dim_odd//2,\n 'Input/Output shape mismatch on single instance in CouplingNet')\n self.assertEqual(out_batch.shape[0], self.batch_size,\n 'Batch shape mismatch on a batch in CouplingNet')\n self.assertEqual(out_batch.shape[1], self.x_dim_odd // 2,\n 'Input/Output shape mismatch on a batch in CouplingNet')", "def test_shapes_coupling_out(self):\n\n out_single = self.coupling_net_odd(self.x_single_odd, self.y_single)\n out_batch = self.coupling_net_odd(self.x_batch_odd, self.y_batch)\n\n self.assertEqual(out_single.shape[0], 1,\n 'Batch shape mismatch on single instance in CouplingNet')\n self.assertEqual(out_single.shape[1], self.x_dim_odd//2,\n 'Input/Output shape mismatch on single instance in CouplingNet')\n self.assertEqual(out_batch.shape[0], self.batch_size,\n 'Batch shape mismatch on a batch in CouplingNet')\n self.assertEqual(out_batch.shape[1], self.x_dim_odd // 2,\n 'Input/Output shape mismatch on a batch in CouplingNet')", "def test_training_multiple(self):\n model = PoincareModel(self.data_large, burn_in=0, negative=3)\n model.train(epochs=2)\n old_vectors = np.copy(model.kv.syn0)\n\n model.train(epochs=1)\n self.assertFalse(np.allclose(old_vectors, model.kv.syn0))\n\n old_vectors = np.copy(model.kv.syn0)\n model.train(epochs=0)\n self.assertTrue(np.allclose(old_vectors, model.kv.syn0))", "def __init__(self,model,X_test, y_test):\n self.model = model\n self.X_test = X_test\n self.y_test = y_test", "def test_save_load_model(self):\n model = self.get_model()\n\n # Keep track of the current weights and biases for comparing.\n (model_weights, model_biases,\n tile_weights, tile_biases) = self.get_model_and_tile_weights(model)\n assert_array_almost_equal(model_weights, tile_weights)\n assert_array_almost_equal(model_biases, tile_biases)\n\n # Save the model to a file.\n file = TemporaryFile()\n save(model, file)\n\n # Load the model.\n file.seek(0)\n new_model = load(file)\n file.close()\n\n # Compare the new model weights and biases.\n (new_model_weights, new_model_biases,\n new_tile_weights, new_tile_biases) = self.get_model_and_tile_weights(new_model)\n\n assert_array_almost_equal(model_weights, new_model_weights)\n assert_array_almost_equal(model_biases, new_model_biases)\n assert_array_almost_equal(tile_weights, new_tile_weights)\n assert_array_almost_equal(tile_biases, new_tile_biases)", "def test_simple_train_named_output(self):\n data_source = nemo.backends.pytorch.tutorials.RealFunctionDataLayer(n=10000, batch_size=128,)\n trainable_module = nemo.backends.pytorch.tutorials.TaylorNet(dim=4)\n loss = nemo.backends.pytorch.tutorials.MSELoss()\n\n data = data_source()\n self.assertEqual(\n first=type(data).__name__,\n second='RealFunctionDataLayerOutput',\n msg='Check output class naming coherence.',\n )\n y_pred = trainable_module(x=data.x)\n loss_tensor = loss(predictions=y_pred, target=data.y)\n\n optimizer = nemo.backends.pytorch.actions.PtActions()\n optimizer.train(\n tensors_to_optimize=[loss_tensor], optimizer=\"sgd\", optimization_params={\"lr\": 0.0003, \"num_epochs\": 1},\n )", "def perform_backtests(self):\r\n \r\n for test_name in self.testing_dates:\r\n print('\\t|--Test #{}'.format(test_name))\r\n test_dates = self.testing_dates[test_name]\r\n print('\\t\\t|--Performing Nested Cross-Validation')\r\n cross_validation = CrossValidate()\r\n cross_validation.output_names = self.output_names\r\n cross_validation.feature_names = self.feature_names\r\n cross_validation.feature_dict = self.feature_dict\r\n cross_validation.full_df = self.final_df_output\r\n 
cross_validation.cv_params = self.testing_dates\r\n cross_validation.test_name = test_name\r\n cross_validation.walk_forward_cv()\r\n self.optimal_params['Test #{}'.format(test_name)] = cross_validation.optimal_params_by_output\r\n self.cv_model_metadata['Test #{}'.format(test_name)] = cross_validation.cv_metadata_by_output\r\n \r\n print('\\t\\t|--Performing Out-Of-Sample Testing')\r\n prediction = Predict()\r\n prediction.output_names = self.output_names\r\n prediction.feature_names = self.feature_names\r\n prediction.feature_dict = self.feature_dict\r\n prediction.optimal_params_by_output = cross_validation.optimal_params_by_output\r\n prediction.cv_predictions_by_output = cross_validation.cv_predictions_by_output\r\n prediction.full_df = self.final_df_output\r\n prediction.pred_start = test_dates['pred_start']\r\n prediction.pred_end = test_dates['pred_end']\r\n prediction.run_prediction()\r\n self.full_predictions['Test #{}'.format(test_name)] = prediction.predictions_by_output\r\n self.pred_model_metadata['Test #{}'.format(test_name)] = prediction.pred_metadata_by_output\r\n \r\n print('\\nSaving model metadata...')\r\n with open(path.deployment_cv_results, 'w') as file:\r\n json.dump(self.optimal_params, file)\r\n with open(path.deployment_cv_metadata, 'w') as file:\r\n json.dump(self.cv_model_metadata, file)\r\n with open(path.deployment_pred_model_metadata, 'w') as file:\r\n json.dump(self.pred_model_metadata, file)\r\n with open(path.deployment_full_predictions, 'w') as file:\r\n json.dump(self.full_predictions, file)", "def test_copy_graph(\n make_mock_pipeline_graph,\n make_mock_registered_model_version,\n make_mock_registered_model,\n) -> None:\n mocked_rm = make_mock_registered_model(id=123, name=\"test_rm\")\n with patch.object(\n verta.pipeline.PipelineStep, \"_get_registered_model\", return_value=mocked_rm\n ):\n graph = make_mock_pipeline_graph()\n pipeline = RegisteredPipeline(\n graph=graph,\n registered_model_version=make_mock_registered_model_version(),\n )\n copied_graph = pipeline.copy_graph()\n # convert from sets to lists and sort for side-by-side comparison\n graph_steps_sorted = sorted(graph.steps, key=lambda x: x.name)\n copied_graph_steps_sorted = sorted(copied_graph.steps, key=lambda x: x.name)\n\n for orig_step, copied_step in zip(graph_steps_sorted, copied_graph_steps_sorted):\n assert orig_step is not copied_step\n assert orig_step.name == copied_step.name\n assert orig_step.predecessors == copied_step.predecessors\n assert (\n orig_step.registered_model_version.id\n == copied_step.registered_model_version.id\n )\n assert copied_graph is not graph", "def model_test(epo, natural):\n\tmodel.eval()\n\twith torch.no_grad():\n\t\tn = batch_size\n\n\t\tif natural:\n\t\t\tloader = nat_test_loader\n\t\t\tprefix = \"nat\"\n\t\telse:\n\t\t\tloader = syn_test_loader\n\t\t\tprefix = \"syn\"\n\n\t\tlog_cor_file = open(directory + \"/logs/test_\" + prefix + \"_cor_log.txt\", \"a\") # Correct\n\t\tlog_mae_file = open(directory + \"/logs/test_\" + prefix + \"_mae_log.txt\", \"a\") # MAE\n\t\tlog_dev_file = open(directory + \"/logs/test_\" + prefix + \"_dev_log.txt\", \"a\") # DEV\n\t\tlog_sam_file = open(directory + \"/logs/test_\" + prefix + \"_sam_log.txt\", \"a\") # Sample\n\n\t\tccs = []\n\t\tlabls = []\n\t\tnum_unlabeled = 0\n\t\tfor batch_idx, (data, labels) in enumerate(loader):\n\t\t\tdata = data.cuda()\n\t\t\tlabels = labels.float().cuda()\n\n\t\t\tmodel.mode = 'natural' if natural else 'synth'\n\t\t\trecon_batch, mu, logvar, cc = model(data)\n\n\t\t\tcc[labels 
== 0] = 0 # Sets the counted cells to 0 for unlabeled data, so that regressor_loss=0\n\t\t\tnum_unlabeled += (labels == 0).sum()\n\t\t\t_, _, _ = loss_function(recon_batch, data, mu, logvar, cc, labels, natural)\n\n\t\t\tccs.append(cc.cpu().detach().numpy())\n\t\t\tlabls.append(labels.cpu().detach().numpy())\n\n\t\t\tif batch_idx == 0 and epo % 1000 == 0:\n\t\t\t\t# Save test sample\n\t\t\t\tcomparison = torch.cat([data[:n], recon_batch.view(batch_size, 1, img_size, img_size)[:n]])\n\t\t\t\tsave_image(comparison.cpu(), directory + \"/\" + prefix + \"_\" + str(epo) + \".png\", nrow=n)\n\n\t\t\t\t# Save switch sample\n\t\t\t\tmodel.mode = 'synth' if natural else 'natural'\n\t\t\t\trecon_batch, _, _, _ = model(data)\n\t\t\t\tcomparison = torch.cat([data[:n], recon_batch.view(batch_size, 1, img_size, img_size)[:n]])\n\t\t\t\tsave_image(comparison.cpu(), directory + \"/switch_\" + prefix + \"_\" + str(epo) + \".png\", nrow=n)\n\n\t\tpreds = np.concatenate(ccs, axis=None) # Elementwise round of cellcounts\n\t\tlbls = np.concatenate(labls, axis=None) # Elementswise round of labels\n\n\t\tlog_sam_file.write(str(np.round(preds, 2)) + \"\\n\" + str(lbls) + \"\\n\")\n\t\tpreds = np.around(preds)\n\t\t#lbls = np.around(lbls)\n\n\t\tcorrect = np.sum(preds == lbls) # Count elementwise equality of predictions and labels\n\t\tlen_set = len(loader.dataset)\n\t\tcorrect -= num_unlabeled # Remove zero_indices from numerator\n\t\tcorrect = float(correct) / float(len_set - num_unlabeled) # Remove zero_indices from denominator\n\n\t\tdist_sum = np.sum(np.abs(np.subtract(preds, lbls))) # Elementwise addition of dist between preds and lbls\n\t\tMAE = dist_sum / float(len_set - num_unlabeled)\n\n\t\tlen_labeled = float(len_set - num_unlabeled)\n\t\tdev = np.ones(len_set) - np.divide(preds, lbls) # Deviation contains NaNs because syn data has lbl=0\n\t\tavg_dev = np.sum(np.abs(np.where(np.isnan(dev), 0, dev))) / len_labeled # Take the avg only of those deviations that weren't NaN\n\n\t\tlog_cor_file.write(str(correct)+\"\\n\")\n\t\tlog_mae_file.write(str(MAE)+\"\\n\")\n\t\tlog_dev_file.write(str(avg_dev)+\"\\n\")\n\n\t\t#logfile.write(str(correct) + \" correct, MAE: \" + str(MAE) + \", DEV: \" + str(avg_dev) + \" in \" + prefix + \" set in epoch \" + str(epoch) + \"\\n\\n\")\n\t\tlog_cor_file.close()\n\t\tlog_mae_file.close()\n\t\tlog_dev_file.close()\n\t\tlog_sam_file.close()\n\n\t\tglobal distance_sum\n\t\tdistance_sum = dist_sum\n\t\treturn correct, MAE", "def __init__(self, resultDir: str, modelName: str, x_train, y_train_oh, x_dev, y_dev_oh, x_test, y_test_oh, drop1, drop2, drop3):\n\t\tself.resultDir = resultDir\n\t\tself.modelName = modelName\n\t\tself.x_train = x_train\n\t\tself.x_dev = x_dev\n\t\tself.x_test = x_test\n\t\tself.y_train_oh = y_train_oh\n\t\tself.y_dev_oh = y_dev_oh\n\t\tself.y_test_oh = y_test_oh\n\n\t\tself.drop1 = drop1\n\t\tself.drop2 = drop2\n\t\tself.drop3 = drop3\n\t\t\n\t\tself.model = Sequential()\n\n\t\tself.model.add(Dense(500, activation='relu', input_shape=(1361,)))\n\t\tself.model.add(Dropout(self.drop1))\n\n\t\tself.model.add(Dense(500, activation='relu'))\n\t\tself.model.add(Dropout(self.drop2))\n\n\t\tself.model.add(Dense(256, activation='relu'))\n\t\tself.model.add(Dropout(self.drop3))\n\n\t\tself.model.add(Dense(256, activation='softmax'))\n\n\t\tself.model.compile(loss='categorical_crossentropy', metrics=['categorical_accuracy'], optimizer='adam')\n\t\tprint(\"Model summary\\n\")\n\t\tprint(self.model.summary())", "def pipeline():\n\n test_pipeline = (Pipeline()\n 
.init_variable('current_loss')\n .init_model('model', C('model_class'),\n 'dynamic', C('model_config'))\n .to_array(dtype='float32')\n .train_model('model',\n inputs=B('images'),\n targets=B('labels'),\n outputs='loss',\n save_to=V('current_loss'))\n )\n return test_pipeline", "def test(args, device, test_generator, model):\n model.eval()\n\n with torch.no_grad():\n # Get inputs and labels\n inputs, inputs_prev, labels, image, _, omit = test_generator.generate_batch()\n\n # Send to device\n inputs = torch.from_numpy(inputs).to(device)\n inputs_prev = torch.from_numpy(inputs_prev).to(device)\n labels = torch.from_numpy(labels).to(device)\n\n # Initialize syn_x or hidden state\n model.syn_x = model.init_syn_x(args.batch_size).to(device)\n model.hidden = model.init_hidden(args.batch_size).to(device)\n\n output, hidden, inputs = model(inputs, inputs_prev)\n # Convert to binary prediction\n output = torch.sigmoid(output)\n pred = torch.bernoulli(output).byte()\n\n # Compute hit rate and false alarm rate\n hit_rate = (pred * (labels == 1)).sum().float().item() / \\\n (labels == 1).sum().item()\n fa_rate = (pred * (labels == -1)).sum().float().item() / \\\n (labels == -1).sum().item()\n\n # Compute dprime\n # dprime_true = dprime(hit_rate, fa_rate)\n go = (labels == 1).sum().item()\n catch = (labels == -1).sum().item()\n num_trials = (labels != 0).sum().item()\n assert (go + catch) == num_trials\n\n # dprime_true = compute_dprime(hit_rate, fa_rate, go, catch, num_trials)\n # dprime_old = dprime(hit_rate, fa_rate)\n dprime_true = dprime(hit_rate, fa_rate)\n # try:\n # assert dprime_true == dprime_old\n # except:\n # print(hit_rate, fa_rate)\n # print(dprime_true, dprime_old)\n\n return dprime_true.item(), hit_rate, fa_rate, inputs, hidden, output, pred, image, labels, omit", "def assert_wrappers_equal(first, second):\n assert first.sk_params == second.sk_params\n assert first.history_ == second.history_\n if not first.model_ or not second.model_:\n assert first.model_ == second.model_\n else:\n assert_models_equal(first.model, second.model)", "def test_same_seeds_result_in_same_models(dbdiskrepo):\n fit1 = fit_model(seed=0)\n fit2 = fit_model(seed=0)\n\n assert p.hash(fit1) == p.hash(fit2)\n assert fit1.artifact.id == fit2.artifact.id\n assert fit1.artifact.value_id == fit2.artifact.value_id", "def main():\n\n # set up output directory and file\n output_file_folder = \"output/{}\".format(args.experiment_name)\n Path(output_file_folder).mkdir(parents=True, exist_ok=True)\n args.output_file_name = \"{}/{}.csv\".format(output_file_folder, args.model_name)\n args.checkpoint_name = \"{}/{}.pt\".format(output_file_folder, args.model_name + \"_best_model\")\n\n # read lcquad merged data\n if args.dataset_name == \"lcquad\":\n df_train = pd.read_csv(\"./data/lcquad/gold_db/train_gold.csv\")\n df_valid = pd.read_csv(\"./data/lcquad/gold_db/valid_gold.csv\")\n df_test = pd.read_csv(\"./data/lcquad/gold_db/lcquad_test_sorted.csv\")\n args.gold_file_name = \"lcquad/lcquad_gt_5000.csv\"\n # elif args.dataset_name == \"qald9\":\n # df_train = pd.read_csv(\"./data/qald-9/train_gold.csv\")\n # df_valid = pd.read_csv(\"./data/qald-9/valid_gold.csv\")\n # df_test = pd.read_csv(\"./data/qald-9/test_gold.csv\")\n # args.gold_file_name = \"qald/qald_data_gt.csv\"\n # elif args.dataset_name == \"webqsp\":\n # df_train = pd.read_csv(\"./data/webqsp/train_gold.csv\")\n # df_valid = pd.read_csv(\"./data/webqsp/valid_gold.csv\")\n # df_test = pd.read_csv(\"./data/webqsp/test_gold.csv\")\n # args.gold_file_name = 
\"webqsp/webqsp_data_gt.csv\"\n\n train_data = read_data_file(df_train, device, \"train\")\n valid_data = read_data_file(df_valid, device, \"valid\")\n test_data = read_data_file(df_test, device, \"test\")\n\n # train model and evaluate\n if args.model_name == \"pure\":\n model = PureNameLNN(args.alpha, -1, False)\n elif args.model_name == \"ctx\":\n model = None\n elif args.model_name == 'type':\n model = None\n elif args.model_name == \"pure_ctx\":\n model = None\n elif args.model_name == \"pure_type\":\n model = None\n elif args.model_name == \"ctx_type\":\n model = None\n\n model = model.to(device)\n print(\"model: \", args.model_name, args.alpha)\n\n # training\n train(model, train_data, valid_data, test_data, args.checkpoint_name, args.num_epoch, args.margin, args.learning_rate)", "def test_persistence_separate_file(self):\n model = PoincareModel(self.data, burn_in=0, negative=3)\n model.train(epochs=1)\n model.save(testfile(), sep_limit=1)\n loaded = PoincareModel.load(testfile())\n self.models_equal(model, loaded)", "def model(x_crop, y_, reuse):\n with tf.variable_scope(\"model\", reuse=reuse):\n net = tl.layers.InputLayer(x_crop, name='input')\n output1 = tl.layers.Conv2d(net, 64, (5, 5), (1, 1), act=tf.nn.relu, padding='SAME', name='cnn1')\n net = tl.layers.MaxPool2d(output1, (3, 3), (2, 2), padding='SAME', name='pool1')\n output2 = tl.layers.Conv2d(net, 64, (5, 5), (1, 1), act=tf.nn.relu, padding='SAME', name='cnn2')\n net = tl.layers.MaxPool2d(output2, (3, 3), (2, 2), padding='SAME', name='pool2')\n net = tl.layers.FlattenLayer(net, name='flatten')\n output3 = tl.layers.DenseLayer(net, 384, act=tf.nn.relu, name='d1relu')\n output4 = tl.layers.DenseLayer(output3, 192, act=tf.nn.relu, name='d2relu')\n output5 = tl.layers.DenseLayer(output4, 10, act=None, name='output')\n\n return output1.outputs, output2.outputs, output3.outputs, output4.outputs, output5.outputs, output5", "def test_stage_0():\n\tra_1 = readImage(TRAIN_RAW_IMAGE_1)\n\tre_1 = readImage(TRAIN_RESULT_IMAGE_1)\n\n\tra_2 = readImage(TRAIN_RAW_IMAGE_2)\n\tre_2 = readImage(TRAIN_RESULT_IMAGE_2)\n\n\t# Uncomment below if more examples are required.\n\t# ra_3 = readImage(TRAIN_RAW_IMAGE_3)\n\t# re_3 = readImage(TRAIN_RESULT_IMAGE_3)\n\n\t# Uncomment below if the additional features are needed.\n\t# ra_1 += (\n\t# \tlaplace_operator(TRAIN_RAW_IMAGE_1),\\\n\t# \t# k_means(TRAIN_RAW_IMAGE_1)[0],\\\n\t# \t)\n\n\t# Uncomment below if the additional features are needed.\n\t# ra_2 += (\n\t# \tlaplace_operator(TRAIN_RAW_IMAGE_2),\\\n\t# \t# k_means(TRAIN_RAW_IMAGE_2)[0],\\\n\t# \t)\n\n\t# The prediction model is obtained and trained.\n\tengine = get_model((ra_1, ra_2,), (re_1, re_2,), model_type=SVM, percentage=0.1)\n\n\ttest_percentage = float(1) # how many tests\n\n\tra_1 = readImage(TEST_RAW_IMAGE_1)\n\n\t# Uncomment below if the additional features are needed.\n\t# ra_1 += (\n\t# \tlaplace_operator(TEST_RAW_IMAGE_1),\\\n\t# \t# k_means(TEST_RAW_IMAGE_1)[0],\\\n\t# \t)\n\n\tre_1 = readImage(TEST_RESULT_IMAGE_1)\n\n\t# ra_2 = readImage(TEST_RAW_IMAGE_2)\n\t# re_2 = readImage(TEST_RESULT_IMAGE_2)\n\n\tinput_vec = []\n\t# The features are extracted.\n\tinput_vec += buildFeatureArray_2(ra_1[0], ra_1[1], ra_1[2],\\\n\t\tRADIUS_ARRAY,\\\n\t\tadditional_feats=([] if len(ra_1) == 3 else ra_1[3:]))\n\n\tex_no = int(test_percentage * len(input_vec)) # actual number of the test sample\n\n\toutput_vec = []\n\toutput_vec += matrixToArray(re_1[0], lambda el: 1 if el == 255 else 0)\n\n\tprint('Will start predicting...')\n\n\tpredicted_vec = 
engine.predict(input_vec[:ex_no])\n\n\tcounter = float(0)\n\tfor y, p in zip(output_vec[:ex_no], predicted_vec[:ex_no]):\n\t\tif y == p: counter += 1\n\n\tprint('Accuracy: ' + str(counter/ex_no))\n\n\tpredicted_mat = arrayToMatrix( predicted_vec, len(re_1[0]), len(re_1[0][0]),\\\n\t\tlambda el: 255 if el == 1 else 0)\n\n\t# The predicted segmentation is saved.\n\tsave_rgb_img(\\\n\t np.array(predicted_mat).transpose(),\\\n\t np.array(predicted_mat).transpose(),\\\n\t np.array(predicted_mat).transpose(),\\\n\t 'pred.bmp',\\\n\t)", "def test_machine_learning():", "def test_output_head_layers():\n for output_dim in [[[\"linear\", 3],[\"linear\", 9]], [[\"linear\", 4], [\"linear\", 20]], [[\"linear\", 1], [\"linear\", 1]]]:\n nn_instance = RNN(input_dim=5, layers_info=[[\"gru\", 20], [\"lstm\", 8], output_dim],\n hidden_activations=\"relu\", output_activation=[\"softmax\", None])\n assert nn_instance.output_layers[0].out_features == output_dim[0][1]\n assert nn_instance.output_layers[0].in_features == 8\n assert nn_instance.output_layers[1].out_features == output_dim[1][1]\n assert nn_instance.output_layers[1].in_features == 8", "def test(self):\n self.model.eval()\n\n for step, sample in enumerate(self.test_loader):\n x, _, _ = sample\n x = x.to(self.device)\n\n y_pred = self.model.forward(x)\n loss = nn.MSELoss()(y_pred, x)\n self.test_losses.append(loss.item())\n\n if step % (max(8, len(self.test_loader)) // 8) == 0:\n out_img = torch.cat([x[0], torch.clamp(y_pred[0], 0, 1)], dim=2)\n self.sw.add_image(tag=f'sample_{step}', img_tensor=out_img, global_step=self.epoch)\n\n # log average loss on test set\n mean_test_loss = np.mean(self.test_losses)\n self.test_losses = []\n print(f'\\t● AVG Loss on TEST-set: {mean_test_loss:.6f} │ patience: ', end='')\n self.sw.add_scalar(tag='test_loss', scalar_value=mean_test_loss, global_step=self.epoch)\n\n # save best model and update training patience\n if self.best_test_loss is None or mean_test_loss < self.best_test_loss:\n self.best_test_loss = mean_test_loss\n self.patience = conf.FX_PATIENCE\n torch.save(self.model.state_dict(), self.log_path / 'best.pth')\n else:\n self.patience = self.patience - 1\n print(f'{self.patience}/{conf.FX_PATIENCE}')\n\n if self.patience == 0:\n self.show_completion_msg()", "def create_and_save_model(datapath, test_percentage = 0.2):\r\n \r\n pick_in = open(datapath, \"rb\")\r\n data = pickle.load(pick_in)\r\n pick_in.close()\r\n pick_parameter = open('parameters.data', \"rb\")\r\n parameters = pickle.load(pick_parameter)\r\n pick_parameter.close()\r\n #random.shuffle(keys)\r\n #shuffled_data = [(key, data[key]) for key in keys]\r\n \r\n features = []\r\n labels = []\r\n \r\n # sift/surf return dictonary, while hog returns list\r\n # convert both in same format\r\n if type(data) == dict:\r\n farray = []\r\n for label, label_features in data.items():\r\n for feature in label_features:\r\n farray.append([feature, label])\r\n data = farray\r\n \r\n random.shuffle(data)\r\n\r\n for feature, label in data:\r\n features.append(feature)\r\n labels.append(label)\r\n \r\n \r\n \r\n xtrain, xtest, ytrain, ytest = train_test_split(features, labels, test_size=test_percentage)\r\n \r\n # unpack parameters\r\n model = SVC(**parameters)\r\n model.fit(xtrain, ytrain)\r\n \r\n pick = open('model.data', \"wb\") #save model\r\n pickle.dump(model, pick)\r\n pick.close()\r\n\r\n test_data = list(zip(xtest,ytest))\r\n\r\n pick1 = open('data_test.data', \"wb\") #save test data, so that we don't mix up training and test data\r\n 
pickle.dump(test_data, pick1)\r\n pick1.close()\r\n\r\n print(\"n_test: \", len(xtest))\r\n print(\"n_train: \", len(xtrain))", "def test_model(iteration, sess, log_path, output, loss, data_in, data_gt):\n test_loss = sess.run(loss, feed_dict={'input_features:0': data_in, 'gt_color:0': data_gt})\n start = time.time()\n test_output = sess.run(output, feed_dict={'input_features:0': data_in, 'gt_color:0': data_gt})\n pred_time = time.time()-start\n save_image(log_path+'/'+str(iteration)+\"_out.png\", test_output)\n save_image(log_path+'/'+str(iteration)+\"_gt.png\", data_gt)\n save_image(log_path+'/'+str(iteration)+\"_in.png\", data_in[:, :, :, 0:3])\n return test_loss, pred_time", "def task1(dataset,printoutput=True,writepickle = False,pfile=None,usepickle=False):\n bitext = get_data_json(dataset)\n if usepickle == True:\n with open(pfile, 'rb') as f:\n model=pickle.load(f)\n else:\n bitext_list = deepcopy(bitext)\n model = task1_model(bitext_list)\n if writepickle == True:\n with open(pfile, 'wb') as f:\n pickle.dump(model, f)\n\n if printoutput == True:\n print_output(bitext,model.alignment_words,dataset)\n return model,bitext", "def test_constructor(self):\n # Record the model types of all the models to be created\n all_model_types = model_type_to_display_name.keys()\n\n # Record the attribute / value pairs that are common to all models.\n common_attr_value_dict = {\"data\": self.fake_df,\n \"name_spec\": self.fake_names,\n \"design\": self.fake_design,\n \"ind_var_names\": self.fake_names[\"x\"],\n \"alt_id_col\": self.alt_id_col,\n \"obs_id_col\": self.obs_id_col,\n \"choice_col\": self.choice_col,\n \"specification\": self.fake_specification,\n \"alt_IDs\": self.fake_df[\"alt_id\"].values,\n \"choices\": self.fake_df[\"choice\"].values}\n\n # Create a shape name dictionary to relate the various models to the\n # names of their shape parameters.\n shape_name_dict = {\"MNL\": None,\n \"Asym\": self.fake_shape_names[:2],\n \"Cloglog\": None,\n \"Scobit\": self.fake_shape_names,\n \"Uneven\": self.fake_shape_names,\n \"Nested Logit\": None,\n \"Mixed Logit\": None}\n\n # Create a shape reference position dictionary to relate the various\n # models to their shape reference positions.\n shape_ref_dict = {}\n for key in shape_name_dict:\n shape_ref_dict[key] = (None if key != \"Asym\" else\n self.fake_shape_ref_pos)\n\n # Create an intercept_names and intercept_ref_position dictionary to\n # relate the various models to their respective kwargs.\n intercept_names_dict = {}\n intercept_ref_dict = {}\n for key in shape_name_dict:\n if key in [\"MNL\", \"Nested Logit\", \"Mixed Logit\"]:\n intercept_names_dict[key] = None\n intercept_ref_dict[key] = None\n else:\n intercept_names_dict[key] = self.fake_intercept_names\n intercept_ref_dict[key] = self.fake_intercept_ref_pos\n\n # Create a nest_names dictionary to relate the various models to their\n # nest_name attributes\n nest_name_dict = {}\n nest_spec_dict = {}\n for key in shape_name_dict:\n if key != \"Nested Logit\":\n nest_name_dict[key] = None\n nest_spec_dict[key] = None\n else:\n nest_name_dict[key] = list(self.fake_nest_spec.keys())\n nest_spec_dict[key] = self.fake_nest_spec\n\n # Create dictionaries for the mixing_id_col, mixing_vars, and\n # mixing_pos attributes\n mixing_id_col_dict = {}\n mixing_vars_dict = {}\n mixing_pos_dict = {}\n\n for key in shape_name_dict:\n if key != \"Mixed Logit\":\n mixing_id_col_dict[key] = None\n mixing_vars_dict[key] = None\n mixing_pos_dict[key] = None\n else:\n mixing_id_col_dict[key] = 
self.obs_id_col\n mixing_vars_dict[key] = self.fake_names[\"x\"]\n mixing_pos_dict[key] = [0]\n\n # Record the attribute / value pairs that vary across models\n varying_attr_value_dict = {\"model_type\": model_type_to_display_name,\n \"intercept_names\": intercept_names_dict,\n \"intercept_ref_position\":\n intercept_ref_dict,\n \"shape_names\": shape_name_dict,\n \"shape_ref_position\": shape_ref_dict,\n \"nest_names\": nest_name_dict,\n \"nest_spec\": nest_spec_dict,\n \"mixing_id_col\": mixing_id_col_dict,\n \"mixing_vars\": mixing_vars_dict,\n \"mixing_pos\": mixing_pos_dict}\n\n # Set up the keyword arguments that are needed for each of the model\n # types\n variable_kwargs = {}\n for model_name in all_model_types:\n variable_kwargs[model_name] = {}\n variable_kwargs[model_name][\"intercept_names\"] =\\\n intercept_names_dict[model_name]\n variable_kwargs[model_name][\"intercept_ref_pos\"] =\\\n intercept_ref_dict[model_name]\n variable_kwargs[model_name][\"shape_ref_pos\"] =\\\n shape_ref_dict[model_name]\n variable_kwargs[model_name][\"shape_names\"] =\\\n shape_name_dict[model_name]\n variable_kwargs[model_name][\"nest_spec\"] =\\\n nest_spec_dict[model_name]\n variable_kwargs[model_name][\"mixing_id_col\"] =\\\n mixing_id_col_dict[model_name]\n variable_kwargs[model_name][\"mixing_vars\"] =\\\n mixing_vars_dict[model_name]\n\n # Execute the test for each model type\n for model_name in all_model_types:\n # Update the model type in the list of constructor args\n self.constructor_args[-1] = model_name\n\n # Use this specific model's keyword arguments\n self.constructor_kwargs.update(variable_kwargs[model_name])\n\n # Construct the model object\n model_obj = pylogit.create_choice_model(*self.constructor_args,\n **self.constructor_kwargs)\n\n # Make sure that the constructor has all of the required attributes\n for attr in common_attr_value_dict:\n value = common_attr_value_dict[attr]\n if isinstance(value, pd.DataFrame):\n self.assertTrue(value.equals(model_obj.data))\n elif isinstance(value, np.ndarray):\n npt.assert_allclose(value,\n model_obj.__getattribute__(attr))\n else:\n self.assertEqual(value,\n model_obj.__getattribute__(attr))\n\n for attr in varying_attr_value_dict:\n value = varying_attr_value_dict[attr][model_name]\n\n self.assertEqual(value,\n model_obj.__getattribute__(attr))\n\n return None", "def DontuseThis():\n BCM_outputs = ['phi','rho','theta',\n 'r_probabilityMaps','l_probabilityMaps',\n 'models']\n BCM_Models = pe.Node(interface=nio.DataGrabber(input_names=['structures'],\n outfields=BCM_outputs),\n name='10_BCM_Models')\n BCM_Models.inputs.base_directory = atlas_fname_wpath\n BCM_Models.inputs.template_args['phi'] = [['spatialImages','phi','nii.gz']]\n BCM_Models.inputs.template_args['rho'] = [['spatialImages','rho','nii.gz']]\n BCM_Models.inputs.template_args['theta'] = [['spatialImages','theta','nii.gz']]\n BCM_Models.inputs.template_args['r_probabilityMaps'] = [['structures']]\n BCM_Models.inputs.template_args['l_probabilityMaps'] = [['structures']]\n BCM_Models.inputs.template_args['models'] = [['structures']]\n\n BRAINSCut_structures = ['caudate','thalamus','putamen','hippocampus']\n #BRAINSCut_structures = ['caudate','thalamus']\n BCM_Models.iterables = ( 'structures', BRAINSCut_structures )\n BCM_Models.inputs.template = '%s/%s.%s'\n BCM_Models.inputs.field_template = dict(\n r_probabilityMaps='probabilityMaps/r_%s_ProbabilityMap.nii.gz',\n l_probabilityMaps='probabilityMaps/l_%s_ProbabilityMap.nii.gz',\n models='modelFiles/%sModel*',\n )\n\n \"\"\"\n 
The xml creation and BRAINSCut need to be their own mini-pipeline that gets\n executed once for each of the structures in BRAINSCut_structures. This can be\n accomplished with a map node and a new pipeline.\n \"\"\"\n \"\"\"\n Create xml file for BRAINSCut\n \"\"\"\n\n\n BFitAtlasToSubject = pe.Node(interface=BRAINSFit(),name=\"BFitAtlasToSubject\")\n BFitAtlasToSubject.inputs.costMetric=\"MMI\"\n BFitAtlasToSubject.inputs.maskProcessingMode=\"ROI\"\n BFitAtlasToSubject.inputs.numberOfSamples=100000\n BFitAtlasToSubject.inputs.numberOfIterations=[1500,1500]\n BFitAtlasToSubject.inputs.numberOfHistogramBins=50\n BFitAtlasToSubject.inputs.maximumStepLength=0.2\n BFitAtlasToSubject.inputs.minimumStepLength=[0.005,0.005]\n BFitAtlasToSubject.inputs.transformType= [\"Affine\",\"BSpline\"]\n BFitAtlasToSubject.inputs.maxBSplineDisplacement= 7\n BFitAtlasToSubject.inputs.maskInferiorCutOffFromCenter=65\n BFitAtlasToSubject.inputs.splineGridSize=[28,20,24]\n BFitAtlasToSubject.inputs.outputVolume=\"Trial_Initializer_Output.nii.gz\"\n BFitAtlasToSubject.inputs.outputTransform=\"Trial_Initializer_Output.mat\"\n cutWF.connect(SplitAvgBABC,'avgBABCT1',BFitAtlasToSubject,'fixedVolume')\n cutWF.connect(BABC,'outputLabels',BFitAtlasToSubject,'fixedBinaryVolume')\n cutWF.connect(BAtlas,'template_t1',BFitAtlasToSubject,'movingVolume')\n cutWF.connect(BAtlas,'template_brain',BFitAtlasToSubject,'movingBinaryVolume')\n cutWF.connect(BLI,'outputTransformFilename',BFitAtlasToSubject,'initialTransform')\n\n CreateBRAINSCutXML = pe.Node(Function(input_names=['rho','phi','theta',\n 'model',\n 'r_probabilityMap',\n 'l_probabilityMap',\n 'atlasT1','atlasBrain',\n 'subjT1','subjT2',\n 'subjT1GAD','subjT2GAD',\n 'subjSGGAD','subjBrain',\n 'atlasToSubj','output_dir'],\n output_names=['xml_filename','rl_structure_filename_list'],\n function = create_BRAINSCut_XML),\n overwrite = True,\n name=\"CreateBRAINSCutXML\")\n\n ## HACK Makde better directory\n CreateBRAINSCutXML.inputs.output_dir = \".\" #os.path.join(cutWF.base_dir, \"BRAINSCut_output\")\n cutWF.connect(BCM_Models,'models',CreateBRAINSCutXML,'model')\n cutWF.connect(BCM_Models,'rho',CreateBRAINSCutXML,'rho')\n cutWF.connect(BCM_Models,'phi',CreateBRAINSCutXML,'phi')\n cutWF.connect(BCM_Models,'theta',CreateBRAINSCutXML,'theta')\n cutWF.connect(BCM_Models,'r_probabilityMaps',CreateBRAINSCutXML,'r_probabilityMap')\n cutWF.connect(BCM_Models,'l_probabilityMaps',CreateBRAINSCutXML,'l_probabilityMap')\n cutWF.connect(BAtlas,'template_t1',CreateBRAINSCutXML,'atlasT1')\n cutWF.connect(BAtlas,'template_brain',CreateBRAINSCutXML,'atlasBrain')\n cutWF.connect(SplitAvgBABC,'avgBABCT1',CreateBRAINSCutXML,'subjT1')\n cutWF.connect(SplitAvgBABC,'avgBABCT2',CreateBRAINSCutXML,'subjT2')\n cutWF.connect(GADT1,'outputVolume',CreateBRAINSCutXML,'subjT1GAD')\n cutWF.connect(GADT2,'outputVolume',CreateBRAINSCutXML,'subjT2GAD')\n cutWF.connect(SGI,'outputFileName',CreateBRAINSCutXML,'subjSGGAD')\n cutWF.connect(BABC,'outputLabels',CreateBRAINSCutXML,'subjBrain')\n cutWF.connect(BFitAtlasToSubject,'outputTransform',CreateBRAINSCutXML,'atlasToSubj')\n #CreateBRAINSCutXML.inputs.atlasToSubj = \"INTERNAL_REGISTER.mat\"\n #cutWF.connect(BABC,'atlasToSubjectTransform',CreateBRAINSCutXML,'atlasToSubj')\n\n \"\"\"\n ResampleNACLabels\n \"\"\"\n ResampleAtlasNACLabels=pe.Node(interface=BRAINSResample(),name=\"ResampleAtlasNACLabels\")\n ResampleAtlasNACLabels.inputs.interpolationMode = \"NearestNeighbor\"\n ResampleAtlasNACLabels.inputs.outputVolume = \"atlasToSubjectNACLabels.nii.gz\"\n\n 
cutWF.connect(cutWF,'OutputSpec.atlasToSubjectTransform',ResampleAtlasNACLabels,'warpTransform')\n cutWF.connect(cutWF,'OutputSpec.t1_corrected',ResampleAtlasNACLabels,'referenceVolume')\n cutWF.connect(BAtlas,'template_nac_lables',ResampleAtlasNACLabels,'inputVolume')\n\n \"\"\"\n BRAINSMush\n \"\"\"\n BMUSH=pe.Node(interface=BRAINSMush(),name=\"BMUSH\")\n BMUSH.inputs.outputVolume = \"MushImage.nii.gz\"\n BMUSH.inputs.outputMask = \"MushMask.nii.gz\"\n BMUSH.inputs.lowerThresholdFactor = 1.2\n BMUSH.inputs.upperThresholdFactor = 0.55\n\n cutWF.connect(myLocalTCWF,'OutputSpec.t1_corrected',BMUSH,'inputFirstVolume')\n cutWF.connect(myLocalTCWF,'OutputSpec.t2_corrected',BMUSH,'inputSecondVolume')\n cutWF.connect(myLocalTCWF,'OutputSpec.outputLabels',BMUSH,'inputMaskVolume')\n\n \"\"\"\n BRAINSROIAuto\n \"\"\"\n BROI = pe.Node(interface=BRAINSROIAuto(), name=\"BRAINSROIAuto\")\n BROI.inputs.closingSize=12\n BROI.inputs.otsuPercentileThreshold=0.01\n BROI.inputs.thresholdCorrectionFactor=1.0\n BROI.inputs.outputROIMaskVolume = \"temproiAuto_t1_ACPC_corrected_BRAINSABC.nii.gz\"\n cutWF.connect(myLocalTCWF,'OutputSpec.t1_corrected',BROI,'inputVolume')\n\n \"\"\"\n Split the implicit outputs of BABCext\n \"\"\"\n SplitAvgBABC = pe.Node(Function(input_names=['in_files','T1_count'], output_names=['avgBABCT1','avgBABCT2'],\n function = get_first_T1_and_T2), run_without_submitting=True, name=\"99_SplitAvgBABC\")\n SplitAvgBABC.inputs.T1_count = 1 ## There is only 1 average T1 image.\n\n cutWF.connect(myLocalTCWF,'OutputSpec.outputAverageImages',SplitAvgBABC,'in_files')\n\n\n\n def printFullPath(outFileFullPath):\n print(\"=\"*80)\n print(\"=\"*80)\n print(\"=\"*80)\n print(\"=\"*80)\n print(\"{0}\".format(outFileFullPath))\n return outFileFullPath\n printOutImage = pe.Node( Function(function=printFullPath, input_names = ['outFileFullPath'], output_names = ['genoutFileFullPath']), run_without_submitting=True, name=\"99_printOutImage\")\n cutWF.connect( GADT2, 'outputVolume', printOutImage, 'outFileFullPath' )", "def save_results(model, model_type, set_name, paths, pathr, losses_train, losses_val, accuracy_train,\n accuracy_det_train, accuracy_val, accuracy_det_val, losses_test, accuracy_test,\n accuracy_det_test, losses_cum_train, losses_cum_val):\n print('Save trained model losses and accuracies..')\n torch.save(model.state_dict(), os.path.join(paths, 'model_state_dict.zip'))\n torch.save(model, os.path.join(model_result_path, 'models', set_name+'_'+model_type, 'model.zip'))\n np.save(os.path.join(pathr, 'losses_train.npy'), np.array(losses_train))\n np.save(os.path.join(pathr, 'losses_cum_train.npy'), np.array(losses_cum_train))\n np.save(os.path.join(pathr, 'losses_validation.npy'), np.array(losses_val))\n np.save(os.path.join(pathr, 'losses_cum_validation.npy'), np.array(losses_cum_val))\n np.save(os.path.join(pathr, 'accuracy_train.npy'), np.array(accuracy_train))\n np.save(os.path.join(pathr, 'accuracy_detailed_train.npy'), np.array(accuracy_det_train))\n np.save(os.path.join(pathr, 'accuracy_validation.npy'), np.array(accuracy_val))\n np.save(os.path.join(pathr, 'accuracy_detailed_validation.npy'), np.array(accuracy_det_val))\n np.save(os.path.join(pathr, 'losses_test.npy'), np.array(losses_test))\n np.save(os.path.join(pathr, 'accuracy_test.npy'), np.array(accuracy_test))\n np.save(os.path.join(pathr, 'accuracy_detailed_test.npy'), np.array(accuracy_det_test))\n plot_dataframe(pd.DataFrame(losses_cum_train, columns = ['Epoch', 'Loss']),\n save_path=pathr, save_name='losses_train', 
title='Losses [train dataset]',\n x_name='Epoch', y_name='Loss', ending='.png', ylog=False, figsize=(10,5),\n xints=float, yints=float)\n plot_dataframe(pd.DataFrame(accuracy_train, columns = ['Epoch', 'Accuracy']),\n save_path=pathr, save_name='accuracy_train', title='Accuracy [train dataset] in %',\n x_name='Epoch', y_name='Accuracy', ending='.png', ylog=False, figsize=(10,5),\n xints=float, yints=float)\n plot_dataframe(pd.DataFrame(losses_cum_val, columns = ['Epoch', 'Loss']),\n save_path=pathr, save_name='losses_val', title='Losses [validation dataset]',\n x_name='Epoch', y_name='Loss', ending='.png', ylog=False, figsize=(10,5),\n xints=float, yints=float)\n plot_dataframe(pd.DataFrame(accuracy_val, columns = ['Epoch', 'Accuracy']),\n save_path=pathr, save_name='accuracy_val', title='Accuracy [validation dataset] in %',\n x_name='Epoch', y_name='Accuracy', ending='.png', ylog=False, figsize=(10,5),\n xints=float, yints=float)\n plot_dataframe(pd.DataFrame(losses_test, columns = ['Batch', 'Loss']),\n save_path=pathr, save_name='losses_test', title='Losses [test dataset]',\n x_name='Batch', y_name='Loss', ending='.png', ylog=False, figsize=(10,5),\n xints=float, yints=float)\n plot_dataframe(pd.DataFrame(accuracy_test, columns = ['Batch', 'Accuracy']),\n save_path=pathr, save_name='accuracy_test', title='Accuracy [test dataset] in %',\n x_name='Batch', y_name='Accuracy', ending='.png', ylog=False, figsize=(10,5),\n xints=int, yints=int)" ]
[ "0.6694175", "0.6504223", "0.63788325", "0.6321789", "0.6308089", "0.6245688", "0.6215992", "0.61850905", "0.6116955", "0.6110523", "0.609222", "0.6081803", "0.60653377", "0.6058217", "0.6056005", "0.6037613", "0.60202926", "0.5977124", "0.5976717", "0.59375304", "0.59170985", "0.5893511", "0.58933586", "0.58925146", "0.5884767", "0.58826715", "0.5853007", "0.5833269", "0.583173", "0.5831216", "0.5826202", "0.5807393", "0.5800945", "0.5782794", "0.57797265", "0.5768241", "0.5761552", "0.57590616", "0.57393587", "0.57361037", "0.57308155", "0.57298666", "0.57277095", "0.5723252", "0.5708514", "0.5704869", "0.57018405", "0.570045", "0.5698589", "0.5696464", "0.5689827", "0.5689717", "0.56827945", "0.56789", "0.56678116", "0.56592125", "0.5658674", "0.56387234", "0.5637638", "0.5630871", "0.56306934", "0.56295335", "0.5618283", "0.561315", "0.5611004", "0.5606633", "0.56034964", "0.5599619", "0.5593899", "0.55915254", "0.55857813", "0.5583915", "0.558348", "0.55831325", "0.55831325", "0.5580867", "0.55767", "0.5575122", "0.5570059", "0.55644244", "0.5559748", "0.55585253", "0.55561596", "0.55517286", "0.5546387", "0.5541446", "0.55403894", "0.55389804", "0.5530931", "0.55292255", "0.5527052", "0.5524574", "0.55208", "0.55204356", "0.5513818", "0.55137825", "0.5504772", "0.5501392", "0.5498278", "0.548867" ]
0.7202675
0
Builds a model with 2 inputs. Test ConcatNode
def test_BuildModel2(self):
    print("\nTest 6: Building a Model with Concat")
    builder = StaticBuilder("Concat")
    in1 = builder.addInput(10)
    in2 = builder.addInput(20)
    enc1 = builder.addInner(3, num_islots=2)
    out1 = builder.addOutput()
    builder.addDirectedLink(in1, enc1, islot=0)
    builder.addDirectedLink(in2, enc1, islot=1)
    builder.addDirectedLink(enc1, out1)
    builder.build()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def concat_model():\n x = tf.keras.Input(shape=[10, 10, 3, ])\n x1 = tf.keras.layers.Conv2D(5, (2, 2))(x)\n x2 = tf.keras.layers.Conv2D(6, (2, 2))(x)\n x3 = tf.keras.layers.Conv2D(7, (2, 2))(x)\n z = tf.keras.layers.concatenate([x2, x1, x3], axis=-1)\n z1 = tf.keras.layers.Conv2D(10, (2, 2))(z)\n z2 = tf.keras.layers.Conv2D(10, (2, 2))(z)\n z = tf.add(z1, z2)\n z = tf.keras.layers.Flatten()(z)\n output = tf.keras.layers.Dense(10, activation=tf.nn.softmax, name=\"concat_model\")(z)\n return output", "def _create_concat(cls, onnx_node, inputs, opset_version):\n factor = onnx_node.attrs[\"axis\"]\n if factor < 0:\n factor = len(inputs[0].shape\n ) + factor # in order to support the negative axis\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return None, forward(axis=factor)", "def create_helper_concat_node(inputs, output_name, axis=0):\n concat_node = onnx.helper.make_node(\n \"Concat\",\n inputs=inputs,\n outputs=[output_name],\n name=output_name,\n axis=axis,\n )\n return [concat_node]", "def convert_concat(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n axis = int(attrs.get(\"dim\", 1))\n concat_node = onnx.helper.make_node(\n \"Concat\",\n input_nodes,\n [name],\n axis=axis,\n name=name\n )\n return [concat_node]", "def build(data_shape_1, data_shape_2):\n # create NN model \n # design network\n \n inputs = keras.Input(shape=(data_shape_1, data_shape_2), name='inp')\n cnn1 = layers.Conv1D(16, 5, activation='relu')(inputs)\n cnn2 = layers.Conv1D(32, 3, activation='relu')(cnn1)\n cnn3 = layers.Conv1D(64, 3, activation='relu')(cnn2)\n cnn3 = layers.Flatten()(cnn3)\n lstm = layers.LSTM(100,return_sequences = True, activation='relu')(inputs)\n lstm = layers.Flatten()(lstm)\n x = layers.concatenate([cnn3,lstm])\n x = layers.Dense(100, activation='sigmoid')(x)\n outputs = layers.Dense(24)(x)\n\n model = keras.Model(inputs=inputs, outputs=outputs, name='mnist_model')\n \n return model", "def _create_concat(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('axis', op.axis),\n ])\n return node", "def test_BuildModel1(self):\n print(\"\\nTest 5: Building a Model with cloning\")\n builder = StaticBuilder(\"Clone\")\n in1 = builder.addInput(10)\n enc1 = builder.addInner(3)\n out1 = builder.addOutput(name=\"Out1\")\n out2 = builder.addOutput(name=\"Out2\")\n \n builder.addDirectedLink(in1, enc1)\n builder.addDirectedLink(enc1, out1)\n builder.addDirectedLink(enc1, out2)\n \n builder.build()", "def build_model(model_id1='bert-base-multilingual-cased',\n model_id2='bert-base-multilingual-uncased',\n max_len=192, dropout=0.2,\n **_):\n print(model_id1, model_id2)\n\n transformer1 = TFAutoModel.from_pretrained(model_id1)\n transformer2 = TFAutoModel.from_pretrained(model_id2)\n\n input_word_ids1 = Input(shape=(max_len,), dtype=tf.int32, name=\"input_word_ids1\")\n out1 = transformer1(input_word_ids1)\n\n input_word_ids2 = Input(shape=(max_len,), dtype=tf.int32, name=\"input_word_ids2\")\n out2 = transformer2(input_word_ids2)\n\n sequence_output1 = out1[0]\n sequence_output2 = out2[0]\n cls_token1 = sequence_output1[:, 0, :]\n cls_token2 = sequence_output2[:, 0, :]\n\n x = Dropout(dropout)(cls_token1) + Dropout(dropout)(cls_token2)\n out = Dense(1, activation='sigmoid')(x)\n\n model = Model(inputs=[input_word_ids1, input_word_ids2], outputs=out)\n\n return model", "def convert_concat(g, op, block):\n\n inputs = [g.get_node(op.input(\"X\")[i]) for i in 
range(len(op.input(\"X\")))]\n axis = op.attr(\"axis\")\n inputs = _dtype_shape_promotion(inputs)\n out = _op.concatenate(inputs, axis=axis)\n g.add_node(op.output(\"Out\")[0], out)", "def test_BuildModel0(self):\n print(\"\\nTest 4: Building a Basic Model\")\n builder = StaticBuilder(scope=\"Basic\")\n in_name = builder.addInput(10)\n enc_name = builder.addInner(3)\n out_name = builder.addOutput()\n builder.addDirectedLink(in_name, enc_name)\n builder.addDirectedLink(enc_name, out_name)\n \n self.assertEqual(builder.num_nodes, 3, \"The number of nodes has not been \"\n \"assigned correctly\")\n \n builder.build()\n inn, enc, out = ( builder.nodes[in_name], builder.nodes[enc_name],\n builder.nodes[out_name] )\n self.assertEqual(inn._oslot_to_otensor[0].shape.as_list()[-1],\n enc._islot_to_itensor[0].shape.as_list()[-1], \n \"The input tensors have not been assigned correctly\")\n self.assertEqual(enc._oslot_to_otensor[0].shape.as_list()[-1],\n out._islot_to_itensor[0].shape.as_list()[-1], \n \"The input tensors have not been assigned correctly\")", "def test_add_02():\n\n a_shape = (1, 1, 3, 4)\n b_shape = (1, 2, 3, 1)\n out_shape = (1, 2, 3, 4)\n\n class AddTester(Base):\n def create_onnx(self) -> onnx.ModelProto:\n node = make_node(\"Add\", inputs=[\"A\", \"B\"], outputs=[\"C\"])\n inputs = [info(\"A\", TensorProto.FLOAT, a_shape)]\n outputs = [info(\"C\", TensorProto.FLOAT, out_shape)]\n\n B = np.random.rand(*b_shape).astype(np.float32)\n\n b_init = from_array(B, \"B\")\n graph = make_graph([node], \"add_graph\", inputs, outputs, initializer=[b_init])\n model = make_model(graph)\n return model\n\n a = np.random.rand(*a_shape).astype(np.float32)\n\n outputs = [\"C\"]\n AddTester({\"A\": a}, outputs).run()", "def multiple_input_model():\n\n input1 = tf.keras.Input(name='input1', shape=(10, 10, 3))\n input2 = tf.keras.Input(name='input2', shape=(12, 12, 3))\n x1 = tf.keras.layers.Conv2D(8, (1, 1), name='conv1a')(input1)\n x2 = tf.keras.layers.Conv2D(8, (3, 3), name='conv1b')(input2)\n x = tf.keras.layers.add([x1, x2])\n x = tf.keras.layers.Conv2D(4, (1, 1), name='conv2')(x)\n x = tf.keras.layers.Flatten()(x)\n outputs = tf.keras.layers.Dense(2, activation=tf.nn.softmax, name=\"multiple_input_model\")(x)\n\n return outputs", "def convert_rnn_param_concat(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n axis = int(attrs.get(\"dim\"))\n\n # mxnet RNN node and ONNX RNN/LSTM/GRU nodes\n # use different ways to store their parameters\n\n # The conversion between these formats is broken into 2 steps\n # The first step (performed here in _rnn_param_concat) regroups the\n # flattened parameters according to the table below.\n # The second step corrects the shapes and orders of gates and is\n # performed and described in more detail in the RNN node\n\n # mxnet [ONNX] -> ONNX (group)\n # i2h_weights [W (+ WB)] -> W (input weights)\n # h2h_weights [R (+ RB)] -> R (recurrence weights)\n # i2h_biases [Wb (+ WBb)] -> B = [Wb + Rb (+ WBb + RBb)]\n # h2h_biases [Rb (+ RBb)] -> (biases)\n\n split = len(input_nodes) // 2\n weights, biases = input_nodes[:split], input_nodes[split:]\n i2h_weights = weights[::2]\n h2h_weights = weights[1::2]\n i2h_biases = biases[::2]\n h2h_biases = biases[1::2]\n reordered_biases = [\n bias\n for pair in zip(i2h_biases, h2h_biases)\n for bias in pair\n ]\n\n # The order of mxnet parameters in the inputs is:\n # [\n # '{}{}_{}_{}'.format(d, l, g, t)\n # for t in ['weight', 'bias']\n # for l in range(num_layers)\n # for d in ['l', 'r'][:num_directions]\n 
# for g in ['i2h', 'h2h']\n # ]\n\n w = onnx.helper.make_node(\n \"Concat\",\n inputs=i2h_weights,\n outputs=[name + \"__W\"],\n axis=axis,\n name=name + \"__W\"\n )\n r = onnx.helper.make_node(\n \"Concat\",\n inputs=h2h_weights,\n outputs=[name + \"__R\"],\n axis=axis,\n name=name + \"__R\"\n )\n b = onnx.helper.make_node(\n \"Concat\",\n inputs=reordered_biases,\n outputs=[name + \"__B\"],\n axis=axis,\n name=name + \"__B\"\n )\n return [w, r, b]", "def build_model(input_classes,output_classes):\n dimensions = 20\n inputs = []\n embedded_outputs = []\n for i in input_classes:\n input_layer = Input((1,))\n inputs.append(input_layer)\n embedder = Embedding(input_dim=i,output_dim=dimensions,input_length=1,embeddings_constraint=UnitNorm(axis=0))\n embedded_layer = embedder(input_layer)\n embedded_outputs.append(embedded_layer)\n\n embedded_concats = Concatenate()(embedded_outputs)\n flatten_layer = Flatten()\n\n dense_layer = Dense(output_classes)\n\n flattened_output = flatten_layer(embedded_concats)\n dense_output = dense_layer(flattened_output)\n\n # dense_output = dense_layer(embedded_concats)\n\n model = Model(inputs,dense_output)\n print(model.summary())\n model.compile(loss='sparse_categorical_crossentropy', optimizer='adam')\n\n return model", "def build_model():", "def test_add_01():\n\n a_shape = (1, 2, 3, 4)\n b_shape = (1, 1, 3, 1)\n out_shape = (1, 2, 3, 4)\n\n class AddTester(Base):\n def create_onnx(self) -> onnx.ModelProto:\n node = make_node(\"Add\", inputs=[\"A\", \"B\"], outputs=[\"C\"])\n inputs = [info(\"A\", TensorProto.FLOAT, a_shape)]\n outputs = [info(\"C\", TensorProto.FLOAT, out_shape)]\n\n B = np.random.rand(*b_shape).astype(np.float32)\n\n b_init = from_array(B, \"B\")\n graph = make_graph([node], \"add_graph\", inputs, outputs, initializer=[b_init])\n model = make_model(graph)\n return model\n\n a = np.random.rand(*a_shape).astype(np.float32)\n\n outputs = [\"C\"]\n AddTester({\"A\": a}, outputs).run()", "def test_add_03():\n\n a_shape = (1, 2, 3, 4)\n b_shape = (3, 4)\n out_shape = (1, 2, 3, 4)\n\n class AddTester(Base):\n def create_onnx(self) -> onnx.ModelProto:\n node = make_node(\"Add\", inputs=[\"A\", \"B\"], outputs=[\"C\"])\n inputs = [info(\"A\", TensorProto.FLOAT, a_shape)]\n outputs = [info(\"C\", TensorProto.FLOAT, out_shape)]\n\n B = np.random.rand(*b_shape).astype(np.float32)\n\n b_init = from_array(B, \"B\")\n graph = make_graph([node], \"add_graph\", inputs, outputs, initializer=[b_init])\n model = make_model(graph)\n return model\n\n a = np.random.rand(*a_shape).astype(np.float32)\n\n outputs = [\"C\"]\n AddTester({\"A\": a}, outputs).run()", "def build_model(self):\n import tensorflow as tf\n \n y = tf.nn.relu(tf.matmul(self.variables[\"input_observation\"], self.variables[\"W1\"]) + \n self.variables[\"b1\"], name=\"y1\")\n \n for i in range(self.n_hidden-1):\n y = tf.nn.relu(tf.matmul(y, self.variables[\"W\"+str(i+2)]) + \n self.variables[\"b\"+str(i+2)], name=\"y\"+str(i+2))\n \n self.variables[\"y\"] = [tf.matmul(y, self.variables[\"Wo_0\"]) + self.variables[\"bo_0\"]]\n for i in range(1, len(self.output_size)):\n self.variables[\"y\"] += [tf.matmul(y, self.variables[\"Wo_%s\"%i]) + self.variables[\"bo_%s\"%i]]", "def __build_model_pyramid(name, model, features):\n return keras.layers.Concatenate(axis=1, name=name)([model(f) for f in features])", "def construct(self, x1, x2):\n x1 = self.up(x1)\n x = self.concat((x1, x2))\n return self.conv(x)", "def create_split_concat_net_const(self, input_shape, output_shapes, axis, ir_version):\n\n #\n 
# Create ONNX model\n #\n\n import onnx\n from onnx import helper\n from onnx import TensorProto\n import numpy as np\n\n concat_axis = 0\n concat_output_shape = input_shape.copy()\n concat_output_shape[concat_axis] *= 2\n\n const_number = np.prod(input_shape)\n constant = np.random.randint(-127, 127, const_number).astype(np.float)\n\n input = helper.make_tensor_value_info('input', TensorProto.FLOAT, input_shape)\n outputs, split = [], []\n for id, output_shape in enumerate(output_shapes):\n helper.make_tensor_value_info('output_{}'.format(id), TensorProto.FLOAT, output_shape)\n outputs.append('output_{}'.format(id))\n split.append(output_shape[axis])\n\n # Output for concat\n output_concat = helper.make_tensor_value_info('output_dyn_concat', TensorProto.FLOAT, concat_output_shape)\n\n node_const_def = onnx.helper.make_node(\n 'Constant',\n inputs=[],\n outputs=['const1'],\n value=helper.make_tensor(\n name='const_tensor',\n data_type=TensorProto.FLOAT,\n dims=input_shape,\n vals=constant,\n ),\n )\n\n node_split_def = onnx.helper.make_node(\n 'Split',\n inputs=['const1'],\n outputs=outputs,\n axis=axis,\n split=split\n )\n\n node_concat_def = onnx.helper.make_node(\n 'Concat',\n inputs=outputs,\n outputs=['output_concat'],\n axis=axis\n )\n\n node_dyn_concat_def = onnx.helper.make_node(\n 'Concat',\n inputs=['input', 'output_concat'],\n outputs=['output_dyn_concat'],\n axis=concat_axis\n )\n\n # Create the graph (GraphProto)\n graph_def = helper.make_graph(\n [node_const_def, node_split_def, node_concat_def, node_dyn_concat_def],\n 'test_split_model',\n [input],\n [output_concat],\n )\n\n # Create the model (ModelProto)\n onnx_net = helper.make_model(graph_def, producer_name='test_split_model')\n\n #\n # Create reference IR net\n # Please, spesify 'type': 'Input' for inpit node\n # Moreover, do not forget to validate ALL layer attributes!!!\n #\n\n ref_net = None\n\n return onnx_net, ref_net", "def get_model_concat(batch_size, max_seq_length, input_size, hidden_size, target_size,\n vocab_size, pretrain, tanhOrSoftmax, dropout):\n\n # batch_size x max_seq_length\n inputs = tf.placeholder(tf.int32, [batch_size, max_seq_length])\n inputs_cond = tf.placeholder(tf.int32, [batch_size, max_seq_length])\n\n cont_train = True\n if pretrain == \"pre\":\n cont_train = False\n embedding_matrix = tf.Variable(tf.random_uniform([vocab_size, input_size], -0.1, 0.1), # input_size is embeddings size\n name=\"embedding_matrix\", trainable=cont_train)\n\n # batch_size x max_seq_length x input_size\n embedded_inputs = tf.nn.embedding_lookup(embedding_matrix, inputs)\n embedded_inputs_cond = tf.nn.embedding_lookup(embedding_matrix, inputs_cond)\n\n embedded_inputs_all = tf.concat(1, [embedded_inputs, embedded_inputs_cond]) # concatenating the two embeddings\n\n # [batch_size x inputs_size] with max_seq_length elements\n # fixme: possibly inefficient\n # inputs_list[0]: batch_size x input[0] <-- word vector of the first word\n inputs_list = [tf.squeeze(x) for x in\n tf.split(1, max_seq_length*2, embedded_inputs_all)]\n\n\n drop_prob = None\n if dropout:\n drop_prob = 0.1\n lstm_encoder = Encoder(rnn_cell.BasicLSTMCell, input_size, hidden_size, drop_prob, drop_prob)\n\n\n start_state = tf.zeros([batch_size, lstm_encoder.state_size])\n\n # [h_i], [h_i, c_i] <-- LSTM\n # [h_i], [h_i] <-- RNN\n outputs, states = lstm_encoder(inputs_list, start_state, \"LSTM\")\n\n outputs_fin = outputs[-1]\n\n if tanhOrSoftmax == \"tanh\":\n model = Projector(target_size, non_linearity=tf.nn.tanh)(outputs_fin) #tf.nn.softmax\n 
else:\n model = Projector(target_size, non_linearity=tf.nn.softmax)(outputs_fin) # tf.nn.softmax\n\n\n return model, [inputs, inputs_cond]", "def test_add_00():\n\n a_shape = (1, 1, 3, 4)\n b_shape = (1, 2, 3, 1)\n out_shape = (1, 2, 3, 4)\n\n class AddTester(Base):\n def create_onnx(self) -> onnx.ModelProto:\n node = make_node(\"Add\", inputs=[\"A\", \"B\"], outputs=[\"C\"])\n inputs = [\n info(\"A\", TensorProto.FLOAT, a_shape),\n info(\"B\", TensorProto.FLOAT, b_shape),\n ]\n outputs = [info(\"C\", TensorProto.FLOAT, out_shape)]\n\n graph = make_graph([node], \"add_graph\", inputs, outputs)\n model = make_model(graph)\n return model\n\n a = np.random.rand(*a_shape).astype(np.float32)\n b = np.random.rand(*b_shape).astype(np.float32)\n\n outputs = [\"C\"]\n AddTester({\"A\": a, \"B\": b}, outputs).run()", "def test_concat_get_op_product_graph(self):\n\n tf.compat.v1.reset_default_graph()\n\n _ = concat_model()\n conn_graph = ConnectedGraph(tf.compat.v1.get_default_graph(), ['input_1'], ['concat_model/Softmax'])\n self.assertTrue(validate_branch_ops(conn_graph))\n self.assertTrue(validate_product_tensor_lists(conn_graph))\n self.assertEqual(2, conn_graph.branch_count)\n self.assertEqual(13, len(conn_graph.get_all_ops()))\n self.assertEqual(12 + len(tf.compat.v1.get_default_graph().get_collection('variables')),\n len(conn_graph.get_all_products()))\n\n # Check that the order of input products to the concat op matches the order of input tensors in the tf graph\n concat_tf_op = tf.compat.v1.get_default_graph().get_operation_by_name(\"concatenate/concat\")\n concat_op = conn_graph.get_all_ops()['concatenate/concat']\n for index, product in enumerate(concat_op.get_input_products()):\n self.assertTrue(len(product.consumers) == 1)\n self.assertEqual(product.tensor_dict[product.consumers[0]], concat_tf_op.inputs[index])", "def convert_concat(self, op):\n try:\n from tflite.Operator import Operator\n from tflite.ConcatenationOptions import ConcatenationOptions\n from tflite.BuiltinOptions import BuiltinOptions\n from tflite.ActivationFunctionType import ActivationFunctionType\n except ImportError:\n raise ImportError(\"The tflite package must be installed\")\n\n assert isinstance(op, Operator)\n input_tensors = self.get_input_tensors(op)\n assert len(input_tensors) > 1, \"input tensors length should be greater than 1\"\n\n data_nodes = [self.tensor_tab[t.tensor_idx] for t in input_tensors]\n\n output_tensors = self.get_output_tensors(op)\n assert len(output_tensors) == 1, \"output tensors length should be 1\"\n output_tensor = output_tensors[0]\n output_tensor_idx = output_tensor.tensor_idx\n output_tensor_shape = output_tensor.tensor.ShapeAsNumpy()\n\n assert op.BuiltinOptionsType() == BuiltinOptions.ConcatenationOptions\n op_options = op.BuiltinOptions()\n concat_options = ConcatenationOptions()\n concat_options.Init(op_options.Bytes, op_options.Pos)\n concat_dim = concat_options.Axis()\n fused_activation_fn = concat_options.FusedActivationFunction()\n assert fused_activation_fn == ActivationFunctionType.NONE, \\\n 'Concat operator with fused activation is not supported yet.'\n\n out_nodes = self.nn_concat(concat_dim, data_nodes, output_tensor_shape)\n\n self.tensor_tab[output_tensor_idx] = out_nodes\n return out_nodes", "def test_merge_add(self):\n input_tensor = Input(shape=(3,))\n x1 = Dense(4)(input_tensor)\n x2 = Dense(5)(x1)\n x3 = Dense(5)(x1)\n x4 = merge([x2, x3], mode=\"sum\")\n x5 = Dense(7)(x4)\n\n model = Model(input=[input_tensor], output=[x5])\n input_names = [\"data\"]\n 
output_names = [\"output\"]\n\n spec = keras.convert(model, input_names, output_names).get_spec()\n self.assertIsNotNone(spec)\n\n # Test the model class\n self.assertIsNotNone(spec.description)\n self.assertTrue(spec.HasField(\"neuralNetwork\"))\n\n # Test the inputs and outputs\n self.assertEqual(len(spec.description.input), len(input_names))\n self.assertEqual(\n sorted(input_names), sorted(map(lambda x: x.name, spec.description.input))\n )\n self.assertEqual(len(spec.description.output), len(output_names))\n self.assertEqual(\n sorted(output_names), sorted(map(lambda x: x.name, spec.description.output))\n )", "def build_model(hype_space):\n print(\"Hyperspace:\")\n print(hype_space)\n\n input = Input(shape=(MAXLEN_SEQ, int(hype_space['embed_dim']) ))\n\n profiles_input = Input(shape=(MAXLEN_SEQ, NB_FEATURES,))\n x1 = concatenate([input, profiles_input])\n x2 = concatenate([input, profiles_input])\n inp = [input, profiles_input]\n\n x1 = Dense(1200, activation=\"relu\")(x1)\n x1 = Dropout(0.5)(x1)\n\n # x1 = Bidirectional(CuDNNGRU(units=100, return_sequences=True))(x1)\n # Defining a bidirectional LSTM using the embedded representation of the inputs\n x2 = Bidirectional(CuDNNGRU(units=500, return_sequences=True))(x2)\n # x2 = Dropout(0.5)(x2)\n x2 = Bidirectional(CuDNNGRU(units=100, return_sequences=True))(x2)\n # x2 = Dropout(0.5)(x2)\n COMBO_MOVE = concatenate([x1, x2])\n w = Dense(500, activation=\"relu\")(COMBO_MOVE) # try 500\n w = Dropout(0.4)(w)\n w = tcn.TCN(return_sequences=True)(w)\n\n y = TimeDistributed(Dense(NB_CLASSES_Q8, activation=\"softmax\"))(w)\n\n # Defining the model as a whole and printing the summary\n model = Model(inp, y)\n # model.summary()\n\n # Setting up the model with categorical x-entropy loss and the custom accuracy function as accuracy\n adamOptimizer = Adam(lr=0.001, beta_1=0.8, beta_2=0.8, epsilon=None, decay=0.0001, amsgrad=False)\n model.compile(optimizer=adamOptimizer, loss=\"categorical_crossentropy\", metrics=[accuracy])\n\n return model", "def build(self):\n\n self.W = self.init([self.n_atom_input_feat, self.n_output])\n self.b = model_ops.zeros(shape=[\n self.n_output,\n ])\n\n self.trainable_weights = self.W + self.b", "def model_build(self):\n\n # Define the input placeholder as a tensor with shape input_shape. Think of this as your input image!\n X_input = Input(self.inputData[0].shape)\n\n '''\n # CONV -> BN -> RELU Block applied to X\n X = Conv2D(8, (8, 8), name='conv0')(X_input)\n X = BatchNormalization(name='bn0')(X)\n X = Activation('relu')(X)\n X = MaxPooling2D((2, 4), name='max_pool0')(X)\n X = Dropout(0.1, name='dropout0')(X)\n\n X = Conv2D(16, (16, 16), name='conv1')(X)\n X = BatchNormalization(name='bn1')(X)\n X = Activation('relu')(X)\n X = MaxPooling2D((2, 4), name='max_pool1')(X)\n X = Dropout(0.1, name='dropout1')(X)\n\n X = Conv2D(16, (32, 32), name='conv2')(X)\n X = BatchNormalization(name='bn2')(X)\n X = Activation('relu')(X)\n X = MaxPooling2D((2, 4), name='max_pool2')(X)\n X = Dropout(0.1, name='dropout2')(X)\n' '''\n\n X = Dense(500, activation='relu', name='fc0')(X_input)\n X = Dropout(0.1, name='dropout1')(X)\n X = Dense(500, activation='relu', name='fc1')(X)\n X = Dropout(0.1, name='dropout2')(X)\n X = Dense(3, activation='softmax', name='fc2')(X)\n\n # Create model. 
This creates your Keras model instance, you'll use this instance to train/test the model.\n self.model = Model(inputs=X_input, outputs=X, name='acouModel')", "def create_split_concat_net(self, input_shape, output_shapes, axis, ir_version):\n\n #\n # Create ONNX model\n #\n\n import onnx\n from onnx import helper\n from onnx import TensorProto\n\n input = helper.make_tensor_value_info('input', TensorProto.FLOAT, input_shape)\n outputs, split = [], []\n for id, output_shape in enumerate(output_shapes):\n helper.make_tensor_value_info('output_{}'.format(id), TensorProto.FLOAT, output_shape)\n outputs.append('output_{}'.format(id))\n split.append(output_shape[axis])\n\n # Output for concat\n output_concat = helper.make_tensor_value_info('output_concat', TensorProto.FLOAT, input_shape)\n\n node_split_def = onnx.helper.make_node(\n 'Split',\n inputs=['input'],\n outputs=outputs,\n axis=axis,\n split=split\n )\n\n node_concat_def = onnx.helper.make_node(\n 'Concat',\n inputs=outputs,\n outputs=['output_concat'],\n axis=axis\n )\n\n # Create the graph (GraphProto)\n graph_def = helper.make_graph(\n [node_split_def, node_concat_def],\n 'test_split_model',\n [input],\n [output_concat],\n )\n\n # Create the model (ModelProto)\n onnx_net = helper.make_model(graph_def, producer_name='test_split_model')\n\n #\n # Create reference IR net\n # Please, spesify 'type': 'Input' for inpit node\n # Moreover, do not forget to validate ALL layer attributes!!!\n #\n\n ref_net = None\n\n return onnx_net, ref_net", "def test_BuildModel3(self):\n print(\"\\nTest 7: Building a more complicated Model\")\n builder = StaticBuilder(\"BreakIt\")\n in1 = builder.addInput(10)\n in2 = builder.addInput(20)\n enc1 = builder.addInner(3)\n enc2 = builder.addInner(5, num_islots=2)\n out1 = builder.addOutput()\n out2 = builder.addOutput()\n \n builder.addDirectedLink(in1, enc1)\n builder.addDirectedLink(in2, enc2, islot=0)\n builder.addDirectedLink(enc1, enc2, islot=1)\n builder.addDirectedLink(enc1, out1)\n builder.addDirectedLink(enc2, out2)\n \n builder.build()", "def MakeModel(self):\n group1, group2 = self.data\n self.n, self.m = len(group1), len(group2)\n self.pool = np.hstack((group1, group2))", "def MakeModel(self):\n group1, group2 = self.data\n self.n, self.m = len(group1), len(group2)\n self.pool = np.hstack((group1, group2))", "def split_and_concat_model():\n x = tf.keras.Input(shape=[224, 224, 3, ])\n # TODO: implement split for the following commented out method of splitting\n # y1 = x[:, :100, :, :]\n # y2 = x[:, 101:, :, :]\n y1, y2 = tf.split(x, [100, 124], 1)\n y1 = tf.nn.relu(y1)\n y2 = tf.keras.layers.BatchNormalization()(y2)\n z = tf.keras.layers.concatenate([y1, y2], axis=1)\n z = tf.keras.layers.Flatten()(z)\n output = tf.keras.layers.Dense(10, activation=tf.nn.softmax, name=\"split_and_concat_model\")(z)\n return output", "def build(self, hp, inputs=None):\n input_node = inputs\n embedding_dim = self.embedding_dim or hp.Choice('embedding_dim', [8, 16], default=8)\n output_node = tf.stack(\n [\n tf.tensordot(input_node[0][:, col_id], tf.keras.layers.Embedding(1, embedding_dim)(0), axes=0)\n for col_id in range(self.num_of_fields)\n ],\n axis=1\n )\n return output_node", "def build_model(self):\n for link in self.links:\n # if from neuron is input to graph, add it to input_neurons set\n if self.is_input_neuron(link.from_neuron_id):\n self.input_neurons.add(link.from_neuron_id)\n # add weight to neuron\n if link.to_neuron_id not in self.weights:\n self.weights[link.to_neuron_id] = []\n 
self.weights[link.to_neuron_id].append(link.weight)\n # add input to neuron\n if link.to_neuron_id not in self.connections:\n self.connections[link.to_neuron_id] = []\n self.connections[link.to_neuron_id].append(link.from_neuron_id)", "def build_model(self) -> nn.Module:\n pass", "def build_model_fn(self):", "def concat(a, b):\n return torch.cat((a, b), 1)", "def build_adjudicator(len_encoded_str, len_padded_str=300, lr=1e-4):\n # Second: let's build the Encoded String input handling head\n str_input_dimension = (len_padded_str, len_encoded_str)\n str_processing_head_input = keras.Input(shape=str_input_dimension, name='Discriminator_Num_Input')\n\n # Third: let's build the Encoded Number input handling head\n num_input_dimension = (len_padded_str, 1)\n num_processing_head_input = keras.Input(shape=num_input_dimension, name='Discriminator_Str_Input')\n\n # Fourth: Concatenate the String and Number processed outputs\n combined_name_input = tf.concat([str_processing_head_input, num_processing_head_input], -1)\n combined_name_processed = layers.LSTM(units=1024, return_sequences=True,\n name='Adj_Combined_Name_LSTM_1')(combined_name_input)\n combined_input_processed = layers.LSTM(units=1024, return_sequences=True, dropout=0.1,\n name='Adj_Combined_Input_LSTM_1')(combined_name_processed)\n combined_input_processed = layers.LSTM(units=1024, return_sequences=True, dropout=0.1,\n name='Adj_Combined_Input_LSTM_2')(combined_input_processed)\n combined_input_processed = layers.LSTM(units=1024, return_sequences=True, dropout=0.1,\n name='Adj_Combined_Input_LSTM_3')(combined_input_processed)\n combined_input_processed = layers.LSTM(units=1024, return_sequences=True, dropout=0.1,\n name='Adj_Combined_Input_LSTM_4')(combined_input_processed)\n\n # Sixth: Define each output tail and compile the model\n discriminator_output = layers.Flatten(name='Adj_Flatten')(combined_input_processed)\n discriminator_output = layers.Dropout(0.4, name='Adj_Dropout')(discriminator_output)\n discriminator_output = layers.Dense(units=1, name='Adj_Dense',\n activation='sigmoid')(discriminator_output)\n\n adjudicator_model = models.Model(\n inputs=[str_processing_head_input, num_processing_head_input],\n outputs=discriminator_output, name=\"InChI_Name_Adjudicator\"\n )\n\n optimizer = tf.keras.optimizers.RMSprop(lr, clipvalue=1.0, decay=1e-8)\n losses = {\n 'Adj_Dense': tf.losses.BinaryCrossentropy()\n }\n adjudicator_model.compile(optimizer=optimizer, loss=losses)\n\n adjudicator_model.trainable = False\n\n print(\"\\n\\n\")\n adjudicator_model.summary()\n print(\"\\n\\n\")\n\n adjudicator_model", "def create_helper_build_values_node(\n inputs, output_name,\n dtype, kwargs, axis=0\n ):\n values = []\n tensor_nodes = []\n for idx, inp in enumerate(inputs):\n if not isinstance(inp, (str, bytes)):\n inp, = create_helper_tensor_node(\n np.array([inp], dtype=dtype),\n output_name + \"__value\" + str(idx),\n kwargs\n )\n tensor_nodes.append(inp)\n inp = inp.name\n values.append(inp)\n concat_node, = create_helper_concat_node(values, output_name, axis=axis)\n return tensor_nodes + [concat_node,]", "def concatenate_tasks(\n tasks,\n concat_train=True,\n concat_valid=True,\n concat_test=True,\n):\n new_task = deepcopy(tasks[0])\n new_task._name = \"+\".join(task.name for task in tasks)\n if concat_train:\n new_task._train_data = ConcatDataset(\n [task.train_data for task in tasks])\n if concat_valid:\n new_task._valid_data = ConcatDataset(\n [task.valid_data for task in tasks])\n if concat_test:\n new_task._test_data = 
ConcatDataset([task.test_data for task in tasks])", "def create_model(self):\n\n #Initialises input for left component\n user_embed = Input(shape=(self.embedding_size_useritem,))\n item_embed = Input(shape=(self.embedding_size_useritem,))\n\n #Initialises input for right component\n user_read = Input(shape=(self.history, self.embedding_size_article))\n user_case = Input(shape=(self.embedding_size_article, ))\n\n # Creates Layers for the left component\n concatenated_layer = concatenate([user_embed, item_embed])\n left_layer1 = Dense(128, activation='relu')(concatenated_layer)\n left_layer2 = Dense(64, activation='relu')(left_layer1)\n\n # Creates Layers for the right component\n lstm_layer = Bidirectional(LSTM(64, return_sequences=True))(user_read)\n attention_layer = AttentionWithContext()(lstm_layer)\n\n right_layer_input = Dense(128, activation='relu')(user_case)\n\n elem_wise = multiply([attention_layer, right_layer_input])\n right_layer1 = Dense(64, activation='relu')(elem_wise)\n\n\n # Merges the left and right component\n merged_layer = concatenate([left_layer2, right_layer1])\n merged_layer1 = Dense(256, activation='relu')(merged_layer)\n merged_layer2 = Dense(128, activation='relu')(merged_layer1)\n merged_layer3 = Dense(64, activation='relu')(merged_layer2)\n output = Dense(1, activation='sigmoid')(merged_layer3)\n\n\n self.model = Model(inputs=[user_embed, item_embed] + [user_read] + [user_case], outputs=output)\n self.model.compile(optimizer='adadelta', loss='binary_crossentropy', metrics=['accuracy'])", "def concat(self):\n nfa2 = self.aut_stack.pop()\n nfa1 = self.aut_stack.pop()\n\n nfa1_star = nfa1.transform('X')\n nfa2_star = nfa2.transform('Y')\n\n nfa_concat = Automaton()\n nfa_concat.final = nfa2_star.final\n nfa_concat.q_0 = nfa1_star.q_0\n nfa_concat.states = list(set(nfa1_star.states).union(nfa2_star.states))\n nfa_concat.alphabet = list(set(nfa1_star.alphabet).union(nfa2_star.alphabet))\n nfa_concat.transition = dict(nfa1_star.transition, **nfa2_star.transition)\n for a in nfa1_star.final:\n key = a + ', .'\n if nfa_concat.transition.get(key, 0) == 0:\n nfa_concat.transition[key] = [nfa2_star.q_0]\n else:\n nfa_concat.transition[key].append(nfa2_star.q_0)\n\n self.aut_stack.append(nfa_concat)", "def build_nn_experimental(dropout: float=0.3, verbosity: int=0):\n # Setting Up Input layer\n input_q1 = Input(shape=(512,))\n input_q2 = Input(shape=(512,))\n \n # Network for 1st input Dense 128 --> Relu --> Dense 264 --> Relu\n input1_layer = Dense(512, activation='relu')(input_q1)\n input1_layer = BatchNormalization()(input1_layer)\n input1_layer = Dropout(dropout)(input1_layer)\n \n input1_layer = Dense(512, activation='relu')(input1_layer)\n input1_layer = BatchNormalization()(input1_layer)\n input1_layer = Dropout(dropout)(input1_layer)\n \n input1_layer = Model(inputs=input_q1, outputs=input1_layer)\n \n # Network for 2st input Dense 128 --> Relu --> Dense 264 --> Relu\n input2_layer = Dense(512, activation='relu')(input_q2)\n input2_layer = BatchNormalization()(input2_layer)\n input2_layer = Dropout(dropout)(input2_layer)\n \n input2_layer = Dense(512, activation='relu')(input2_layer)\n input2_layer = BatchNormalization()(input2_layer)\n input2_layer = Dropout(dropout)(input2_layer)\n \n input2_layer = Model(inputs=input_q2, outputs=input2_layer)\n \n merged = concatenate([input1_layer.output, input2_layer.output])\n\n # Fully connected layer & final prediction layer\n pred_layer = Dense(4096, activation='relu')(merged)\n pred_layer = Dense(1024, 
activation='relu')(pred_layer)\n pred_layer = Dense(256, activation='relu')(pred_layer)\n pred_layer = Dense(64, activation='relu')(pred_layer)\n pred_layer = Dropout(dropout)(pred_layer)\n \n pred_layer = Dense(1, activation='sigmoid')(pred_layer)\n \n model = Model(inputs=[input1_layer.input, input2_layer.input], outputs=pred_layer)\n if verbosity > 0:\n model.summary()\n return model", "def build(self, x: popxl.Tensor, seed: Optional[popxl.Tensor] = None) -> popxl.Tensor:\n # x: [b*s, h/tp2]\n # x: identical tp1, sharded tp2\n\n heads_seed = None\n if not self.config.model.eval:\n assert seed is not None, \"A seed Tensor must be provided when creating a non-eval model.\"\n seed, heads_seed = ops.split_random_seed(seed)\n\n z = self.ln_1(x)\n\n z = replicated_all_reduce_identical_inputs(z, group=self.rg_tp1)\n # z: identical tp1, sharded tp2\n\n z = self.heads(z, seed=heads_seed)\n z = self.output(z)\n\n z = replicated_all_reduce_identical_grad_inputs(z, group=self.rg_tp1)\n # z: identical tp1, sharded tp2\n\n self.output_bias = self.add_variable_input(\n \"output_bias\", lambda: np.zeros(z.shape[-1]), z.dtype, replica_grouping=self.rg_tp2.transpose()\n )\n z = z + self.output_bias\n\n if not self.config.model.eval:\n assert seed is not None, \"A seed Tensor must be provided when creating a non-eval model.\"\n z = ops.dropout(z, seed, p=self.config.model.dropout_prob)\n\n z = x + z\n\n # z: identical tp1, sharded tp2\n return z", "def concat(cls, c1, c2, op):\r\n if c1.clause and c2.clause:\r\n return cls('({}) {} ({})'.format(c1.clause, op, c2.clause), c1.params + c2.params)\r\n elif c1.clause:\r\n return c1\r\n elif c2.clause:\r\n return c2\r\n else:\r\n return cls('', ())", "def build_model(self, n_inputs, n_outputs, trainable=True):\n\n\t\t# common layers\n\t\tcomm_input = Input(shape=(n_inputs,))\n\t\tX = Dense(32, activation='relu', name=\"val0\", trainable=trainable)(comm_input)\n\t\tX = Dense(64, activation='relu', name=\"vaaal3\", trainable=trainable)(X)\n\t\tX = Dense(64, activation='relu', name=\"valdd3\", trainable=trainable)(X)\n\n\t\t# value network\n\t\tval_head = Dense(32, activation='relu', name=\"val3\", trainable=trainable)(X)\n\t\tval_head = Dense(1, activation='linear', name=\"val4\", trainable=trainable)(val_head)\n\t\tval_head = RepeatVector(n_outputs)(val_head)\n\t\tval_head = Flatten(name='meanActivation')(val_head)\n\n\t\t# advantage network\n\t\tadv_head = Dense(32, activation='tanh', name=\"val2\", trainable=trainable)(X)\n\t\tadv_head = Dense(n_outputs, activation='linear', name='Activation', trainable=trainable)(adv_head)\n\n\t\tm_adv_head = Lambda(lambda layer: layer - K.mean(layer))(adv_head)\n\t\t# adv_head= Subtract()([adv_head,m_adv_head])\n\n\t\t# Merge both\n\t\tq_values = Add(name=\"Q-value\")([val_head, adv_head])\n\t\tmodel = Model(inputs=[comm_input], outputs=q_values)\n\t\tmodel.compile(loss='mse', optimizer=Adam(lr=0.001))\n\t\tmodel.summary()\n\t\treturn model", "def build(self,input_shape):\n\n self.w = self.add_weight(shape=(input_shape[-1],self.units),\n initializer='random_normal',\n trainable=True)\n self.b = self.add_weight(shape=(self.units,),\n initializer='random_normal',\n trainable=True)", "def build_model(encoders):\n\n # Pclass\n input_pclass_size = len(encoders['pclass_encoder'].classes_)\n input_pclass = Input(shape=(\n input_pclass_size if input_pclass_size != 2 else 1,), name=\"input_pclass\")\n\n # Sex\n input_sex_size = len(encoders['sex_encoder'].classes_)\n input_sex = Input(\n shape=(input_sex_size if input_sex_size != 2 
else 1,), name=\"input_sex\")\n\n # Age\n input_age = Input(shape=(10,), name=\"input_age\")\n\n # Siblings/Spouses Aboard\n input_siblings_spouses_aboard_size = len(\n encoders['siblings_spouses_aboard_encoder'].classes_)\n input_siblings_spouses_aboard = Input(shape=(\n input_siblings_spouses_aboard_size if input_siblings_spouses_aboard_size != 2 else 1,), name=\"input_siblings_spouses_aboard\")\n\n # Parents/Children Aboard\n input_parents_children_aboard_size = len(\n encoders['parents_children_aboard_encoder'].classes_)\n input_parents_children_aboard = Input(shape=(\n input_parents_children_aboard_size if input_parents_children_aboard_size != 2 else 1,), name=\"input_parents_children_aboard\")\n\n # Fare\n input_fare = Input(shape=(10,), name=\"input_fare\")\n\n # Combine all the inputs into a single layer\n concat = concatenate([\n input_pclass,\n input_sex,\n input_age,\n input_siblings_spouses_aboard,\n input_parents_children_aboard,\n input_fare\n ], name=\"concat\")\n\n # Multilayer Perceptron (MLP) to find interactions between all inputs\n hidden = Dense(256, activation=\"relu\", name=\"hidden_1\",\n kernel_regularizer=l2(1e-3))(concat)\n hidden = BatchNormalization(name=\"bn_1\")(hidden)\n hidden = Dropout(0.0, name=\"dropout_1\")(hidden)\n\n for i in range(2-1):\n hidden = Dense(64, activation=\"relu\", name=\"hidden_{}\".format(\n i+2), kernel_regularizer=l2(1e-3))(hidden)\n hidden = BatchNormalization(name=\"bn_{}\".format(i+2))(hidden)\n hidden = Dropout(0.0, name=\"dropout_{}\".format(i+2))(hidden)\n\n output = Dense(1, activation=\"sigmoid\", name=\"output\",\n kernel_regularizer=None)(hidden)\n\n # Build and compile the model.\n model = Model(inputs=[\n input_pclass,\n input_sex,\n input_age,\n input_siblings_spouses_aboard,\n input_parents_children_aboard,\n input_fare\n ],\n outputs=[output])\n model.compile(loss=\"binary_crossentropy\",\n optimizer=AdamWOptimizer(learning_rate=0.1,\n weight_decay=0.05))\n\n return model", "def FixInputsFC(model, inputs):\n first_input = K.constant(inputs[0])\n second_input = K.constant(inputs[1][:,:-1])\n\n Tensor_Input0 = Input(batch_shape = (model.input_shape[1][0], 1))\n\n n_input = keras.layers.Lambda(lambda x: K.concatenate([second_input,x],axis=-1))(Tensor_Input0)\n n2_input = keras.layers.Lambda(lambda x: [first_input, x])(n_input)\n Out1 = model(n2_input)\n# Out2 = keras.layers.Lambda(lambda x : x[:,:,0] - x[:,:,1])(Out1)\n Out2 = keras.layers.Lambda(lambda x : x)(Out1)\n M = keras.Model( Tensor_Input0, Out2 )\n return(M)", "def _build(self):\n if self.attn:\n self.Attn = AttentionNet(self.dim_b1, channels=self.channels, name='Attn')\n self.predsb1 = self.Attn(self.xb1, is_training=self.is_training)\n self.predsb2 = self.Attn(self.xb2, is_training=self.is_training, reuse=True)\n #TODO: generators want to make their synthetics look like b1/b2 to attn model\n\n self.loss_attn = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.predsb1, labels=tf.zeros_like(self.predsb1)))\n self.loss_attn += tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.predsb2, labels=tf.ones_like(self.predsb2)))\n\n self.attnb1 = tf.gradients(self.loss_attn, self.xb1)[0]\n self.attnb2 = tf.gradients(self.loss_attn, self.xb2)[0]\n\n self.attnb1 = tf.abs(self.attnb1)\n self.attnb1 = self.attnb1 / tf.reduce_sum(self.attnb1, axis=1, keep_dims=True)\n self.attnb1 = self.attnb1 / tf.reduce_max(self.attnb1, axis=1, keep_dims=True)\n\n self.attnb2 = tf.abs(self.attnb2)\n self.attnb2 = self.attnb2 / tf.reduce_sum(self.attnb2, 
axis=1, keep_dims=True)\n self.attnb2 = self.attnb2 / tf.reduce_max(self.attnb2, axis=1, keep_dims=True)\n\n self.attnb1 = nameop(self.attnb1, 'attnb1')\n self.attnb2 = nameop(self.attnb2, 'attnb2')\n\n self.G12 = GeneratorResnet(self.dim_b1, self.dim_b2, channels=self.channels, name='G12')\n self.Gb2 = self.G12(self.xb1, is_training=self.is_training)\n self.Gb2 = nameop(self.Gb2, 'Gb2')\n\n self.G21 = GeneratorResnet(self.dim_b2, self.dim_b1, channels=self.channels, name='G21')\n self.Gb1 = self.G21(self.xb2, is_training=self.is_training)\n self.Gb1 = nameop(self.Gb1, 'Gb1')\n\n\n self.Gb2_reconstructed = self.G12(self.Gb1, is_training=self.is_training, reuse=True)\n self.Gb1_reconstructed = self.G21(self.Gb2, is_training=self.is_training, reuse=True)\n\n self.Gb1_reconstructed = nameop(self.Gb1_reconstructed, 'xb1_reconstructed')\n self.Gb2_reconstructed = nameop(self.Gb2_reconstructed, 'xb2_reconstructed')\n\n self.D1 = Discriminator(self.dim_b1, 1, channels=self.channels, name='D1')\n self.D2 = Discriminator(self.dim_b2, 1, channels=self.channels, name='D2')\n\n self.D1_probs_z = self.D1(self.xb1, is_training=self.is_training)\n self.D1_probs_G = self.D1(self.Gb1, is_training=self.is_training, reuse=True)\n self.D1_probs_z = nameop(self.D1_probs_z, 'D1_probs_z')\n self.D1_probs_G = nameop(self.D1_probs_G, 'D1_probs_G')\n\n self.D2_probs_z = self.D2(self.xb2, is_training=self.is_training)\n self.D2_probs_G = self.D2(self.Gb2, is_training=self.is_training, reuse=True)\n self.D2_probs_z = nameop(self.D2_probs_z, 'D2_probs_z')\n self.D2_probs_G = nameop(self.D2_probs_G, 'D2_probs_G')\n\n self._build_loss()\n\n self._build_optimization()", "def test_simple_merge(self):\n input_tensor = Input(shape=(3,))\n x1 = Dense(4)(input_tensor)\n x2 = Dense(5)(x1)\n x3 = Dense(6)(x1)\n x4 = merge([x2, x3], mode=\"concat\")\n x5 = Dense(7)(x4)\n\n model = Model(input=[input_tensor], output=[x5])\n input_names = [\"data\"]\n output_names = [\"output\"]\n\n spec = keras.convert(model, input_names, output_names).get_spec()\n self.assertIsNotNone(spec)\n\n # Test the model class\n self.assertIsNotNone(spec.description)\n self.assertTrue(spec.HasField(\"neuralNetwork\"))\n\n # Test the inputs and outputs\n self.assertEqual(len(spec.description.input), len(input_names))\n self.assertEqual(\n sorted(input_names), sorted(map(lambda x: x.name, spec.description.input))\n )\n self.assertEqual(len(spec.description.output), len(output_names))\n self.assertEqual(\n sorted(output_names), sorted(map(lambda x: x.name, spec.description.output))\n )", "def build_model(self):\n insts1, attrs1, rels1 = self.arg1.get_triples()\n insts2, attrs2, rels2 = self.arg2.get_triples()\n for items, shld_norm in [(insts1, True), (insts2, True), (attrs1, True),\n (attrs2, True), (rels1, False), (rels2, False)]:\n for i in range(len(items)):\n # GUROBI cant handle Unicode so step down to ASCII\n items[i] = [items[i][0].encode('ascii', 'ignore').lower(),\n items[i][1].encode('ascii', 'ignore'),\n items[i][2].encode('ascii', 'ignore')]\n # normalize concept names -- instances and attributes\n if shld_norm:\n items[i][2] = SmatchILP.normalize(items[i][2])\n\n # Attributes are same as relations\n rels1.extend(attrs1)\n rels2.extend(attrs2)\n\n log.debug(\"AMR 1 Instances:\\n %s\" % insts1)\n log.debug(\"AMR 1 Relations:\\n %s\" % rels1)\n log.debug(\"AMR 2 Instances:\\n %s\" % insts2)\n log.debug(\"AMR 2 Relations:\\n %s\" % rels2)\n\n for index, items in [(self.arg1vars, insts1), (self.arg2vars, insts2)]:\n for name, var, concept in 
items:\n assert name == 'instance' # relation name is instance ==> variable definition\n assert var not in index # variable name is unique\n index[var] = concept\n\n var_choices = set() # possible variable matches\n for v1 in self.arg1vars.keys():\n for v2 in self.arg2vars.keys():\n var_choices.add((v1, v2))\n\n # instances are relations too\n rels1.extend(insts1)\n rels2.extend(insts2)\n\n self.arg1size = len(rels1)\n self.arg2size = len(rels2)\n\n trpl_choices = set()\n trpl_var_consts = {}\n for name1, var11, var12 in rels1:\n id1 = \"%s:%s:%s\" % (name1, var11, var12)\n for name2, var21, var22 in rels2:\n possible = 0\n id2 = \"%s:%s:%s\" % (name2, var21, var22)\n # triple name matches && first argument to triples can be matched\n if name1 == name2 and (var11, var21) in var_choices:\n # second argument to triple can also be matched OR\n possible += 1\n if (var12, var22) in var_choices or (\n # they are the same concepts\n # var12 not in self.arg1vars and var22 not in self.arg2vars and\n var12 == var22):\n possible += 1\n trpl_choices.add((id1, id2))\n # constrains between variables and triples\n trpl_var_consts[id1, id2] = [(var11, var21)]\n # if second argument is also variable\n\n if (var12, var22) in var_choices:\n trpl_var_consts[id1, id2].append((var12, var22))\n log.debug('\\t %s <--> %s ? %s ' % (id1, id2, possible))\n\n # Add variables to ILP model\n model = GRBModel('Smatch ILP')\n if log.getLogger().getEffectiveLevel() >= log.INFO:\n model.Params.OutputFlag = 0 # disable output\n log.info(\"Number of possible variable matches %s\" % len(var_choices))\n log.info(\"Number of possible triple matches %s\" % len(trpl_choices))\n\n self.vars = model.addVars(var_choices, vtype=GRB.BINARY, name=\"v\")\n self.trpls = model.addVars(trpl_choices, vtype=GRB.BINARY, name=\"t\")\n\n # constraints\n for v1 in self.arg1vars:\n model.addConstr(self.vars.sum(v1, '*') <= 1, name='to max 1 var')\n for v2 in self.arg2vars:\n model.addConstr(self.vars.sum('*', v2) <= 1, name='from max 1 var')\n\n for trpl_idx, var_idxs in trpl_var_consts.items():\n for var_idx in var_idxs:\n model.addConstr(self.trpls[trpl_idx] <= self.vars[var_idx], name=\"%s::%s\" % (trpl_idx, var_idx))\n\n # objective\n model.setObjective(self.trpls.sum(), GRB.MAXIMIZE)\n self.model = model\n\n # stats for how big the problem is\n var_trpl_consts_count = sum(len(x) for x in trpl_var_consts.values())\n num_constr = len(var_choices) + len(trpl_choices) + var_trpl_consts_count\n num_vars = len(var_choices) + len(trpl_choices)\n log.info(\"ILP SIZE: %d binary variables (%d vars + %d triple vars)\" % (num_vars, len(var_choices), len(trpl_choices)))\n log.info(\"ILP SIZE: %d constraints (%d b/w arg vars and triples)\" % (num_constr, var_trpl_consts_count))", "def build_model(self):\n\n input_placeholder = Input(shape = self.input_shape)\n x = ZeroPadding2D((3, 3))(input_placeholder)\n\n # Stage 1\n x = self.main_path_block(x, 64, (7, 7), 'valid', 'conv1', 'bn_conv1', 'relu', (2, 2))\n x = MaxPooling2D((3, 3), strides = (2, 2))(x)\n\n # Stage 2\n x = self.convolutional_block(x, 3, [64, 64, 256], 2, 'a', 1)\n x = self.identity_block(x, 3, [64, 64, 256], 2, 'b')\n x = self.identity_block(x, 3, [64, 64, 256], 2, 'c')\n\n # Stage 3\n x = self.convolutional_block(x, 3, [128, 128, 512], 3, 'a', 2)\n x = self.identity_block(x, 3, [128, 128, 512], 3, 'b')\n x = self.identity_block(x, 3, [128, 128, 512], 3, 'c')\n x = self.identity_block(x, 3, [128, 128, 512], 3, 'd')\n\n # Stage 4\n x = self.convolutional_block(x, 3, [256, 256, 1024], 4, 'a', 
2)\n x = self.identity_block(x, 3, [256, 256, 1024], 4, 'b')\n x = self.identity_block(x, 3, [256, 256, 1024], 4, 'c')\n x = self.identity_block(x, 3, [256, 256, 1024], 4, 'd')\n x = self.identity_block(x, 3, [256, 256, 1024], 4, 'e')\n x = self.identity_block(x, 3, [256, 256, 1024], 4, 'f')\n\n # Stage 5\n x = self.convolutional_block(x, 3, [512, 512, 2048], 5, 'a', 2)\n x = self.identity_block(x, 3, [512, 512, 2048], 5, 'b')\n x = self.identity_block(x, 3, [512, 512, 2048], 5, 'c')\n \n # Average Pooling Layer\n x = AveragePooling2D((2, 2), name = 'avg_pool')(x)\n \n # Fully Connected Layer\n x = Flatten()(x)\n x = Dense(\n self.classes,\n activation = 'softmax',\n name = 'fc_' + str(self.classes),\n kernel_initializer = glorot_uniform(seed = 0)\n )(x)\n\n self.model = Model(input_placeholder, x, name = 'Resnet50')", "def creat_model_dcca(layer_sizes1, layer_sizes2, input_size1, input_size2, learning_rate, reg_par, outdim_size, use_all_singular_values, beta):\n view1_model = build_mlp_net(layer_sizes1, input_size1, reg_par)\n view2_model = build_mlp_net(layer_sizes2, input_size2, reg_par)\n in_a = Input(shape=(input_size1,))\n in_b = Input(shape=(input_size2,))\n in_c = Input(shape=(1,))\n out_a= view1_model(in_a)\n out_b= view2_model(in_b)\n concat1 = Lambda(myconcat1)([out_a,out_b])\n concat2 = Lambda(myconcat1)([concat1,in_c])\n model = Model([in_a, in_b, in_c], concat2, name='all_model')\n model_optimizer = RMSprop(lr=0.001)\n model.compile(loss=cca_loss(outdim_size, True, 200, True, beta), optimizer=model_optimizer)\n return model", "def build(self):\n\n self.W_AA = self.init([self.n_atom_input_feat, self.n_hidden_AA])\n self.b_AA = model_ops.zeros(shape=[\n self.n_hidden_AA,\n ])\n\n self.W_PA = self.init([self.n_pair_input_feat, self.n_hidden_PA])\n self.b_PA = model_ops.zeros(shape=[\n self.n_hidden_PA,\n ])\n\n self.W_A = self.init([self.n_hidden_A, self.n_atom_output_feat])\n self.b_A = model_ops.zeros(shape=[\n self.n_atom_output_feat,\n ])\n\n self.trainable_weights = [\n self.W_AA, self.b_AA, self.W_PA, self.b_PA, self.W_A, self.b_A\n ]\n if self.update_pair:\n self.W_AP = self.init([self.n_atom_input_feat * 2, self.n_hidden_AP])\n self.b_AP = model_ops.zeros(shape=[\n self.n_hidden_AP,\n ])\n\n self.W_PP = self.init([self.n_pair_input_feat, self.n_hidden_PP])\n self.b_PP = model_ops.zeros(shape=[\n self.n_hidden_PP,\n ])\n\n self.W_P = self.init([self.n_hidden_P, self.n_pair_output_feat])\n self.b_P = model_ops.zeros(shape=[\n self.n_pair_output_feat,\n ])\n\n self.trainable_weights.extend(\n [self.W_AP, self.b_AP, self.W_PP, self.b_PP, self.W_P, self.b_P])", "def build(self, input_shape):\n node_embed_shape = input_shape.node_embed\n edge_embed_shape = input_shape.edge_embed\n\n with tf.name_scope('node'):\n with tf.name_scope('U'):\n self.U = tf.keras.layers.Dense(self.units, use_bias=self.use_bias)\n self.U.build(node_embed_shape)\n\n with tf.name_scope('V'):\n self.V = tf.keras.layers.Dense(self.units, use_bias=self.use_bias)\n self.V.build(node_embed_shape)\n\n with tf.name_scope('norm'):\n self.norm_h = {\n \"batch\": tf.keras.layers.BatchNormalization(),\n \"layer\": tf.keras.layers.LayerNormalization()\n }.get(self.normalization, None)\n if self.norm_h:\n self.norm_h.build(node_embed_shape)\n\n with tf.name_scope('edge'):\n with tf.name_scope('A'):\n self.A = tf.keras.layers.Dense(self.units, use_bias=self.use_bias)\n self.A.build(edge_embed_shape)\n \n with tf.name_scope('B'):\n self.B = tf.keras.layers.Dense(self.units, use_bias=self.use_bias)\n 
self.B.build(node_embed_shape)\n\n with tf.name_scope('C'):\n self.C = tf.keras.layers.Dense(self.units, use_bias=self.use_bias)\n self.C.build(node_embed_shape)\n\n with tf.name_scope('norm'):\n self.norm_e = {\n 'batch': tf.keras.layers.BatchNormalization(),\n 'layer': tf.keras.layers.LayerNormalization(axis=-1)\n }.get(self.normalization, None)\n if self.norm_e:\n self.norm_e.build(edge_embed_shape)\n \n super().build(input_shape)", "def create_org_model( width=28, \r\n height=28, channel=1, verbose=True,epochs=10):\r\n input1 = Input(\r\n shape=(\r\n width,\r\n height,\r\n channel,\r\n ), name='concat_input')\r\n conv1 = Conv2D(32, kernel_size=5, activation='relu', padding='same')\r\n conv2 = Conv2D(32, kernel_size=5, activation='relu', padding='same')\r\n conv3 = Conv2D(64, kernel_size=3, activation='relu', padding='same')\r\n conv4 = Conv2D(64, kernel_size=3, activation='relu', padding='same')\r\n dense1 = Dense(256, activation='relu')\r\n predict = Dense(10, activation='softmax')\r\n\r\n conv1o = conv1(input1)\r\n conv2o = conv2(conv1o)\r\n pool1 = MaxPooling2D(pool_size=(2, 2))(conv2o)\r\n drop1 = Dropout(.25)(pool1)\r\n conv3o = conv3(drop1)\r\n conv4o = conv4(conv3o)\r\n pool2 = MaxPooling2D(pool_size=(2, 2), strides=(2,2))(conv4o)\r\n drop2 = Dropout(.25)(pool2)\r\n drop2f = Flatten()(drop2)\r\n fc1 = dense1(drop2f)\r\n softmax1 = predict(fc1)\r\n\r\n drop2_2 = Input(shape=(7,7,64), name='concat_input') \r\n drop2f_2 = Flatten()(drop2_2)\r\n fc1_2 = dense1(drop2f_2)\r\n softmax1_2 = predict(fc1_2)\r\n\r\n mlp = Model(input1, softmax1)\r\n optimizer = tf.keras.optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)\r\n mlp.compile(\r\n loss='sparse_categorical_crossentropy',\r\n optimizer=optimizer,\r\n metrics=['accuracy'])\r\n\r\n\r\n mlp.load_weights(model_dir+'complete_model.h5')\r\n\r\n for layer in mlp.layers:\r\n layer.trainable = False\r\n\r\n feature_model = Model(input1, drop2)\r\n predict_model = Model(drop2_2, softmax1_2)\r\n\r\n return feature_model, predict_model, mlp", "def build_model_from_inputs(self):\n if self.term_list is None:\n # no supplied token list -- use vocabulary of the training dataset\n # self.term_list = self.vocabulary\n # info(\"Setting bag dimension to {} from input vocabulary.\".format(len(self.term_list)))\n # will generate the vocabulary from the input\n pass\n info(f\"Building {self.name} model\")\n bagger = None\n if self.config.max_terms is not None:\n bagger = Bag(vocabulary=self.term_list, weighting=self.base_name, ngram_range=self.ngram_range, max_terms=self.config.max_terms)\n else:\n bagger = Bag(vocabulary=self.term_list, weighting=self.base_name, ngram_range=self.ngram_range)\n\n train_idx = self.indices.get_train_instances()\n texts = Text.get_strings(self.text.data.get_slice(train_idx))\n bagger.map_collection(texts, fit=True, transform=False)\n self.term_list = bagger.get_vocabulary()\n\n self.dimension = len(self.term_list)\n self.config.dimension = self.dimension", "def build_model(self,nn1=32,nn2=64,lr=0.01,dp=0.1,decay=1e-4,dn1=50,dn2=100):\n\n opt = Adam(lr=self.lr, beta_1=0.9, beta_2=0.999, decay=self.decay)\n model = models.Sequential()\n model.add(Conv1D(filters=self.nn1, kernel_size=3, padding=\"same\", input_shape=(self.n_stp, self.n_feats)))\n model.add(PReLU(alpha_initializer=Constant(value=0.20)))\n model.add(Conv1D(filters=self.nn2, kernel_size=2, padding=\"same\"))\n model.add(MaxPool1D(pool_size=1))\n model.add(PReLU(alpha_initializer=Constant(value=0.20)))\n\n model.add(Dropout(self.dp))\n 
model.add(Flatten())\n model.add(Dense(self.dn1))\n model.add(PReLU(alpha_initializer=Constant(value=0.20)))\n model.add(Dense(self.dn2))\n model.add(PReLU(alpha_initializer=Constant(value=0.20)))\n model.add(Dense(1))\n model.add(Activation('relu'))\n\n model.compile(loss=\"mse\",\n optimizer=opt,\n metrics=[self.soft_acc])\n\n return model", "def _build(self, input_var, name=None):\n out = input_var\n for model in self._models:\n self._last_network = model.build(out, name=name)\n if self._first_network is None:\n self._first_network = self._last_network\n out = self._last_network.outputs\n\n return out", "def _build_model(self):\n \n #convolutional part\n conv_inputs = keras.Input(shape = self._state_shape[0])\n c1 = layers.Conv2D(filters = 4, kernel_size = 2, strides = (2,2), padding = \"same\", activation = 'relu')(conv_inputs)\n c2 = layers.Conv2D(filters = 8, kernel_size = 2, strides = (1,1), padding = \"same\", activation = 'relu')(c1)\n flat = layers.Flatten()(c2)\n\n\n #current green phase layer\n # phase_inputs = keras.Input(shape = (self._state_shape[1],))\n \n #elapsed green time layer\n elapsed_time_inputs = keras.Input(shape = (self._state_shape[2],))\n \n \n #combine elapsed time and green time layer\n # combined_green = layers.concatenate([phase_inputs, elapsed_time_inputs])\n # green_dense = layers.Dense(10, activation='relu')(elapsed_time_inputs)\n \n #combine green layer with conv layer\n all_combined = layers.concatenate([elapsed_time_inputs, flat])\n dense = layers.Dense(32, activation='relu')(all_combined)\n dense = layers.Dense(16, activation='relu')(dense)\n outputs = layers.Dense(self._output_dim, activation='linear')(dense)\n \n model = keras.Model(inputs = [conv_inputs, elapsed_time_inputs], outputs = outputs, name='simple_CNN') \n model.compile(loss=losses.mean_squared_error, optimizer=Adam(lr=self._learning_rate))\n \n return model", "def _build_model(self, name, hidden_layers, nodes):\n with tf.variable_scope(name):\n self.inputs_ = tf.placeholder(tf.float32, [None, self.state_size], name='inputs')\n self.actions_ = tf.placeholder(tf.int32, [None], name='actions')\n one_hot_actions = tf.one_hot(self.actions_, self.action_size)\n self.targetQs_ = tf.placeholder(tf.float32, [None], name='target')\n self.layers = list()\n self.layers.append(fully_connected(\"hidden1\", self.inputs_, nodes))\n for layer in range(hidden_layers):\n self.layers.append(fully_connected(f\"hidden{layer+2}\", self.layers[layer], nodes))\n self.output = fully_connected(\"output\", self.layers[-1], self.action_size, activation=None)\n self.Q = tf.reduce_sum(tf.multiply(self.output, one_hot_actions), axis=1)\n self.loss = tf.reduce_mean(tf.square(self.targetQs_ - self.Q))\n self.opt = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)", "def _build_model(self, name, obs_dim, action_dim, action_bounds):\n with tf.variable_scope(name):\n network = TFNetwork(name)\n num_layers = len(self.hidden_nodes)\n\n x = tf.placeholder(dtype=tf.float32, shape=[None, obs_dim], name='observation')\n network.add_layer(x)\n h = x\n\n # Set layer_func to Fully-Connected or Batch-Normalization layer\n layer_func = fc_layer\n if self.batch_norm:\n layer_func = bn_layer\n\n # Hidden layers\n for i in range(num_layers):\n h, h_weights = layer_func(h, self.hidden_nodes[i], tf.nn.relu, layer_idx=i, phase=self.is_training)\n network.add_layer(h, h_weights)\n\n # Output layer\n n_in = h.get_shape().as_list()[1]\n w_init = tf.random_uniform([n_in, action_dim], minval=-3e-3, maxval=3e-3)\n output, 
output_weights = fc_layer(h, action_dim, tf.nn.tanh, w_init=w_init, name='mu', phase=self.is_training)\n network.add_layer(output, output_weights)\n scaled_output = tf.multiply(output, action_bounds, name='mu_scaled')\n network.add_layer(scaled_output)\n\n return scaled_output, x, network", "def build(self, input_tensors, is_training, lengths=None, hparams=None):", "def conv_cond_concat(x, y):\n\n # Unfinished -- but not needed??\n x_shapes = x.get_shape()\n y_shapes = y.get_shape()\n return tf.concat(4, [x , y*tf.ones([x_shapes[0], x_shapes[1], x_shapes[2] , y_shapes[3]])])", "def SRCNNv2(input_shape, depth_multiplier=1, multi_output=False):\n inputs = Input(input_shape, name=\"inputs\")\n conv1 = Convolution2D(filters=64, kernel_size=9, padding=\"same\", name=\"conv1\", activation=\"relu\")(inputs)\n conv2 = Convolution2D(filters=64, kernel_size=7, padding=\"same\", name=\"conv2\", activation=\"relu\")(conv1)\n #conv3 = Convolution2D(filters=64, kernel_size=3, padding=\"same\", name=\"conv3\", activation=\"relu\")(conv2)\n\n mapping = Convolution2D(filters=32, kernel_size=5, padding=\"same\", name=\"mapping\", activation=\"relu\")(conv2)\n #mapping2 = Convolution2D(filters=16, kernel_size=7, padding=\"same\", name=\"mapping2\", activation=\"relu\")(mapping)\n \n \n if multi_output:\n out = Convolution2D(filters=2, kernel_size=5, padding=\"same\", name=\"output\", activation=\"sigmoid\")(mapping)\n else:\n out = Convolution2D(filters=1, kernel_size=5, padding=\"same\", name=\"output\", activation=\"sigmoid\")(mapping)\n return Model(inputs, out)", "def _build(self, *args, **kwargs):\n num_targets = self.get('num_targets', self.config)\n dropout_rate = self.get('dropout_rate', self.config)\n input_shape = self.get('input_shape', self.config)\n\n inputs = Input(shape=input_shape)\n block_A = self.reduction_block_I(inputs, 32, scope='Block_A')\n block_B = self.reduction_block_I(block_A, 64, scope='Block_B')\n block_C = self.reduction_block_II(block_B, 128, scope='Block_C')\n block_D = self.reduction_block_II(block_C, 256, scope='Block_D')\n block_E = self.reduction_block_II(block_D, 256, scope='Block_E')\n\n z = self.dense_block(block_E, units=self.get('units', self.config),\n dropout=dropout_rate, scope='DenseBlock-I')\n\n output_tensor = Dense(num_targets, activation='sigmoid', name='output')(z)\n\n return [inputs], [output_tensor]", "def ConcatenateWire(*args):\n return _BRepAlgo.brepalgo_ConcatenateWire(*args)", "def build_model(self):\n input_pencil = tf.keras.Input((128,128,3))\n # generator's output\n gen_image = self.gan_generator.model(input_pencil)\n # generator's output\n x = self.gan_discriminator.model([input_pencil,gen_image])\n model = tf.keras.Model(input_pencil,[x,gen_image])\n # compiling the model\n model.compile(loss=['hinge', 'mae'], optimizer = self.optimizer,loss_weights=[1,100], metrics=['accuracy'])\n self.model = model", "def build_model(f1, f2, f3, k1, k2, k3, s1, s2, s3, opt, ssim=True):\n # perform convolution on input images twice\n inputs = Input(shape=(512, 512, 2))\n x = inputs\n \n conv1 = Conv2D(filters=f1, kernel_size=k1, activation='relu', strides=s1, padding='same')(x)\n conv2 = Conv2D(filters=f2, kernel_size=k2, activation='relu', strides=s2, padding='same')(conv1)\n output = Conv2D(filters=f3, kernel_size=k3, activation='sigmoid', strides=s3, padding='same')(conv2)\n \n model = Model(inputs, final)\n \n # add loss function\n if ssim:\n model.compile(optimizer=opt, loss=ssim_loss, metrics=[ssim_loss, 'mse'])\n else:\n model.compile(optimizer=opt, 
loss='mse')\n\n return model", "def reconstruct_input_ext(self, model_in):", "def merge(input):\n input1, input2, examples = input\n out1 = tf.gather(input1, examples[:, 0])\n out2 = tf.gather(input2, examples[:, 1])\n # Ligand-receptor pairs\n output1 = tf.concat([out1, out2], axis=0)\n # Receptor-ligand pairs\n output2 = tf.concat([out2, out1], axis=0)\n return tf.concat((output1, output2), axis=1)", "def _build(self, inp, is_training):\n x = inp\n orig_x = x\n if self.in_filters is None:\n self.in_filters = x.get_shape().as_list()[-1]\n assert self.in_filters == x.get_shape().as_list()[-1], 'Module was initialised for a different input shape'\n\n pool_op = tf.nn.max_pool if len(x.get_shape().as_list()) == 4 else tf.nn.max_pool3d\n\n # Handle strided convolutions\n kernel_size = self.kernel_size\n if np.prod(self.stride) != 1:\n kernel_size = self.stride\n orig_x = pool_op(orig_x, [1, ] + self.stride + [1, ], [1, ] + self.stride + [1, ], 'VALID')\n\n # Add a convolutional layer\n with tf.variable_scope('sub1'):\n x = BatchNorm()(x, is_training)\n x = leaky_relu(x, self.relu_leakiness)\n x = Convolution(self.out_filters, kernel_size, self.stride)(x)\n\n # Add a convolutional layer\n with tf.variable_scope('sub2'):\n x = BatchNorm()(x, is_training)\n x = leaky_relu(x, self.relu_leakiness)\n x = Convolution(self.out_filters, self.kernel_size)(x)\n\n # Add the residual\n with tf.variable_scope('sub_add'):\n # Handle differences in input and output filter sizes\n if self.in_filters < self.out_filters:\n orig_x = tf.pad(orig_x, [[0, 0]] * (len(x.get_shape().as_list()) - 1) +\n [[int(np.floor((self.out_filters - self.in_filters) / 2.)),\n int(np.ceil((self.out_filters - self.in_filters) / 2.))]])\n elif self.in_filters > self.out_filters:\n orig_x = Convolution(self.out_filters, [1] * len(self.kernel_size), 1)(orig_x)\n\n x += orig_x\n return x", "def brepalgo_ConcatenateWire(*args):\n return _BRepAlgo.brepalgo_ConcatenateWire(*args)", "def build_model_mobilenet(num_classes):", "def combined_model(video_frames, audio_frames):\n\n audio_features = audio_model([], audio_frames)\n visual_features = video_model(video_frames,[])\n\n return tf.concat(2, (audio_features, visual_features), name='concat')", "def build(self):\n sequence_input = Input(shape=(self.max_sequence_length, ), dtype='int32')\n embedded_sequences = self.embedding_layer(sequence_input)\n x = Conv1D(128, 5, activation='relu')(embedded_sequences)\n x = MaxPooling1D(5)(x)\n x = Conv1D(128, 5, activation='relu')(x)\n x = MaxPooling1D(5)(x)\n x = Flatten()(x)\n x = Dense(128, activation='relu')(x)\n\n y = Bidirectional(LSTM(50, dropout=0.2, recurrent_dropout=0.2))(embedded_sequences)\n z = concatenate([x, y])\n preds = Dense(6, activation='softmax')(z)\n self.model = Model(sequence_input, preds)", "def build_network(self):\n\n input_placeholder = Input(shape = self.input_shape)\n\n # Stage 1\n x = self.main_path_block(\n input_placeholder,\n 64, (7, 7), 'same',\n 'conv1', 'bn_conv1',\n activation = 'relu',\n strides = (2, 2)\n )\n x = MaxPooling2D((3, 3), strides = (2, 2), padding = 'same')(x)\n\n # Stage 2\n x = self.identity_block(x, 64, 'relu', 2, 'a', False)\n x = self.identity_block(x, 64, 'relu', 2, 'b')\n\n # Stage 3\n x = self.convolutional_block(x, [128, 128, 128], 'relu', 3, 'a')\n x = self.identity_block(x, 128, 'relu', 3, 'b')\n\n # Stage 4\n x = self.convolutional_block(x, [256, 256, 256], 'relu', 4, 'a')\n x = self.identity_block(x, 256, 'relu', 4, 'b')\n\n # Stage 5\n x = self.convolutional_block(x, [512, 512, 512], 
'relu', 5, 'a')\n x = self.identity_block(x, 512, 'relu', 4, 'b')\n\n # Fully Connected Layers\n x = BatchNormalization(axis = 3)(x)\n x = Activation('relu')(x)\n x = AveragePooling2D((2, 1), padding = 'valid', strides = (2, 2))(x)\n x = Flatten()(x)\n x = Dense(512)\n x = Dense(\n self.classes, activation = 'softmax',\n name = 'fc_' + str(self.classes),\n kernel_initializer = glorot_uniform(seed = 0)\n )(x)\n\n self.model = Model(input_placeholder, x, name = 'Resnet18')", "def build(bottom_model, num_classes):\n\n top_model = bottom_model.output\n top_model = GlobalAveragePooling2D()(top_model)\n top_model = Dense(1024,activation='relu')(top_model)\n top_model = Dense(1024,activation='relu')(top_model)\n top_model = Dense(512,activation='relu')(top_model)\n top_model = Dense(num_classes,activation='softmax')(top_model)\n return top_model", "def concat(seq1, seq2):\n if type_tag(seq1) == type_tag(seq2):\n return seq1 + seq2\n else:\n types = (type_tag(seq1), type_tag(seq2))\n if types in concat.adders:\n return concat.adders[types](seq1, seq2)", "def conv_cond_concat(x, y):\n x_shapes = x.get_shape()\n y_shapes = y.get_shape()\n return tf.concat(axis=3, values=[x, y*tf.ones([x_shapes[0], x_shapes[1], x_shapes[2], y_shapes[3]])])", "def build_model(self):\n # Define model inputs for the encoder/decoder stack\n x_enc = Input(shape=(self.seq_len_in, self.input_feature_amount), name=\"x_enc\")\n x_dec = Input(shape=(self.seq_len_out, self.output_feature_amount), name=\"x_dec\")\n\n # Add noise\n x_dec_t = GaussianNoise(0.2)(x_dec)\n\n input_conv2 = Conv1D(filters=64, kernel_size=5, strides=2, activation='relu', padding='same')\n input_conv1 = Conv1D(filters=64, kernel_size=3, strides=2, activation='relu', padding='same', name=\"last_conv_layer\")\n\n input_conv2_out = input_conv2(x_enc)\n input_conv1_out = input_conv1(input_conv2_out)\n\n # Define the encoder GRU, which only has to return a state\n encoder_gru = GRU(self.state_size, return_sequences=True, return_state=True, name=\"encoder_gru\")\n encoder_out, encoder_state = encoder_gru(input_conv1_out)\n\n # Decoder GRU\n decoder_gru = GRU(self.state_size, return_state=True, return_sequences=True,\n name=\"decoder_gru\")\n # Use these definitions to calculate the outputs of out encoder/decoder stack\n dec_intermediates, decoder_state = decoder_gru(x_dec_t, initial_state=encoder_state)\n\n # Define the attention layer\n attn_layer = AttentionLayer(name=\"attention_layer\")\n attn_out, attn_states = attn_layer([encoder_out, dec_intermediates])\n\n # Concatenate decoder and attn out\n decoder_concat_input = Concatenate(axis=-1, name='concat_layer')([dec_intermediates, attn_out])\n\n # Define the dense layer\n dense = Dense(self.output_feature_amount, activation='linear', name='output_layer')\n dense_time = TimeDistributed(dense, name='time_distributed_layer')\n decoder_pred = dense_time(decoder_concat_input)\n\n # Define the encoder/decoder stack model\n encdecmodel = tsModel(inputs=[x_enc, x_dec], outputs=decoder_pred)\n\n # Define the separate encoder model for inferencing\n encoder_inf_inputs = Input(shape=(self.seq_len_in, self.input_feature_amount), name=\"encoder_inf_inputs\")\n\n input_conv2_inf = input_conv2(encoder_inf_inputs)\n input_conv1_inf_out = input_conv1(input_conv2_inf)\n\n encoder_inf_out, encoder_inf_state = encoder_gru(input_conv1_inf_out)\n encoder_model = tsModel(inputs=encoder_inf_inputs, outputs=[encoder_inf_out, encoder_inf_state])\n\n # Define the separate encoder model for inferencing\n decoder_inf_inputs = 
Input(shape=(1, self.output_feature_amount), name=\"decoder_inputs\")\n encoder_inf_states = Input(shape=(encdecmodel.get_layer('last_conv_layer').output_shape[1], self.state_size), name=\"decoder_inf_states\")\n decoder_init_state = Input(shape=(self.state_size,), name=\"decoder_init\")\n\n decoder_inf_out, decoder_inf_state = decoder_gru(decoder_inf_inputs, initial_state=decoder_init_state)\n attn_inf_out, attn_inf_states = attn_layer([encoder_inf_states, decoder_inf_out])\n decoder_inf_concat = Concatenate(axis=-1, name='concat')([decoder_inf_out, attn_inf_out])\n decoder_inf_pred = TimeDistributed(dense)(decoder_inf_concat)\n decoder_model = tsModel(inputs=[encoder_inf_states, decoder_init_state, decoder_inf_inputs],\n outputs=[decoder_inf_pred, attn_inf_states, decoder_inf_state])\n\n return encoder_model, decoder_model, encdecmodel", "def merge_models(model_1, model_2, task=None):\n\n def _merge_models(model_1, model_2):\n\n result_model = copy.deepcopy(model_1)\n\n if isinstance(model_1, torch.nn.Embedding):\n\n result_model = _add_embedding_layer(model_1, model_2)\n\n elif isinstance(model_1, torch.nn.Linear):\n result_model = _add_linear_layer(model_1, model_2)\n\n elif isinstance(model_1, torch.nn.LayerNorm):\n result_model = _add_double_norm_layer(model_1, model_2)\n\n elif isinstance(model_1, BertSelfAttention):\n result_model = _add_bert_self_attention_layer(model_1, model_2)\n\n for name_1, name_2 in zip(model_1._modules, model_2._modules):\n module_1 = model_1._modules[name_1]\n module_2 = model_2._modules[name_2]\n\n result_model._modules[name_1] = _merge_models(module_1, module_2)\n\n return result_model\n\n result_model = _merge_models(model_1, model_2)\n\n result_model._text_field_embedder._token_embedders[\"tokens\"].output_dim = 1024\n\n if task == \"QA\":\n result_model._linear_layer = _add_final_linear_layer(\n model_1._linear_layer, model_2._linear_layer\n )\n else:\n result_model._classification_layer = _add_final_linear_layer(\n model_1._classification_layer, model_2._classification_layer\n )\n\n return result_model", "def conv_cond_concat(x, y):\n x_shapes = x.get_shape()\n y_shapes = y.get_shape()\n return tf.concat(3, [x, y * tf.ones([x_shapes[0], x_shapes[1], x_shapes[2], y_shapes[3]])])", "def __init__(self, incomings, a=tf.identity, name='ConcatLayer'):\n super(ConcatLayer, self).__init__()\n with tf.variable_scope(name) as self.layer_scope:\n self.incomings = []\n self.incoming_shapes = []\n \n for incoming in incomings:\n incoming, incoming_shape = get_input(incoming)\n self.incomings.append(incoming)\n self.incoming_shapes.append(incoming_shape)\n self.name = name\n self.a = a", "def build_model(data_tensor, reuse, training):\n with tf.variable_scope('cnn', reuse=reuse):\n with tf.variable_scope('input', reuse=reuse):\n conv_aux = {\n 'pretrained': os.path.join(\n 'weights',\n 'gabors_for_contours_7.npy'),\n 'pretrained_key': 's1',\n 'nonlinearity': 'square'\n }\n x = conv.conv_layer(\n bottom=data_tensor,\n name='gabor_input',\n stride=[1, 1, 1, 1],\n padding='SAME',\n trainable=training,\n use_bias=True,\n aux=conv_aux)\n layer_hgru = hgru.hGRU(\n 'hgru_1',\n x_shape=x.get_shape().as_list(),\n timesteps=8,\n h_ext=15,\n strides=[1, 1, 1, 1],\n padding='SAME',\n # aux={'gamma': False},\n train=training)\n h2 = layer_hgru.build(x)\n\n with tf.variable_scope('readout_1', reuse=reuse):\n activity = conv.conv_layer(\n bottom=h2,\n name='pre_readout_conv',\n num_filters=2,\n kernel_size=1,\n trainable=training,\n use_bias=False)\n pool_aux = {'pool_type': 
'max'}\n activity = pooling.global_pool(\n bottom=activity,\n name='pre_readout_pool',\n aux=pool_aux)\n activity = normalization.batch(\n bottom=activity,\n name='readout_1_bn',\n training=training)\n\n with tf.variable_scope('readout_2', reuse=reuse):\n activity = tf.layers.flatten(\n activity,\n name='flat_readout')\n activity = tf.layers.dense(\n inputs=activity,\n units=2)\n return activity, h2", "def TextRCNN1(inputs, lstm_size=1024, cnns=[(32,1), (32,3), (64,5), (128,7)]):\n tf_input, cnn_feats = cnn_backbone(inputs=inputs, cnns=cnns)\n\n tf_x = tf.keras.layers.concatenate(cnn_feats)\n tf_x = tf.keras.layers.CuDNNLSTM(lstm_size)(tf_x)\n \n tf_readout = tf.keras.layers.Dense(1, activation='sigmoid')(tf_x)\n model = tf.keras.models.Model(tf_input, tf_readout, name='TextRCNN1') \n model.compile(loss='binary_crossentropy', optimizer='adam')\n return model", "def model(self, inputs):\n h1 = dense(inputs, self.weights[0], self.biases[0], tf.nn.relu) #hidden layer 1\n h2 = dense(h1, self.weights[1], self.biases[1], tf.nn.relu) #hidden layer 2\n\n out = dense(h2, self.weights[2], self.biases[2])\n\n return out", "def build_model(self):\n if not os.path.isdir(os.path.join(self.save_dir, self.name)):\n os.mkdir(os.path.join(self.save_dir, self.name))\n self.fitted = False\n else:\n self.fitted = True\n \n if self.hidden_ratio != 1.0:\n hidden_dim_A = int(self.dimension_A * self.hidden_ratio)\n hidden_dim_V = int(self.dimension_V * self.hidden_ratio)\n hidden_dim = int((self.dimension_A + self.dimension_V) * self.hidden_ratio / 4)\n else:\n hidden_dim_A = int(self.dimension_A * 0.75)\n hidden_dim_V = int(self.dimension_V * 0.75)\n hidden_dim = int((self.dimension_A + self.dimension_V) * 0.5)\n\n input_data_A = Input(shape=(self.dimension_A, ), name='audio_input')\n input_data_V = Input(shape=(self.dimension_V, ), name='video_input')\n encoded_input = Input(shape=(hidden_dim, ))\n \n encoded_A = Dense(hidden_dim_A, \n activation='relu', kernel_initializer='he_uniform', \n name='audio_encoded')(input_data_A)\n encoded_V = Dense(hidden_dim_V, \n activation='relu', kernel_initializer='he_uniform', \n name='video_encoded')(input_data_V)\n\n shared = Concatenate(axis=1, name='concat')([encoded_A, encoded_V])\n if self.sparse:\n encoded = Dense(hidden_dim, \n activation='relu',\n activity_regularizer=self.sparse_regularizer,\n kernel_initializer='he_uniform', \n name='shared_repres')(shared)\n else:\n encoded = Dense(hidden_dim, \n activation='relu',\n kernel_initializer='he_uniform', \n name='shared_repres')(shared)\n \n decoded_A = Dense(hidden_dim_A, \n activation='relu', kernel_initializer='he_uniform', \n name='audio_decoded')(encoded)\n decoded_V = Dense(hidden_dim_V, \n activation='relu', kernel_initializer='he_uniform', \n name='video_decoded')(encoded)\n\n decoded_A = Dense(self.dimension_A, activation='linear',\n name='audio_recon')(decoded_A)\n decoded_V = Dense(self.dimension_V, activation='linear',\n name='video_recon')(decoded_V)\n\n self.autoencoder = Model(inputs=[input_data_A, input_data_V], outputs=[decoded_A, decoded_V])\n self.encoder = Model(inputs=[input_data_A, input_data_V], outputs=encoded)\n self.decoder_A = Model(inputs=encoded_input, \n outputs=self.autoencoder.get_layer('audio_recon')(\n self.autoencoder.get_layer('audio_decoded')(\n encoded_input)))\n self.decoder_V = Model(inputs=encoded_input, \n outputs=self.autoencoder.get_layer('video_recon')(\n self.autoencoder.get_layer('video_decoded')(\n encoded_input)))\n\n # configure model\n 
self.autoencoder.compile(optimizer='adam', \n loss='mse',\n metrics=[metrics.mse, metrics.mse],\n loss_weights=[0.5, 0.5])\n print(\"--\" * 20)\n print(\"autoencoder\")\n print(self.autoencoder.summary())\n print(\"--\" * 20)\n print(\"encoder\")\n print(self.encoder.summary())\n print(\"--\" * 20)\n print(\"decoder (A)\")\n print(self.decoder_A.summary())\n print(\"--\" * 20)\n print(\"decoder (V)\")\n print(self.decoder_V.summary())\n print(\"--\" * 20)\n\n plot_model(self.autoencoder, show_shapes=True, to_file=os.path.join(self.save_dir, self.name, 'bimodal_DDAE.png'))", "def gen_input(args, datareader, bkd_gids):\n As = {}\n Xs = {}\n for gid in bkd_gids:\n if gid not in As: As[gid] = torch.tensor(datareader.data['adj_list'][gid], dtype=torch.float)\n if gid not in Xs: Xs[gid] = torch.tensor(datareader.data['features'][gid], dtype=torch.float)\n Ainputs = {}\n Xinputs = {}\n \n if args.gtn_input_type == '1hop':\n for gid in bkd_gids:\n if gid not in Ainputs: Ainputs[gid] = As[gid].clone().detach()\n if gid not in Xinputs: Xinputs[gid] = torch.mm(Ainputs[gid], Xs[gid])\n \n elif args.gtn_input_type == '2hop':\n for gid in bkd_gids:\n As[gid] = torch.add(As[gid], torch.mm(As[gid], As[gid]))\n As[gid] = torch.where(As[gid]>0, torch.tensor(1.0, requires_grad=True),\n torch.tensor(0.0, requires_grad=True))\n As[gid].fill_diagonal_(0.0)\n \n for gid in bkd_gids:\n if gid not in Ainputs: Ainputs[gid] = As[gid].clone().detach()\n if gid not in Xinputs: Xinputs[gid] = torch.mm(Ainputs[gid], Xs[gid])\n \n \n elif args.gtn_input_type == '1hop_degree': \n rowsums = [torch.add(torch.sum(As[gid], dim=1), 1e-6) for gid in bkd_gids]\n re_Ds = [torch.diag(torch.pow(rowsum, -1)) for rowsum in rowsums]\n \n for i in range(len(bkd_gids)):\n gid = bkd_gids[i]\n if gid not in Ainputs: Ainputs[gid] = torch.mm(re_Ds[i], As[gid])\n if gid not in Xinputs: Xinputs[gid] = torch.mm(Ainputs[gid], Xs[gid])\n \n \n elif args.gtn_input_type == '2hop_degree':\n for gid in bkd_gids:\n As[gid] = torch.add(As[gid], torch.mm(As[gid], As[gid]))\n As[gid] = torch.where(As[gid]>0, torch.tensor(1.0, requires_grad=True),\n torch.tensor(0.0, requires_grad=True))\n As[gid].fill_diagonal_(0.0)\n \n rowsums = [torch.add(torch.sum(As[gid], dim=1), 1e-6) for gid in bkd_gids]\n re_Ds = [torch.diag(torch.pow(rowsum, -1)) for rowsum in rowsums]\n \n for i in range(len(bkd_gids)):\n gid = bkd_gids[i]\n if gid not in Ainputs: Ainputs[gid] = torch.mm(re_Ds[i], As[gid])\n if gid not in Xinputs: Xinputs[gid] = torch.mm(Ainputs[gid], Xs[gid])\n \n else: raise NotImplementedError('not support other types of aggregated inputs')\n\n # pad each input into maxi possible size (N, N) / (N, F)\n NodeMax = int(datareader.data['n_node_max'])\n FeatDim = np.array(datareader.data['features'][0]).shape[1]\n for gid in Ainputs.keys():\n a_input = Ainputs[gid]\n x_input = Xinputs[gid]\n \n add_dim = NodeMax - a_input.shape[0]\n Ainputs[gid] = np.pad(a_input, ((0, add_dim), (0, add_dim))).tolist()\n Xinputs[gid] = np.pad(x_input, ((0, add_dim), (0, 0))).tolist()\n Ainputs[gid] = torch.tensor(Ainputs[gid])\n Xinputs[gid] = torch.tensor(Xinputs[gid])\n\n return Ainputs, Xinputs", "def concat(inp):\n if(type(inp) == tuple):\n return\n if(inp.getName() == '&'):\n if(inp.getFirst().getName() == 'tt' and inp.getSec() is not None):\n inp.setName(inp.getSec().getName())\n inp.setFirst(inp.getSec().getFirst())\n inp.setSec(inp.getSec().getSec())\n if(inp.getSec() is None):\n return\n if(inp.getSec().getName() == 'tt' and inp.getFirst() is not None):\n 
inp.setName(inp.getFirst().getName())\n if(inp.getName() in doubles or inp.getName() in singles):\n inp.setFirst(inp.getFirst().getFirst())\n inp.setSec(inp.getFirst().getSec())\n else:\n inp.setAtom()", "def build(self, hp, inputs=None):\n input_node = inputs\n # TODO: modify default hash_size, current version is wrong when category of a feature is more than 10000\n hash_size = self.hash_size or [hp.Choice('hash_size', [10000], default=10000)\n for _ in range(self.num_of_fields)]\n embedding_dim = self.embedding_dim or hp.Choice('embedding_dim', [8, 16], default=8)\n output_node = tf.stack(\n [\n tf.keras.layers.Embedding(hash_size[col_id], embedding_dim)(input_node[0][:, col_id])\n for col_id in range(self.num_of_fields)\n ],\n axis=1\n )\n return output_node", "def build(self):\n\n # bgr_ = bgr*255.0\n bgr_= self.X\n start_time = time.time()\n print(\"build model started\")\n\n # blue ,green, red = tf.split(axis=3, num_or_size_splits=3, value= bgr)\n red ,green, blue, = tf.split(axis=3, num_or_size_splits=3, value= bgr_)\n assert red.get_shape().as_list()[1:] == [224, 224, 1]\n assert green.get_shape().as_list()[1:] == [224, 224, 1]\n assert blue.get_shape().as_list()[1:] == [224, 224, 1]\n bgr = tf.concat(axis=3, values=[\n # blue - VGG_MEAN[0],\n # green - VGG_MEAN[1],\n # red - VGG_MEAN[2],\n\n red - VGG_MEAN[0],\n green - VGG_MEAN[1],\n blue - VGG_MEAN[2],\n ])\n assert bgr.get_shape().as_list()[1:] == [224, 224, 3]\n\n\n\n print(bgr.shape)\n\n self.conv1_1 = self.conv_layer(bgr, \"conv1_1\")\n self.conv1_2 = self.conv_layer(self.conv1_1, \"conv1_2\")\n self.pool1 = self.max_pool(self.conv1_2, 'pool1')\n\n self.conv2_1 = self.conv_layer(self.pool1, \"conv2_1\")\n self.conv2_2 = self.conv_layer(self.conv2_1, \"conv2_2\")\n self.pool2 = self.max_pool(self.conv2_2, 'pool2')\n\n\n\n\n self.conv3_1 = self.conv_layer(self.pool2, \"conv3_1\")\n self.conv3_2 = self.conv_layer(self.conv3_1, \"conv3_2\")\n self.conv3_3 = self.conv_layer(self.conv3_2, \"conv3_3\")\n self.pool3 = self.max_pool(self.conv3_3, 'pool3')\n\n self.conv4_1 = self.conv_layer(self.pool3, \"conv4_1\")\n self.conv4_2 = self.conv_layer(self.conv4_1, \"conv4_2\")\n self.conv4_3 = self.conv_layer(self.conv4_2, \"conv4_3\")\n self.pool4 = self.max_pool(self.conv4_3, 'pool4')\n\n\n\n\n\n self.conv5_1 = self.conv_layer(self.pool4, \"conv5_1\")\n self.conv5_2 = self.conv_layer(self.conv5_1, \"conv5_2\")\n self.conv5_3 = self.conv_layer(self.conv5_2, \"conv5_3\")\n self.pool5 = self.max_pool(self.conv5_3, 'pool5')\n\n self.fc6 = self.fc_layer(self.pool5, \"fc6\")\n assert self.fc6.get_shape().as_list()[1:] == [4096]\n self.relu6 = tf.nn.relu(self.fc6)\n\n self.fc7 = self.fc_layer(self.relu6, \"fc7\")\n self.relu7 = tf.nn.relu(self.fc7)\n\n self.fc8 = self.fc_layer(self.relu7, \"fc8\")\n\n # self.fc9 = self.fc_layer(self.fc8,'fc9')\n # self.relu9 = tf.nn.relu(self.fc9)\n\n\n\n\n relu8 = tf.nn.relu(self.fc8)\n fc9 = self.fc_layer(relu8, 'fc9')\n print((\"build model finished: %ds\" % (time.time() - start_time)))\n return fc9\n\n # self.prob = tf.nn.softmax(self.fc8, name=\"prob\")", "def build_model():\n mdl = Sequential()\n\n # normalization\n mdl.add(Lambda(lambda x: x/128. 
- 1, input_shape=IMAGE_SHAPE, name=\"input\"))\n\n # trim image\n mdl.add(Lambda(lambda x: x[:, 10:-10, :, :]))\n\n # convolutions\n mdl.add(Convolution2D(24, 5, 5, subsample=(2, 2), border_mode='same',))\n mdl.add(MaxPooling2D(pool_size=(2, 2)))\n mdl.add((Dropout(0.5)))\n mdl.add(Activation('relu'))\n\n mdl.add(Convolution2D(36, 5, 5, subsample=(2, 2), border_mode='same',))\n mdl.add(MaxPooling2D(pool_size=(2, 2)))\n mdl.add((Dropout(0.5)))\n mdl.add(Activation('relu'))\n\n mdl.add(Convolution2D(48, 5, 5, subsample=(2, 2), border_mode='same',))\n mdl.add(MaxPooling2D(pool_size=(2, 2)))\n mdl.add((Dropout(0.5)))\n mdl.add(Activation('relu'))\n\n mdl.add(Convolution2D(64, 3, 3, subsample=(1, 1), border_mode='same',))\n mdl.add((Dropout(0.5)))\n mdl.add(Activation('relu'))\n\n mdl.add(Convolution2D(64, 3, 3, subsample=(1, 1), border_mode='same',))\n mdl.add((Dropout(0.5)))\n mdl.add(Activation('relu'))\n\n mdl.add(Flatten())\n\n mdl.add(Dense(128, activation='relu'))\n mdl.add(Dense(64, activation='relu'))\n mdl.add(Dense(1, name=\"output\"))\n\n mdl.summary()\n\n return mdl", "def build_deepsets_joint_representation_model():\n # We first create the embedding model\n test_input = tf.keras.layers.Input(shape=(_NUM_INPUTS.value,))\n train_input = tf.keras.layers.Input(shape=(_NUM_INPUTS.value,))\n train_label = tf.keras.layers.Input(shape=(1,))\n\n # Obtain a mask variable. Output dimension [1, _NUM_INPUTS.value]\n mask = tf.ones((1, _NUM_INPUTS.value))\n one_row = tf.reshape(tf.gather(train_input, [0], axis=0), [-1])\n mask = mask * tf.cast(tf.not_equal(one_row, _PAD_VALUE.value), tf.float32)\n\n # Calibrate input if haven't done so\n calibrated_train_input = train_input\n calibrated_test_input = test_input\n calibration = tfl.layers.PWLCalibration(\n input_keypoints=np.linspace(0.0, 1.0, _NUM_CALIB_KEYS.value),\n units=_NUM_INPUTS.value,\n output_min=0.0,\n output_max=1.0,\n impute_missing=True,\n missing_input_value=_MISSING_VALUE.value,\n name=\"input_calibration\")\n calibrated_train_input = calibration(train_input)\n calibrated_test_input = calibration(test_input)\n\n # Reshape the input to pair-wise format.\n # Output dimension [_BATCH_SIZE.value, _NUM_INPUTS.value**2, 2]\n pairwise_train_input = get_pairwise_inputs(calibrated_train_input)\n pairwise_test_input = get_pairwise_inputs(calibrated_test_input)\n\n # Obtain pairwise masks. Output dimesion [_NUM_INPUTS.value**2,]\n pairwise_mask = get_pairwise_inputs(mask)\n pairwise_mask = tf.reshape(tf.reduce_prod(pairwise_mask, axis=-1), [-1])\n\n # Obtain pairwise labels.\n # Output dimension\n # [_BATCH_SIZE.value, _NUM_INPUTS.value**2, _MAX_NUM_CLASSES.value]\n one_hot_train_label = tf.one_hot(\n tf.cast(train_label, tf.int32), _MAX_NUM_CLASSES.value)\n pairwise_train_label = tf.tile(one_hot_train_label,\n tf.constant([1, _NUM_INPUTS.value**2, 1]))\n\n # Concatenate pairwise inputs and labels.\n # Output dimension\n # [_BATCH_SIZE.value, _NUM_INPUTS.value**2, _MAX_NUM_CLASSES.value + 2]\n pairwise_train_input = tf.concat([pairwise_train_input, pairwise_train_label],\n axis=-1)\n\n # Obtain distribution representation. 
Output dimension\n # [_BATCH_SIZE.value, _NUM_INPUTS.value**2,\n # _DISTRIBUTION_REPRESENTATION_DIM.value]\n batch_embedding = tf.keras.layers.Dense(\n _DISTRIBUTION_REPRESENTATION_DIM.value, activation=\"relu\")(\n pairwise_train_input)\n for _ in range(_HIDDEN_LAYER.value - 1):\n batch_embedding = tf.keras.layers.Dense(\n _DISTRIBUTION_REPRESENTATION_DIM.value, activation=\"relu\")(\n batch_embedding)\n\n # Average embeddings over the batch. Output dimension\n # [_NUM_INPUTS.value**2, _DISTRIBUTION_REPRESENTATION_DIM.value].\n mean_distribution_embedding = tf.reduce_mean(batch_embedding, axis=0)\n\n outputs = []\n for pairwise_input in [pairwise_test_input, pairwise_train_input]:\n # [_NUM_INPUTS.value**2, _DISTRIBUTION_REPRESENTATION_DIM.value] ->\n # [_BATCH_SIZE.value, _NUM_INPUTS.value**2,\n # _DISTRIBUTION_REPRESENTATION_DIM.value] via repetition.\n distribution_embedding = tf.tile(\n [mean_distribution_embedding],\n tf.stack([tf.shape(pairwise_input)[0],\n tf.constant(1),\n tf.constant(1)]))\n # Concatenate pairwise inputs and embeddings. Output shape\n # [_BATCH_SIZE.value, _NUM_INPUTS.value**2,\n # 2 + _DISTRIBUTION_REPRESENTATION_DIM.value]\n concat_input = tf.concat([pairwise_input, distribution_embedding], axis=-1)\n\n # Apply a common function to each pair. Output shape\n # [_BATCH_SIZE.value, _NUM_INPUTS.value**2, _DEEPSETS_LAYER_UNITS.value]\n pairwise_output = tf.keras.layers.Dense(\n _DEEPSETS_LAYER_UNITS.value, activation=\"relu\")(\n concat_input)\n for _ in range(_HIDDEN_LAYER.value - 1):\n pairwise_output = tf.keras.layers.Dense(\n _DEEPSETS_LAYER_UNITS.value, activation=\"relu\")(\n pairwise_output)\n\n # Average pair-wise outputs across valid pairs.\n # Output shape [_BATCH_SIZE.value, _DEEPSETS_LAYER_UNITS.value]\n average_outputs = tf.tensordot(pairwise_mask, pairwise_output, [[0], [1]])\n average_outputs = average_outputs / tf.reduce_sum(pairwise_mask)\n\n # Use several dense layers to get the final output\n final_output = tf.keras.layers.Dense(\n _OUTPUT_LAYER_UNITS.value, activation=\"relu\")(\n average_outputs)\n for i in range(_HIDDEN_LAYER.value - 1):\n final_output = tf.keras.layers.Dense(\n _OUTPUT_LAYER_UNITS.value, activation=\"relu\")(\n final_output)\n outputs.append(final_output)\n\n test_outputs = tf.math.l2_normalize(outputs[0], axis=1)\n train_outputs = tf.math.l2_normalize(outputs[1], axis=1)\n similarity_matrix = tf.exp(\n tf.matmul(test_outputs, tf.transpose(train_outputs)))\n\n similarity_list = []\n for i in range(_MAX_NUM_CLASSES.value):\n mask = tf.cast(tf.squeeze(tf.equal(train_label, i)), tf.float32)\n similarity_list.append(similarity_matrix * mask)\n\n similarity = [\n tf.reduce_mean(s, axis=1, keepdims=True) for s in similarity_list\n ]\n sum_similarity = tf.reduce_sum(\n tf.concat(similarity, axis=1), axis=1, keepdims=True)\n final_output = [similarity / sum_similarity for similarity in similarity_list]\n final_output = tf.concat(final_output, axis=1)\n\n keras_model = tf.keras.models.Model(\n inputs=[test_input, train_input, train_label], outputs=final_output)\n compile_keras_model(keras_model)\n return keras_model", "def modelbuilder():\n model = Sequential()\n # Add a convolution layer with with a sigmoid activation function\n model.add(layers.Conv2D(1, (2, 2), strides=(1, 1), activation='sigmoid', padding='same', input_shape=(256, 256, 3)))\n model.compile(optimizer='adam', loss='mse', metrics=['accuracy'])\n model.summary()\n return model", "def TextRCNN2(inputs, lstm_size=256, cnns=[(32,1), (32,3), (64,5), (128,7)]):\n tf_input, 
cnn_feats = cnn_backbone(inputs=inputs, cnns=cnns)\n\n tf_x = tf.keras.layers.concatenate(cnn_feats)\n tf_x = tf.keras.layers.CuDNNLSTM(lstm_size, return_sequences=True)(tf_x)\n tf_x = tf.keras.layers.CuDNNLSTM(lstm_size, return_sequences=True)(tf_x)\n tf_x = tf.keras.layers.CuDNNLSTM(lstm_size, return_sequences=True)(tf_x)\n tf_x = tf.keras.layers.CuDNNLSTM(lstm_size)(tf_x)\n \n tf_readout = tf.keras.layers.Dense(1, activation='sigmoid')(tf_x)\n model = tf.keras.models.Model(tf_input, tf_readout, name='TextRCNN2') \n model.compile(loss='binary_crossentropy', optimizer='adam')\n return model", "def build_model(self):\n # Define input layer (states)\n states = Input(shape=self.state_size)\n \n # Add hidden layers\n net = Dense(units=self.fc1_units, activation='relu', kernel_initializer='glorot_uniform')(states)\n net = Dense(units=self.fc2_units, activation='relu', kernel_initializer='glorot_uniform')(net)\n\n # Add final output layer with linear activation\n Q_values = Dense(units=self.action_size, activation='linear', kernel_initializer='glorot_uniform')(net)\n\n # Create Keras model\n self.model = Model(inputs=states, outputs=Q_values, name=self.name)" ]
[ "0.6844526", "0.6768665", "0.66675013", "0.65246445", "0.6511792", "0.6232399", "0.6185943", "0.61621577", "0.60782385", "0.60532033", "0.6042736", "0.6000108", "0.5988612", "0.59775037", "0.5974451", "0.5970998", "0.59662324", "0.59449714", "0.59398437", "0.58934414", "0.5888277", "0.58700323", "0.58603", "0.5837908", "0.58329976", "0.5818866", "0.58050555", "0.5792099", "0.5768973", "0.5734099", "0.5730435", "0.5722599", "0.5722599", "0.57052535", "0.5689324", "0.56733346", "0.56573004", "0.56479275", "0.5645725", "0.5619447", "0.560502", "0.56034917", "0.5596743", "0.5594893", "0.55666775", "0.55438507", "0.5536855", "0.5528856", "0.55257165", "0.5519374", "0.5518548", "0.5516322", "0.5489898", "0.5478379", "0.5474642", "0.54738855", "0.5469699", "0.5459517", "0.54511267", "0.5446931", "0.54411626", "0.5428608", "0.53851575", "0.5378518", "0.53604096", "0.5347616", "0.5342435", "0.5340242", "0.533748", "0.5336213", "0.53251576", "0.53251165", "0.53251064", "0.5319088", "0.5317806", "0.5309753", "0.52861035", "0.5283856", "0.5277151", "0.52753985", "0.527477", "0.52655226", "0.5259509", "0.5259491", "0.5258727", "0.5258311", "0.5247659", "0.52467734", "0.5245383", "0.5237923", "0.52271426", "0.52263147", "0.52235776", "0.52192485", "0.52161247", "0.52153146", "0.5214458", "0.5209039", "0.52070737", "0.52031684" ]
0.77477473
0
Try to break it, the algorithm... !! Guess not mdrfkr.
def test_BuildModel3(self):
    print("\nTest 7: Building a more complicated Model")
    builder = StaticBuilder("BreakIt")
    in1 = builder.addInput(10)
    in2 = builder.addInput(20)
    enc1 = builder.addInner(3)
    enc2 = builder.addInner(5, num_islots=2)
    out1 = builder.addOutput()
    out2 = builder.addOutput()
    builder.addDirectedLink(in1, enc1)
    builder.addDirectedLink(in2, enc2, islot=0)
    builder.addDirectedLink(enc1, enc2, islot=1)
    builder.addDirectedLink(enc1, out1)
    builder.addDirectedLink(enc2, out2)
    builder.build()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_guessing(self):\n self.classifier.guess(self.message)", "def play(self):\n print(\"Game is starting!!\")\n self.generate_secret_number()\n while True:\n self.get_guess_from_user()\n self.ans = self.compare_results()\n if self.ans:\n print(f\"Right Guess!! , the number is {self.secret_number}\")\n break\n else:\n print(f\"Wrong Guess!! , Please try again.\")\n return self.ans", "def find():\n b = 0\n q = 0\n while b == q:\n seq = [randint(-10, 10) for _ in range(randint(15, 30))]\n b, b_at = brute_force(seq)\n q = solution(seq)\n print(seq, b, q, b_at)", "def main():\n word = random_word()\n old_ans = dashed(word)\n print('You have ' + str(N_TURNS) + ' guesses left.')\n guess(word, old_ans)", "def main():\n # init variables\n lower_bound = 1\n higher_bound = 10\n guess = generate_guess(1, 10)\n while True:\n try:\n secret = input(\"What should the computer guess? Enter a number between 1 and 10: \")\n except ValueError:\n print(\"{} isn't a number!\".format(secret))\n while True:\n if int(guess) == int(secret):\n print(\"I guessed {}! Your number was {}! I win!\".format(guess, secret))\n play_again = input(\"Do you want to play again? (Y/n)\")\n if play_again != \"Y\":\n print(\"Thanks for playing!\")\n exit()\n else:\n main()\n elif int(guess) != int(secret):\n high_or_low = input(\"I guessed {}. Was it high or low? (H/L)\".format(guess))\n print(\"G: {}, HB: {}, LB: {}\".format(guess, higher_bound, lower_bound))\n if high_or_low == \"H\":\n higher_bound = guess - 1\n guess = generate_guess(lower_bound, higher_bound)\n elif high_or_low == \"L\":\n lower_bound = guess + 1\n guess = generate_guess(lower_bound, higher_bound)\n else:\n print(\"Please try again: \\n\")", "def guess_a_number():\n\n # TODO:\n # generate a random number (uniformly distributed between 0 and 100)\n # read input from the user and validate that the input is numeric (use the function check_raw)\n # check whether the number was guessed \n # implement the functions evaluate_my_number, which checks whether the number is too high or too low\n # and print this information to the user\n # let the computer guess, therefore implement the demo_a_number function\n random_number=randint(0,100)\n \n '''versuche=0\n max_versuche=5\n guess=-1\n test= False\n while guess != random_number:\n while test == False:\n guess= input('Gib eine Zahl zwischen 0 und 100 ein: ')\n try:\n guess= int(guess)\n test=True\n except ValueError:\n print('Try Again')\n \n if guess == random_number:\n print('Du hast die Zahl erraten!')\n elif guess > random_number:\n print('Die Zahl ist zu gross')\n versuche=versuche+1\n else:\n print('Die Zahl ist zu klein')\n versuche=versuche+1'''", "def guess(word, old_ans):\n life = N_TURNS\n while life > 0:\n guess_ch = input('Your guess: ')\n guess_ch = guess_ch.upper()\n if guess_ch.isalpha() != True or len(guess_ch) != 1:\n print('Illegal format.')\n else:\n ans = ''\n if word.find(guess_ch) == -1:\n # when user doesn't find the right character\n print('There is no ' + guess_ch + \"'s in the word.\")\n life -= 1\n life = life\n for ch in word:\n if ch == guess_ch:\n ans += ch\n else:\n ans += '-'\n else:\n # when user make a correct guess that find out the right character of the word\n print('You are correct!')\n for ch in word:\n if ch != guess_ch:\n ans += '-'\n else:\n ans += guess_ch\n new_ans = ''\n for i in range(len(old_ans)):\n # to keep the previous right guess' result\n ch = old_ans[i]\n if ch.isalpha():\n new_ans += ch\n elif ch != ans[i]:\n new_ans += guess_ch\n else:\n new_ans += ch\n 
old_ans = new_ans\n if old_ans.isalpha():\n # when the user find all characters of the random word ans still alive\n print('You win!!')\n print('The word was: '+word)\n break\n else:\n if life > 0:\n print('The word looks like '+old_ans)\n print('You have '+str(life)+' guesses left.')\n # when the user make wrong guesses and finish all his/her guess opportunities\n if life == 0:\n print('You are completely hung : (')\n print('The word was: '+word)", "async def numguess(self, ctx):\r\n if await bMsg(ctx,ctx.message.author.name,client):\r\n return\r\n logger.info('Games.numguess', extra={'invoker': ctx.message.author.name})\r\n guess = None\r\n limDn = 0\r\n limUp = 100\r\n tries = 7\r\n secret = random.randint(1, 100)\r\n await ctx.send(\"\"\"Arr! I'm the Dread Pirate Roberts, and I have a secret!\r\nIt's a number from {} to {}. I'll give you {} tries.\r\nSend a number to guess it.\"\"\".format(limDn, limUp, tries))\r\n while guess != secret and tries > 0:\r\n await ctx.send(\"What's yer guess, matey?\")\r\n result = ''\r\n guess = await ctx.bot.wait_for('message',\r\n check=lambda m: m.channel == ctx.channel and re.match('[0-9]+', m.content))\r\n guess = int(guess.content)\r\n if guess == secret:\r\n break\r\n elif guess < limDn or guess > limUp:\r\n result += \"Out of range, ye swab!\\n\"\r\n elif guess < secret:\r\n result += \"Too low, ye scurvy dog!\\n\"\r\n limDn = guess\r\n elif guess > secret:\r\n result += \"Too high, landlubber!\\n\"\r\n limUp = guess\r\n tries -= 1\r\n result += \"Yer range is {} to {}; ye have {} tries left.\".format(limDn, limUp, tries)\r\n await ctx.send(result)\r\n if guess == secret:\r\n await ctx.send(\"Avast! Ye got it! Found my secret, ye did! With {} tries left!\".format(tries))\r\n else:\r\n await ctx.send(\"No more tries, matey! Better luck next time! The secret number was {}.\".format(secret))", "def get_input(mask, word_to_guess, user_guesses, attempts, valid_characters, secret_words):\n\n\tprint \"\\n The word to guess is: \", mask\t\n\tprint \"\\n # of attempts: \", attempts\n\tprint \"\\n Insert a letter or a number \\n\"\n\tthe_guess = raw_input()\n\tthe_guess = the_guess.lower()\n\t# Check if the input is a valid character\n\tvalidity = check_validity(the_guess, valid_characters, user_guesses)\n\tif (validity is True):\n\t\t# CHeck if the user has guessed the letter\n\t\tif (check_if_guessed(the_guess, word_to_guess) >= 0):\n\t\t\tprint \"\\n Great! your choosed the correct letter!\"\n\t\t\tuser_guesses += the_guess\n\t\t\tmask = calculate_mask(user_guesses, word_to_guess)\n\t\t\tyou_won = check_if_won(user_guesses, word_to_guess, secret_words)\n\t\t\tif you_won is True:\n\t\t\t\t# If the user has won it stop the game\n\t\t\t\treturn\n\t\telse:\n\t\t\tattempts = attempts + 1\n\t\t\tprint \"\\n Sorry! the letter is not present in the word! you have now %d guess left\" % (6 - attempts)\n\t\t\tyou_lost = check_if_lost(attempts, secret_words)\n\t\t\tif you_lost is True:\n\t\t\t\t# If he user has lost it stop the game\n\t\t\t\treturn\n\telse:\n\t\tprint \"\\n The input is not valid! 
Insert a valid input\"\n\tget_input(mask, word_to_guess, user_guesses, attempts, valid_characters, secret_words)\n\treturn", "def advancedGuessingGame():\n\n print(\"\\nWelcome to the guessing game!\")\n print(\"A number between _ and _ ?\")\n\n lowerBound = not_number_rejector(\"Enter Lower Bound: \")\n\n higher_number = False # we need to set an upper and lowerbound for game\n\n while not higher_number:\n upperBound = not_number_rejector(\"Enter Upper Bound: \")\n if upperBound > lowerBound:\n higher_number = True\n else:\n print(\"The upperbound is lower than you lowerbound: TRY AGAIN\")\n\n # above code ensures upper > lower, see stubbon_asker in EX1\n\n print(\"OK then, guess a number between {} and {} ?\".format(lowerBound, upperBound))\n lowerBound = int(lowerBound) # ensures integer is give (Not a letter)\n upperBound = int(lowerBound)\n\n actualNumber = random.randint(lowerBound, upperBound)\n\n guessed = False\n\n while not guessed:\n guessedNumber = not_number_rejector(\"Make a guess: \")\n print(\"You guessed {},\".format(guessedNumber),)\n if guessedNumber == actualNumber:\n print(\"HOW DID YOU GET THAT! It was {}\".format(actualNumber))\n guessed = True\n elif guessedNumber > upperBound:\n print(\"This is higher than the upperbound! Try again!\")\n elif guessedNumber < lowerBound:\n print(\"This is lower than the lowerbound! Try again!\")\n elif guessedNumber < actualNumber:\n print(\"{} is too small, try again\".format(actualNumber))\n else:\n print(\"{} is too big, try again \".format(actualNumber))\n return \"You got it!\"\n # the tests are looking for the exact string \"You got it!\". Don't modify that!", "def main():\n # initial condition\n ans = random_word()\n remaining_guess_num = N_TURNS\n guess_word = ''\n for i in range(len(ans)):\n guess_word += '-'\n\n # start to play hangman game\n while (remaining_guess_num > 0) and (guess_word != ans):\n print('The word looks like: ' + str(guess_word))\n print('You have ' + str(remaining_guess_num) + ' guesses left.')\n input_ch = str(input('Your guess: '))\n\n # illegal format\n if not input_ch.isalpha():\n print('illegal format.')\n elif len(input_ch) != 1:\n print('illegal format.')\n # correct format\n else:\n # case-insensitive\n input_ch = input_ch.upper()\n # wrong guess\n if ans.find(input_ch) == -1:\n print('There is no ' + str(input_ch) + '\\'s in the word.')\n remaining_guess_num -= 1\n # correct guess\n else:\n print('You are correct!')\n ans_slice = ans\n # replace all the correct guessed letter(s)\n while ans_slice.find(input_ch) != -1:\n replace_loc = len(ans) - len(ans_slice) + ans_slice.find(input_ch)\n guess_word = replace_letter(input_ch, replace_loc, guess_word)\n ans_slice = ans_slice[ans_slice.find(input_ch)+1:]\n # win\n if guess_word == ans:\n print('You win!!')\n # lose\n else:\n print('You are completely hung : (')\n print('The word was: ' + str(ans))", "def guessTheSecret():\n\tguess = int(input('Guess the number > '))\n\tglobal attempts\n\tcheck = False\n\twhile guess != secret_num:\n\t\tif guess < secret_num:\n\t\t\tprint('Your guess is too low')\n\t\telif guess > secret_num:\n\t\t\tprint('You guess to too high')\n\t\tguess = int(input('Guess again > '))\n\t\tattempts += 1\n\t\tif attempts >= 4:\n\t\t\tbreak\n\tif guess == secret_num:\n\t\treturn True", "def main_f():\n ph_number = read_number()\n if ph_number == -1:\n print('Incorrect number, try again')\n return\n res_l = find_let(ph_number, 0)\n output_result(res_l)", "def main():\n answer = random_word().upper()\n dashed_word = ''\n for i in 
range(len(answer)):\n dashed_word += '-'\n guess_times = 0\n while True:\n if guess_times == N_TURNS:\n # This is the last chance to guess and user failed\n print('You are completely hung :\\'(')\n break\n print('The word looks like: ' + dashed_word + '\\nYou have ' + str(N_TURNS - guess_times) + ' guesses left.')\n guess = input('Your Guess: ')\n if len(guess) == 1 and guess.isalpha():\n # Legal format\n guess = guess.upper()\n if answer.find(guess) != -1:\n # The guess is correct and should uncover the dashed_word\n print('You are correct!')\n dashed_word = uncover_dash(guess, answer, dashed_word)\n if not dashed_word.find('-') > -1:\n # No dash left.\n print('You win!!')\n break\n else:\n # Wrong guess\n guess_times += 1\n print('There is no ' + guess + '\\'s in the word.')\n else:\n print('Illegal format')\n print('The word was: ' + answer)", "def play_game(n):\n tries = 0\n magic_number = generate_random(n)\n print(\"Let's play the mimsmind0 game.\")\n # Get and validate user's first guess\n while True:\n try:\n guess = int(input(\"Guess a {}-digit number: \".format(n)))\n tries += 1\n break\n except:\n print(\"That is not a valid number, try again.\") \n while True:\n # Check guess against magic number and give directional guidance if incorrect\n try:\n if magic_number > guess:\n guess = int(input(\"Try again. Guess a higher number: \"))\n tries += 1\n elif magic_number < guess:\n guess = int(input(\"Try again. Guess a lower number: \"))\n tries += 1\n else:\n print(\"Congratulations. You guessed the correct number in {} tries.\".format(tries))\n break\n except:\n print(\"That's not a valid number.\")", "def test_guess_optics():\n from ctapipe.instrument import guess_telescope\n\n answer = guess_telescope(1855, 28.0 * u.m)\n\n assert answer.name == \"LST\"\n assert answer.n_mirrors == 1", "def guess_word(ga, word, ch):\n flag = 0\n sd = stringDatabase\n gameword = list(word)\n gamewordlen = len(gameword)\n\n # Checks if the option/choice is 'l'\n if ch == 'l':\n gl = sd.StringDatabase.get_letter()\n if not gl:\n return\n # print('here')\n if gl in gameword:\n if gl in ga.guess_list:\n print('Already flipped the letter, Guess another letter')\n return\n n = gameword.count(gl)\n print('You found', n, 'letter/s')\n for i in range(gamewordlen):\n if gameword[i] == gl:\n ga.guess_list[i] = gl\n if ga.guess_list == gameword:\n flag = 1\n ga.status = 'Success'\n print('Correct Guess!!')\n ga.calculate_score()\n else:\n print('You found 0 new letter/s, Try Again')\n ga.missed_letter += 1\n if ga.score == 0:\n ga.score -= 1\n else:\n ga.score -= (ga.score/4)\n per = (ga.score*10)/100\n ga.score -= per\n\n # Checks if the option/choice is 'g'\n if ch == 'g':\n gl = list(sd.StringDatabase.get_word())\n # print('\\n')\n if gl == gameword:\n count = 0\n for i in range(gamewordlen):\n if gameword[i] != ga.guess_list:\n count += 1\n if count >= 1:\n ga.calculate_score()\n ga.score += 10\n ga.status = 'Success'\n print('Correct guess!!')\n flag = 1\n else:\n ga.bad_guess += 1\n print('Wrong guess, Try Again')\n per = (ga.score * 30) / 100\n ga.score -= per\n\n # Checks if the option/choice is 't'\n if ch == 't':\n ga.score = ga.score/2\n ga.calculate_minus_score()\n ga.status = 'Gave up'\n print('Correct word is: ' + word)\n flag = 1\n\n return flag", "def kampf():\n print(\"Spiele Schere, Stein, Papier gegen das Monster\")\n while True:\n print(\"Wähle Schere, Stein oder Papier\")\n command= input(\"?\")\n if command not in (\"Schere\",\"Stein\",\"Papier\"):\n return 1\n 
gegnerzug=random.choice((\"Schere\",\"Stein\",\"Papier\"))\n print(\"Du spielst {} Monster spielt {}\".format(command,gegnerzug))\n if gegnerzug==command:\n print(\"Unentschieden, spiele nochmals\")\n continue\n if (gegnerzug==\"Schere\" and command==\"Papier\") or (gegnerzug==\"Stein\" and command==\"Schere\") or (gegnerzug==\"Papier\" and command==\"Stein\"):\n print(\"Monster gewinnt!\")\n return 1\n print(\"Du gewinnst!\")\n return 0", "def check_guess(f, g):\n return f == g", "def results_of_guess(self):\r\n print(self.best_guess)\r\n print(self.chosen_letter)\r\n \r\n #self.best_guess = input(\"Enter word with correct letters and stars \" + \"as blank spaces.\")\r\n wrong_words = set()\r\n if self.chosen_letter in self.best_guess: # in case of success\r\n print(\"hit\")\r\n list_of_indices = [i for i, value in enumerate(self.best_guess) \r\n if value == self.chosen_letter]\r\n for word in self.valid_words:\r\n for index in list_of_indices:\r\n if word[index] != self.chosen_letter:\r\n wrong_words.add(word)\r\n elif word.count(self.chosen_letter) > len(list_of_indices):\r\n wrong_words.add(word)\r\n \r\n else: # in case of failure\r\n print(\"miss\")\r\n for word in self.valid_words:\r\n if self.chosen_letter in word:\r\n wrong_words.add(word)\r\n self.valid_words = self.valid_words.difference(wrong_words)", "def guess(key, values):\n # need to know a number of gaussians in order to give a sensible guess. \n return None", "def main():\n word = random_word()\n attempt_left = N_TURNS\n ans = intro(word, attempt_left)\n while attempt_left != 0:\n hangman_figure(attempt_left)\n ans, attempt_left = hangman(word, ans, attempt_left)\n if ans == word: # if players had guess the word correctly\n print('You are correct!')\n print('You win!!')\n print('The word was: ' + word)\n break\n else:\n print('The word looks like: ' + ans)\n if attempt_left == 0: # players failed to guess the word correctly\n hangman_figure(attempt_left)\n print('You are completely hung : (')", "def main(self):\n stringDatabase.IOop(1)\n word=\"\"\n Word=\"\"\n guess=\"\"\n letter=\"\"\n status=\"\"\n badGuess=0\n contr=0\n noOfTime=0\n missLetter=0\n score=0\n cont=0\n flag=0\n listTemp = []\n listTemp1 = []\n # frequency : store frequency of all letters in dictionary data structure\n frequency={'a':8.17,'b':1.49,'c':2.78,'d':4.25,'e':12.70,'f':2.23,'g':2.02,'h':6.09,'i':6.97,'j' : 0.15,'k':0.77,'l':4.03,'m':2.41,'n':6.75,'o':7.51,'p':1.93,'q':0.10,'r':5.99,'s':6.33,'t':9.06,'u':2.76,'v':0.98,'w':2.36,'x':0.15,'y':1.97,'z':0.07}\n dictionary={}\n try:\n for i in range(100):\n print(\"\\n** The great guessing game **\")\n print(\" Game : \",i+1)\n f2=random.randint(0,4029)\n Word=list[f2]\n word=\"----\"\n listTemp.clear()\n listTemp1.clear()\n noOfTime=0\n badGuess=0\n missLetter=0\n cont=0;\n score=0\n contr=0\n status=\"\"\n for j in Word:\n if j not in listTemp1:\n listTemp1.append(j)\n if j in dictionary:\n dictionary[j] += 1\n else:\n dictionary[j] = 1\n while word!=Word:\n print(\"Current Guess: \",word)\n print(\"g = guess, t = tell me, l for a letter, and q to quit\")\n choice=input()\n\n if choice==\"g\":\n contr=contr+1\n print(\"Enter word:\")\n guess=input()\n if guess==Word:\n status=\"Success\"\n for h in listTemp1:\n score+=dictionary[h]*frequency[h]\n print(\"Correct Guess : \",Word)\n print(\"\\nLets guess another Word...\\n\\n\")\n word=Word\n else:\n badGuess=badGuess+1\n score-=score/10\n #print(score)\n print(\"Incorrect Guess...Try Again\\n \")\n elif choice==\"t\":\n contr=contr+1\n status=\"Gave 
up\"\n for h in listTemp1:\n score-=dictionary[h]*frequency[h]\n print(\"You Gave up\\nCorrect word is : \",Word)\n print(\"Lets guess another Word...\\n\")\n word=Word\n elif choice==\"q\":\n print(\"Your Current Guess Word is : \",Word)\n print(\"Thank you for playing 'The great guessing game'\")\n print(\"\\nYour game summary:-\\n\")\n flag=1\n status=\"Gave up\"\n word=Word\n elif choice==\"l\":\n contr=contr+1\n print(\"Enter a letter:\");\n letter=input()\n if letter in dictionary:\n if letter in listTemp:\n print(\"You already uncover this letter...Try with nother letter or word.\\n\")\n else :\n for j in range(0,len(word)):\n if Word[j]==letter:\n word=word[:j]+letter+word[j+1:]\n\n listTemp1.remove(letter)\n #print(word)\n\n if word==Word:\n status=\"Success\"\n score+=dictionary[letter]*frequency[letter]\n print(\"Correct Guess : \" , Word)\n print(\"Lets guess another Word...\\n\")\n word = Word\n\n else :\n score+= dictionary[letter]*frequency[letter]\n cont += dictionary[letter]\n print(\"You found \" , cont , \" letters\\n\")\n listTemp.append(letter)\n\n else:\n missLetter=missLetter+1\n noOfTime=noOfTime+1\n print(\"Incorrect letter Guess....Try Again\\n\")\n if(noOfTime!=0):\n score-=score/noOfTime\n\n dictionary.clear()\n\n if(contr!=0):\n game.report(i+1,Word,status,badGuess,missLetter,score)\n\n if flag==1:\n break\n except:\n print()\n\n game.printReport(1)", "def evaluate_my_number(guess, random_number):", "def merkkaa_miina(kentta):\n while True:\n print(\"Voit merkata tyhjän paikan x:llä tai poistaa merkkauksen syöttämällä merkatun paikan koordinaatit uudestaan.\")\n print(\"Merkataan ruutu x:llä\")\n merkattava_ruutu = input(\"- Syötä koordinaatit välilyönnillä erotettuna: \").split()\n print(\"------------------------------------------------\")\n if len(merkattava_ruutu) == 0:\n print(\">>> Syötä koordinaatit kokonaislukuina välilyönnillä erotettuna toisistaan!\")\n tulosta_kentta(kentta, miinat)\n continue\n elif merkattava_ruutu[0] == \"q\":\n return \"q\"\n elif len(merkattava_ruutu) != 2:\n print(\">>> Syötä kaksi koordinaattia kokonaislukuina välilyönnillä erotettuna toisistaan!\")\n tulosta_kentta(kentta, miinat)\n continue\n try:\n miinan_leveys = int(merkattava_ruutu[0])\n miinan_korkeus = int(merkattava_ruutu[1])\n if miinan_leveys >= len(kentta[0]) or miinan_korkeus >= len(kentta) or miinan_leveys < 0 or miinan_korkeus <0:\n print(\">>> Syöttämäsi koordinaatit ovat kentän ulkopuolella. 
Yritä uudestaan.\")\n tulosta_kentta(kentta, miinat)\n continue\n except ValueError:\n print(\">>> Anna koordinaatit kokonaislukuina!\")\n tulosta_kentta(kentta, miinat)\n else:\n if kentta[miinan_korkeus][miinan_leveys] == \"-\":\n kentta[miinan_korkeus][miinan_leveys] = \"x\"\n tulosta_kentta(kentta, miinat)\n elif kentta[miinan_korkeus][miinan_leveys] == \"x\":\n kentta[miinan_korkeus][miinan_leveys] = \"-\"\n tulosta_kentta(kentta, miinat)\n else:\n print(\">>> Et voi merkata avattua ruutua!\")\n tulosta_kentta(kentta, miinat)\n return miinan_leveys, miinan_korkeus", "def main():\r\n print('Kies hier hoe u mastermind wil spelen: \\n1.Ik wil raden.\\n2.De computer laten raden.')\r\n keuze = int(input('Ik kies voor nummer: '))\r\n try:\r\n if keuze == 1:\r\n print('We spelen met deze kleuren: ' + str(kleurenlijst))\r\n vierhiddencomputer = computerinput(kleurenlijst)\r\n return playerzetten(vierhiddencomputer)\r\n elif keuze == 2:\r\n print('We spelen met deze kleuren: ' + str(kleurenlijst))\r\n vierhiddenplayer = playerinput(kleurenlijst)\r\n return simpel_algoritme(vierhiddenplayer, totalecombinaties, combinatie)\r\n else:\r\n print('kies voor nummer 1 of 2 \\n')\r\n except:\r\n print('kies voor nummer 1 of 2 \\n')\r\n return main()", "def try_to_guess(word):\n\n # set number of tries based on word length\n if 4 < len(word) < 7:\n tries = 4\n elif 7 < len(word) < 12:\n tries = 8\n else:\n tries = 12\n \n # create placeholder word eg: ---\n placeholder = ['-' for _ in range(len(word))]\n \n # list to check if letter was already guessed\n guesses = []\n\n while tries > 0:\n print('\\n' + ''.join(placeholder))\n letter = str(input(f\"Input a letter: \"))\n\n # only one lower case alphanum character\n if len(letter) > 1:\n print(\"You should input a single letter\")\n elif not letter.isalnum() or not letter.islower():\n print(\"It is not an ASCII lowercase letter\")\n \n elif letter in guesses:\n print(\"You already typed this letter\") \n elif letter not in word:\n print(\"No such letter in the word\")\n tries -= 1\n \n # we have a good letter\n else:\n for i, v in enumerate(word):\n \n if v == letter:\n placeholder[i] = letter\n \n if ''.join(placeholder) == word:\n print()\n print(''.join(placeholder))\n print(\"You guessed the word!\\nYou survived!\")\n return\n \n guesses.append(letter)\n \n else:\n print(\"You lost!\")\n print(f\"The word was {word}\")", "def checkGuess(guess, secretNum): \n if guess < secretNum:\n return \"Your guess is too low.\"\n elif guess > secretNum:\n return \"Your guess is too high.\"\n else:\n return \"You got it!!\"", "def run_game(ans, n):\n # transform to upper case to be case-insensitive\n ans = ans.upper()\n wrong_times = 0\n dashed = \"\"\n for i in range(len(ans)):\n dashed += '-'\n print_hangman(n, wrong_times)\n print('The word looks like: ' + dashed)\n print('You have '+str(n-wrong_times)+' guesses left.')\n while True:\n input_ch = input('Your guess: ')\n # check type of the input, just only one alphabet can be accepted\n if not (input_ch.isalpha() and (len(input_ch) == 1)):\n print('illegal format.')\n else:\n # transform to upper case to be case-insensitive\n input_ch = input_ch.upper()\n # if guessed alphabet was in the answer word\n if ans.find(input_ch) != -1:\n # check the alphabet's index in the word\n for i in range(len(ans)):\n if ans[i] == input_ch:\n # replace the guessed alphabet in the dashed string to show\n dashed = dashed[:i]+ans[i]+dashed[i+1:]\n print_hangman(n, wrong_times)\n print('You are correct!')\n # if alphabets were not 
all guessed, the while loop will be continued\n if not dashed.isalpha():\n print('The word looks like: ' + dashed)\n print('You have ' + str(n - wrong_times) + ' guesses left.')\n # if all alphabets were guessed, the game is over\n else:\n print('You win!')\n print('The word was: ' + ans)\n break\n # if guessed alphabet wasn't in the answer word\n else:\n wrong_times += 1\n # if wrong times haven't reached N_TURNS, the while loop will be continued\n print_hangman(n, wrong_times)\n if wrong_times < n:\n print(\"There's no \" + input_ch + \"'s in the word.\")\n print('The word looks like: ' + dashed)\n print('You have ' + str(n - wrong_times) + ' guesses left.')\n # if user guessed the wrong alphabet at the last time, the game is over\n elif wrong_times == n:\n print(\"There's no \" + input_ch + \"'s in the word.\")\n print('You are completely hung :(')\n print('The word was: ' + ans)\n break", "def check_guess(guess):\n while True:\n print(\" Was \" + str(guess) + \" too high, too low, or correct?\")\n answer = input()\n answer= answer.lower()\n \n if answer == 'too low' or answer == 'to low':\n return -1\n elif answer == 'too high' or answer == 'to high':\n return 1\n elif answer == 'correct':\n return 0\n else:\n print(\"I don't understand. Please enter 'too low', too high', or 'correct'.\")", "def main():\n known_plaintext = \"session_id=\"\n chars_to_guess = string.ascii_letters + string.digits + \"/+=\\n\" # guess from b64 chars plus newline\n pairs_to_guess = pair_combos(chars_to_guess)\n\n while '\\n' not in known_plaintext:\n guess = guess_from_iterable(iterable=chars_to_guess, known=known_plaintext)\n if guess is not None:\n known_plaintext += guess\n else:\n # Try a two byte guess to resolve misaligned compression\n pair_guess = guess_from_iterable(iterable=pairs_to_guess, known=known_plaintext)\n if pair_guess is not None:\n known_plaintext += pair_guess\n else:\n print(f\"Restarting: failure at '{known_plaintext}'\")\n known_plaintext = \"session_id=\"\n\n print(bytes(known_plaintext, \"utf-8\"))", "def correct_guess(self, guess):\n \n if self.code == guess:\n return True\n return False", "def bruteforce(vierhidden, totalecombinatie):\r\n for x in range(0, 1296):\r\n code = totalecombinatie[x]\r\n if validatecombination(vierhidden, code)[0] == 4:\r\n return 'Gevonden na ' + str(x + 1) + ' zetten.'", "def handleGuess(self, guess):\n res = self.checkAnswer(guess)\n if res is True:\n print (\"Correct!\")\n return True\n print (\"Bulls: %s, Cows: %s\" % res)\n return False", "def game_code(user_input, secret_word, my_letters, guess_count):\n#if str.isalpha(myinput1) == True and myinput1 not in my_letters and guess_count > 0:\n if user_input in secret_word and len(user_input) == 1:\n my_letters.append(user_input)\n mytempstr1 = get_guessed_word(secret_word, my_letters)\n print('Good guess: ' + mytempstr1)\n return 0\n elif user_input in ['a','e','i','o','u'] and len(user_input) == 1:\n my_letters.append(user_input)\n mytempstr1 = get_guessed_word(secret_word, my_letters)\n print('Oops! That letter is not in my word: ' + mytempstr1)\n return 1\n elif len(user_input) == 1:\n my_letters.append(user_input)\n mytempstr1 = get_guessed_word(secret_word, my_letters)\n print('Oops! 
That letter is not in my word: ' + mytempstr1)\n return 2", "def genGuess(data):\n v, e = data; a, b, c = polyfit(v, e, 2)\n v0 = -b/(2*a)\n e0 = a*v0**2 + b*v0 + c\n b0 = 2*a*v0\n bP = 4.\n guess = [e0, b0, bP, v0]\n return guess", "def process_player_input(self,guess):\r\n # Step 1 - Catch faulty input, this is not topic of week 2\r\n\r\n # Tell the player the secret number :-)\r\n if (guess == \"Cheat\"):\r\n return \"Secret number = %d\" % (self.secret_number)\r\n \r\n # Step 2 - Verify player's input.\r\n user_input = self.verify_input(guess, self.num_range)\r\n if (type(user_input) != type(0)):\r\n # Verify_input() detected faulty input\r\n # Let's leave here with the error message\r\n return user_input\r\n\r\n # Decrease the number of still available tries\r\n if (self.remaining_guesses>0):\r\n self.remaining_guesses -= 1\r\n print \"Remaining number of tries = \", self.remaining_guesses\r\n \r\n # Step 3 - Give the player a hint for next guess\r\n if ((user_input > self.secret_number) and (self.remaining_guesses > 0)):\r\n # Give a hint just if the player has another try\r\n result_message = \"Lower!\"\r\n elif ((user_input < self.secret_number) and (self.remaining_guesses > 0)):\r\n # Give a hint just if the player has another try\r\n result_message = \"Higher!\"\r\n elif (user_input == self.secret_number):\r\n result_message = self.correctguess_message\r\n else:\r\n # As the guess was wrong and there is no further try anymore,\r\n # tell the player that he/she lost\r\n result_message = \"You tried too often than necessary, You lost!\"\r\n return result_message", "def create_new_guess():\n next_choice = next(permutation_iterator) \n while inconsistent(next_choice, guesses):\n try:\n next_choice = next(permutation_iterator)\n except StopIteration:\n print(\"Error: Your answers were inconsistent!\")\n return ()\n return next_choice", "def guess_a_number():\n x = check_raw()\n random_number=randint(0,100)\n count_tries = 0\n\n while x != random_number:\n count_tries = count_tries + 1\n if count_tries == 10:\n print ('GAME OVER! You failed too many times!')\n break\n x = evaluate_my_number(x,random_number)\n if x == random_number:\n print ('Your number is correct! You needed {} tries.'.format(count_tries))\n break\n\n new_game = str(input(\"Do you want to play again? If so, say 'yes'! If not, say 'no' \"))\n if new_game == 'yes':\n guess_a_number()\n else:\n print('Goodbye!')\n\n # TODO:\n # generate a random number (uniformly distributed between 0 and 100)\n # read input from the user and validate that the input is numeric (use the function check_raw)\n # check whether the number was guessed \n # implement the functions evaluate_my_number, which checks whether the number is too high or too low\n # and print this information to the user\n # let the computer guess, therefore implement the demo_a_number function", "def guess(mqtt_client, number_to_guess_entry):\n # TODO: 5. 
Uncomment the line of code below to make guesses with EV3.\n mqtt_client.send_message(\"guess\", [int(number_to_guess_entry.get())])\n number_to_guess_entry.delete(0, 'end')\n # Note: You can play the game with only TO DO 5 complete, but it will be easier to solve if you do TO DO 6 as well.", "def brute_force_ramble(w: BaysianWeights, prompt: str, length: int = 5, fn: Callable = next_word_v1):\n while True:\n try: return ramble(w, prompt, length, fn)\n except: pass", "def run_single_game(words_list):\r\n word = hangman_helper.get_random_word(words_list) #random word\r\n pattern = len(word)*'_'\r\n wrong_guess_lst= list()\r\n error_count=0\r\n msg= hangman_helper.DEFAULT_MSG\r\n ask_play=False\r\n while error_count < hangman_helper.MAX_ERRORS and '_' in pattern:\r\n hangman_helper.display_state(pattern, error_count, wrong_guess_lst, msg, ask_play)\r\n user_input = hangman_helper.get_input()\r\n does_letter = if_letter(user_input[1]) #if the input is letter or not\r\n if user_input[0] == hangman_helper.HINT:\r\n filter_list= filter_words_list(words_list,pattern,wrong_guess_lst)\r\n filter_1 = choose_letter(filter_list,pattern)\r\n msg = hangman_helper.HINT_MSG+filter_1\r\n else:\r\n if len(user_input[1])!=1 or does_letter==False:\r\n msg= hangman_helper.NON_VALID_MSG\r\n elif user_input[1] in wrong_guess_lst or user_input[1] in pattern:\r\n msg= hangman_helper.ALREADY_CHOSEN_MSG+user_input[1]\r\n elif user_input[1] in word:\r\n pattern = update_word_pattern(word, pattern, user_input[1])\r\n msg = hangman_helper.DEFAULT_MSG\r\n else:\r\n error_count+=1\r\n msg=hangman_helper.DEFAULT_MSG\r\n wrong_guess_lst.append(user_input[1])\r\n if '_' in pattern:\r\n ask_play = True\r\n msg = hangman_helper.LOSS_MSG + word\r\n else:\r\n ask_play = True\r\n msg = hangman_helper.WIN_MSG\r\n hangman_helper.display_state(pattern, error_count, wrong_guess_lst, msg, ask_play)", "def diffy_hellman(field, a_value, b_value, point):\n a_comb, b_comb = int(), int()\n while a_comb == b_comb:\n a_comb = randint(1, sqrt(field) // 2)\n b_comb = randint(1, sqrt(field) // 2)\n print(\"Next factors have been generated:\")\n print(\"alhpha: \", a_comb)\n print(\"beta: \", b_comb)\n try:\n a_point = multiply_point(point, a_comb, field, a_value, b_value)\n b_point = multiply_point(point, b_comb, field, a_value, b_value)\n a_secret = multiply_point(b_point, a_comb, field, a_value, b_value)\n b_secret = multiply_point(a_point, b_comb, field, a_value, b_value)\n except ValueError:\n print(\"Got a point an eternity... 
Please, repeat DF-algorythm\")\n return\n if a_secret != b_secret:\n print(\"Something has terribly gone wrong...\")\n return\n else:\n print(\"Common secret key has been succesfully generated\")\n return a_secret", "def perform(self):\n i = 1\n attempts = 0\n\n while i < 8:\n letter = self.ask_letter()\n result = self.check_letter(letter, self.random_word)\n attempts += 1\n if result:\n print(result)\n if self.is_game_finished(result):\n print('\\nYou`re win!')\n winner = 1\n return self.send_result(attempts, winner)\n break\n else:\n self.draw_hangman(i)\n i += 1\n if i == 8:\n print('\\nYou`re lose')\n winner = 2\n return self.send_result(attempts, winner)", "def crackRsaBruteForce (e, n):\r\n p = getFirstFactor(n)\r\n q = n/p\r\n # phi = Euler Tortient\r\n phi = (p-1)*(q-1)\r\n\r\n d = 1\r\n while d < phi:\r\n # If the public key times the private key % phi = 1, then you have found\r\n # the correct private key\r\n if (e*d) % phi == 1:\r\n return d\r\n\r\n d += 1\r\n\r\n return -1", "def run_im_bored():\n \n greet_user()\n \n bored = True\n \n while bored:\n generate_suggestion()\n bored = ask_to_continue()", "def guess_option(self,secret,dashes):\n\n\n\n\n try1 = input(\"enter the word:\")\n if (try1!=secret):\n self.badGuess = self.badGuess + 1\n g = (list(try1))\n\n for i in range(len(secret)):\n if secret[i] == g[i]:\n dashes = self.update_dashes(secret, dashes, secret[i])\n\n # guess_calculation(dashes)\n print(dashes)\n self.count=self.count+1\n for i in range(len(dashes)):\n if secret[i] != dashes[i]:\n self.total = self.total + String_Database.frequency.get(secret[i])\n\n print(\"Your guess is wrong: \", \"no of wrong guesses \", self.badGuess)\n return dashes\n\n\n else:\n print(\"Success\")\n self.status=\"Success\"\n\n\n dashes=secret\n if self.total==0:\n for i in dashes:\n self.total=self.total+String_Database.frequency.get(i)\n\n\n\n\n self.calculate_finalscore()\n print(\"final_score for guess-otpion\",self.finalScore)\n return dashes", "def test_best_wild_hand(self):\n self.assertEqual(\n sorted(best_wild_hand(\"6C 7C 8C 9C TC 5C ?B\".split())),\n ['7C', '8C', '9C', 'JC', 'TC'])\n self.assertEqual(\n sorted(best_wild_hand(\"TD TC 5H 5C 7C ?R ?B\".split())),\n ['7C', 'TC', 'TD', 'TH', 'TS'])\n self.assertEqual(\n sorted(best_wild_hand(\"JD TC TH 7C 7D 7S 7H\".split())),\n ['7C', '7D', '7H', '7S', 'JD'])", "def next_round (warnings_remaining, guesses_remaining, available_letters,secret_word, letters_guessed):\n availables_letters = get_available_letters(letters_guessed)\n print(\"------------\")\n print(\"You have %d warnings left.\" %warnings_remaining)\n print(\"You have %d guesses left.\" %guesses_remaining)\n print(\"Available letters: %s\" %(available_letters))\n print (get_guessed_word(secret_word, letters_guessed))\n print(\"Please guess a letter:\")\n guessed_letter= input()\n return guessed_letter, availables_letters", "def get_guess():\n print('Choose a letter:')\n return input()", "def _prepare(self):\n self.code = random.randint(1000,9999)\n self.user_guess.append(\"----\")\n self.user_guess.append(\"----\")\n self.applied_guess.append(\"****\")\n self.applied_guess.append(\"****\")", "def incorrect_guess(self,\n letter): # relies on sanitise_guess, add_previous_guess(), display_correct_guess() & draw()\n if not self.sanitize_guess(letter): # ensures that it is alphabetical input\n return False\n if not self.add_previous_guess(): # ensures that it hasn't already been guessed\n return False\n if not self.display_correct_guess(): # ensures that it is not a 
correct guess\n self.attempts -= 1\n\n if self.attempts <= 0:\n Donatello.turtle_text(f\"Wrong guess! Attempts left: {self.attempts}\")\n Donatello.turtle_focused_text(\n f\"Oh no! You ran out of attempts. The word was '{self.chosen_word.upper()}'\")\n return False\n else:\n Donatello.turtle_text(f\"Wrong guess! Attempts left: {self.attempts}\")\n self.draw()\n Donatello.draw_word(self.display_word)\n return False, self.attempts", "def play_hangman(self) -> None: \n tries=6\n current_word=self.get_word()\n guessed_word = False\n word_hidden_states = [current_word[indx] for indx in sample(range(0, len(current_word)-1), randint(1, len(current_word)-2))]\n word_completion_state = [letter if letter not in word_hidden_states else \"_\" for letter in current_word]\n\n while tries > 0 and not guessed_word: \n os.system('cls' if os.name == 'nt' else 'clear') ## Clear the terminal for new lines to be printed\n self.display_state(tries,word_completion_state)\n guessed_char=str(input(\"Guess a Character : \")).upper()\n\n if guessed_char in word_hidden_states :\n print(\"\\nCorrect Guess !!!!!! Updating..........\")\n for indx,_ in enumerate(word_completion_state) : \n if guessed_char == current_word[indx]:\n word_completion_state[indx]=guessed_char\n \n word_hidden_states = [char for char in word_hidden_states if char != guessed_char]\n guessed_word = False if \"_\" in word_completion_state else True\n sleep(5)\n else :\n print(\"\\nIncorrect Guess!!! Updating!!!!!!\")\n sleep(5)\n tries=tries-1\n \n if tries == 0 and not guessed_word:\n os.system('cls' if os.name == 'nt' else 'clear') ## Clear the terminal for new lines to be printed\n print(f\"{'-' * 20}HANGMAN{ '-' * 20}\\n\\n\")\n print(self.hangman_states[-1] + \"\\n\")\n print(f\"No Tries Remaining , YOU LOST !!!!!\")\n print(f\"CORRECT WORD was ------> {current_word}\")\n print(f\"GAME OVER\")\n \n if guessed_word:\n os.system('cls' if os.name == 'nt' else 'clear') ## Clear the terminal for new lines to be printed\n print(f\"{'-' * 20}HANGMAN{ '-' * 20}\\n\\n\")\n print(self.hangman_states[-tries] + \"\\n\")\n print(f\"YOU GUESSED THE WORD CORRECTLY !!!\")\n print(f\"WORD was ------> {current_word}\")\n print(f\"Congratulations You win\")", "def get_guess_from_user(self):\n self.guess_number = input(f\"please guess a number between 1 to {self.difficulty}: \\n\")\n while True:\n if not self.guess_number.isnumeric() or \\\n not int(self.guess_number) <= self.difficulty or \\\n not int(self.guess_number) >= 0:\n self.guess_number = input(f\"you input is invalid!! please guess a number between 1 to {self.difficulty}: \\n\")\n else:\n self.guess_number = int(self.guess_number)\n break\n return self.guess_number", "def eval_guess(self, Guess):\n\n\t\t# pulls comparison from win check and assigns peg responses \n\n\t\t# returns a list to be in hint_response\n\n\t\t# displays as part of big display in view.\n\n\t\t\"\"\"Borrow the logic from win_check to implement eval_guess. Use variables right and wrong to \n\t\tevaluate. Right = Black peg. Wrong = no peg. 
\n\n\t\tWhite will be generated from a third loop to compare the entire list\"\"\"\n\n\n\t\tpass", "def guess_number():\n guess = 0\n while guess < 1:\n guess = int(input(\"Your guess: \"))\n return guess", "def answer(self) -> bool:", "def initLocalBestChoice(self):\n random.seed()\n return", "def guess(self, message, db_session):\n user = self.ts.get_user(message)\n if db_session.query(db.MiscValue).filter(db.MiscValue.mv_key == 'guessing-enabled').one().mv_value == 'True':\n msg_list = self.ts.get_human_readable_message(message).split(' ')\n if len(msg_list) > 1:\n guess = msg_list[1]\n if guess.isdigit() and int(guess) >= 0:\n self._set_current_guess(user, guess, db_session)\n self._add_to_whisper_queue(user, \"{} your guess has been recorded.\".format(user))\n else:\n self._add_to_whisper_queue(user, \"Sorry {}, that's not a non-negative integer.\".format(user))\n else:\n self._add_to_whisper_queue(user,\n \"Sorry {}, !guess must be followed by a non-negative integer.\".format(user))\n else:\n self._add_to_whisper_queue(user, \"Sorry {}, guessing is disabled.\".format(user))", "def challenge2(self):\n # Create emulator, with 6 registers. Set register 0 to 1\n emulator = Emulator(6)\n emulator.registers[0] = 1\n\n # Running this program seems to take forever... let's see if there's a pattern.\n # OK, so after dumping out lots of instructions, this pattern is repeated a lot:\n # [ 3] mulr 1 5 3\n # [ 4] eqrr 3 2 3\n # [ 5] addr 3 4 4\n # [ 6] addi 4 1 4\n # [ 8] addi 5 1 5\n # [ 9] gtrr 5 2 3\n # [10] addr 4 3 4\n # [11] seti 2 2 4\n # In this program, register 4 is the IP register. (IP is shown in [] above)\n # So instruction 6 is just jumping to instruction 8, we can optimize that out\n # And seti 2 2 4 will act as a jump back to instruction 3 (= 2 + 1)\n # Registers look like this after instruction 3:\n # IP: 4 Reg: [0, 1, 10551376, 145834, 3, 145834]\n # So effectively we have, per loop iteration:\n # reg[3] = reg[1] * reg[5]\n # reg[3] = 1 if reg[3] == reg[2] else 0\n # reg[4] = reg[3] + reg[4] => If the above was false, we go to instruction 6.\n # Otherwise we go to instruction 7 (outside the normal loop flow) and do:\n # addr 1 0 0 => reg[0] = reg[0] + reg[1], then proceed as below.\n # If we stay in the loop, or just fallen through now, we are at instruction 8:\n # reg[5] = reg[5] + reg[1]\n # reg[3] = 1 if reg[5] > reg[2] else 0\n # reg[4] = reg[3] + reg[4] => If the above was false, we go to instruction 11 => back to the start.\n # Otherwise, we jump outside the loop to instruction 12.\n # So to write the above in Python code, with registers named R0 etc:\n # while R5 <= R2:\n # R3 = R1 * R5\n # if R3 == R2:\n # R0 += R1\n # R5 += R1\n #\n # This appears to be a brute force way of checking if R1 is a factor of R2 - and if it is, adding it to R0\n # After this loop, if we go to instruction 12:\n # addi 1 1 1 => R1 += 1\n # gtrr 1 2 3 => R3 = 1 if R1 > R2 else 0\n # addr 3 4 4\n # seti 1 4 4\n # mulr 4 4 4\n # These last three mean \"jump to instruction 2 if R1 <= R2 else terminate (by squaring the IP)\"\n # So putting this together, along with instructions 2 and 3 of:\n # seti 1 8 1 => R1 = 1\n # seti 1 3 5 => R5 = 1\n # We are brute-force finding all the factors of R2 and adding all of them together into R0\n # As R2 contains 10551376, we can do this in a cleverer, non brute force way to find the final value of R0!\n number_to_factorize = 10551376\n \n def factors(n): \n return set(reduce(list.__add__, \n ([i, n//i] for i in range(1, int(n**0.5) + 1) if n % i == 0)))\n\n 
sum_factors = sum(factors(number_to_factorize))\n print(f\"Final value of register 0: {sum_factors}\")", "def new_game(secret_words):\n\n\tattempts=0\n\tword_index = random.randint(0,5)\n\tword_to_guess = secret_words[word_index]\n\tglobal mask\n\tmask = \" _ \" * len(secret_words[word_index])\n\tget_input(mask, word_to_guess, user_guesses, attempts, valid_characters, secret_words)\n\treturn", "def close(self, target, guess):\r\n return ord(target)-5 <= ord(guess) and ord(guess) <= ord(target)+5", "def findSecretWord(self, w, master):\n\n h = [None] * len(w) # keeps the set\n n = [None] * len(w) # keeps the near matrix\n for i in range(len(h)):\n h[i] = defaultdict(set)\n n[i] = [0] * len(w)\n\n for i in range(0, len(w) - 1):\n for j in range(i + 1, len(w)):\n nr = self.near(i, j, w)\n n[i][j], n[j][i] = nr, nr\n h[i][nr].add(j)\n h[j][nr].add(i)\n\n # print(n)\n # print(h)\n\n def remaining_choices(select, nr, choices):\n return len(h[select][nr] & choices)\n\n choices = set(range(len(w)))\n while True:\n max_cost = {}\n if len(choices) > 1:\n for select in choices:\n cost = {}\n visited = set()\n for secret in choices:\n if select != secret:\n nr = n[select][secret]\n if nr not in visited:\n cost[secret] = remaining_choices(\n select, nr, choices)\n visited.add(nr)\n # print(\"select {}\".format(select), cost)\n # find the max cost among all the secrets\n max_cost[select] = max(cost.items(), key=lambda x: x[1])\n # print(\"per select max cost\", max_cost)\n mcost = {k: v[1] for k, v in max_cost.items()}\n # print(mcost)\n minmax = min(mcost.items(), key=lambda x: x[1])\n # print(minmax)\n # master.guess(w[minmax])\n selection = minmax[0]\n else:\n selection = list(choices)[0]\n\n offline = False\n if offline:\n my_secret = w[1]\n my_secret = \"hbaczn\"\n matches = self.guess(w[selection], my_secret)\n my_secret_index = w.index(my_secret)\n print(\n (\"Secret: {}, Index: {}, \" +\n \"Matches: {}, N: {}, |N|: {}\").format(\n my_secret, my_secret_index,\n n[selection][my_secret_index],\n h[selection][matches],\n len(h[selection][matches])))\n else:\n matches = master.guess(w[selection])\n\n if matches == 6:\n print(\"found\")\n break\n choices = h[selection][matches] & choices\n # print(\"Choices\", choices)\n # print(\"sel, worst cost: {}, matches, remain: {}\".format(\n # minmax, (matches, len(choices))))\n\n return w[selection]", "def fuzz():\n if FUZZ:\n time.sleep(random.random())", "def exercise_b2_39():\r\n pass", "def guess_a_number(guess=1):\n if guess != 4:\n raise Exception('WRONG!')\n\n print('You guessed correctly!')", "def computer_guess(x):\n\n low = 1 \n high = x\n response = \"\"\n\n while response != 'c':\n\n if high != low:\n guess = random.randint(low, high)\n else:\n guess = low \n print(f\"I guessed your number and it is...{guess}\")\n break\n\n response = input(f\"Is {guess} the number you guessed? Is it high(h), low(l) or correct(c)? \").lower()\n\n if response == 'h':\n high = guess - 1\n elif response == 'l':\n low = guess + 1\n elif response == 'c':\n print(f\"Look, I correctly guessed your number as {guess}\")\n else:\n print(\"You don't deserve to play this game. 
You are a retarded human who can't even follow rules.\")\n break", "def evaluate_my_number(guess, random_number):\n if guess < random_number:\n print('Too low!')\n else: \n print ('Too high!')\n guess = check_raw()\n return guess", "def check_guess(self):\n if self.guess in self.guessed[8:]:\n self.entry.delete(0, END)\n return\n self.guessed += self.guess\n self.guesses.set(self.guessed)\n if self.guessed[-1] not in self.word:\n self.strikes += 1\n self.change_image()\n else:\n self.word_form()\n\n if \"_\" in self.word_underscored:\n if self.strikes == 6:\n self.word_blank.set(self.word)\n self.guesses.set(\"HANGMAN. YOU LOSE.\")\n\n if ''.join(self.word_underscored) == self.word:\n self.guesses.set(\"You WIN!\")\n self.word_blank.set(self.word)\n\n self.entry.delete(0, END)", "def guess_number():\n searched_number = random.randint(1, 10)\n while True:\n try:\n users_number = int(input(\"Guess the number: \"))\n except ValueError:\n print(\"It's not a number!\")\n continue\n if users_number > searched_number:\n print(\"Too big!\")\n elif users_number < searched_number:\n print(\"Too small!\")\n else:\n return \"You win!\"", "def AIguessing(lijst):\n\n global Code\n global allcombos\n\n\n AIguess = choice(lijst)\n\n print(f\"The original code was {Code}\")\n print(f\"my guess this time is {AIguess}, how did I do?\")\n while not feedbackgiven:\n correct = int(input(\"Write down how many colors are in the right spot: \"))\n semicorrect = int(input(\"Write down how many colors are correct but not in the right spot: \"))\n\n feedback = correct + semicorrect\n if feedback <= 4:\n return NewFeedbackSystem(AIguess, correct, semicorrect, lijst)\n else:\n print(\"please use numbers 1-4 where the total <= 4\")\n continue", "def main():\n \n global m,n,a,b\n \n m=int(input(\"墙长\"))\n n=int(input(\"墙宽\"))\n a=int(input(\"砖长\"))\n b=int(input(\"砖宽\"))\n\n mn=[]\n plmy=[]\n\n anses=[]\n ans=[]\n htanses=[]#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n htans=[]#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n\n ans,anses,htans,htanses\n \n for i in range(m*n):\n mn.append(i)\n plmy.append(0)\n\n if a==b:\n if m%a!=0 or n%a!=0:\n anses=[]\n else:\n zhuan=[]\n for i in range(m//a):\n for j in range(n//a):\n for k in range(a):\n for ki in range(a):\n zhuan.append(mn[i*a+j*a*m+k+ki*m])\n ans.append(tuple(zhuan))\n zhuan=[]\n htans.append(((i*a+j*a*m),(i*a+j*a*m)+a-1,(i*a+j*a*m)+(a-1)*m,(i*a+j*a*m)+(a-1)*m+a-1))\n anses.append(ans.copy())\n htanses.append(htans.copy())\n else:\n dg(mn,plmy,ans,anses,htans,htanses)\n\n print(\"共有方案数\",len(anses))\n if len(anses)!=0:\n i=input(\"查看详细结果(输入任意键)?可直接回车跳过\")\n if i==\"\":\n pass\n else:\n for j in range(len(anses)):\n print(\"方案%d\"%(j+1),anses[j])\n fa=int(input('想可视化的方案'))\n ksh(fa-1,alex,htanses) \n else:\n pass", "def evaluate_guess(secret_word, guesses, ip):\n if len(ip) > 1:\n return \"Only single letter guesses\", False\n if not i.islpha():\n return \"Only alphabet\", False\n if ip in guesses:\n return \"Already guessed {}\".format(ip), False\n\n return \"\", True", "def guess_the_number():\n # get a random number from 1 to 1000\n number = random.randrange(1, 1000)\n\n guess = 0\n gcounter = 0\n # compare guess and selected number\n while guess != number:\n # get user input\n guess = int(input('Guess my number between 1 to 1000: '))\n # compare with number\n if guess > number:\n print('Too high. Try again')\n gcounter += 1\n elif guess < number:\n print('Too low. 
Try again')\n gcounter += 1\n else:\n # if equal, congratulate the user\n print('Congratulations, you guessed the number!')\n print(f'You used {gcounter} guesses')\n # check the number of guesses and provide feedback\n if gcounter > 10:\n print('You should be able to do better')\n else:\n print('Either you know the secret or you got lucky.')\n # give the option to restart the game or quit.\n response = input((\"Would you like to play it again? \"\n \"('yes' or 'no'): \"))\n # check user response\n if response == 'yes':\n number = random.randrange(1, 100)\n guess = 0\n gcounter = 0\n elif response == 'no':\n print('Bye.')\n break\n else:\n print('Invalid response. Quitting...')\n break", "def hairness_mcstern_conflict(d, b, z):\n\n print('Your Hyuck is not good enough...')\n\n print(\"memes are overrated\")\n return None", "def NewFeedbackSystem(guess, correct, semicorrect, lijst):\n\n global allcombos\n global usedcombos\n global all_right\n\n\n feedback = correct + semicorrect\n\n usedcombos.append(guess)\n\n if not allright: #needs an extra way to AT LEAST get the same feedback as previous one\n\n if feedback == 4: #takes all letters in the code and checks for possible new combinations, adds them to the list\n for j in range(1):\n A = guess[j]\n B = guess[j + 1]\n C = guess[j + 2]\n D = guess[j + 3]\n\n results = permutations(f\"{A}{B}{C}{D}\", 4)\n newcombos = resulttolist(results)\n newcombos = [item for item in newcombos if item not in usedcombos]\n\n all_right = True\n return AIguessing(newcombos)\n\n elif feedback == 3: #takes all letters in the code and checks for possible new combinations with >= 3 from previous code, adds them to the list\n results = permutations(guess, 3)\n newresult = resulttolist(results, feedback)\n\n return compareWithAll(newresult, lijst, feedback)\n\n elif feedback == 2:\n #takes all letters in the code and checks for possible new combinations with >= 2 from previous code, adds them to the list\n results = permutations(guess, 2)\n newresult = resulttolist(results, feedback)\n\n return compareWithAll(newresult, lijst, feedback)\n\n elif feedback == 1:\n #takes all letters in the code and checks for possible new combinations with >= 1 from previous code, adds them to the list\n results = combinations(guess, 1)\n newresult = list(dict.fromkeys(resulttolist(results)))\n\n\n return compareWithAll(newresult, lijst)\n\n else:\n #takes all letters in the code and checks for possible new combinations WITHOUT these letters, adds them to the list\n newletterlist = [item for item in letters if item not in guess] #creates a new list with letters that weren't used\n newletters = \"\".join(newletterlist)\n\n results = product(newletters, repeat=4)\n newcombos = resulttolist(results)\n newcombos = [item for item in newcombos if item not in usedcombos]\n\n return AIguessing(newcombos)\n\n else: #if all letters were guessed correctly\n\n if correct == 4:\n return (\"Well played Human, but I win this time\")\n\n elif correct == 2: #in a 2,2 case, checks which combinations are possible while keeping 2 on the same spot each time\n\n results = permutations(guess, 2)\n newresult = resulttolist(results, feedback)\n\n return compareWithAll(newresult, lijst, feedback)\n\n elif correct == 1: #in a 1,3 case, creates a list with still possible combinations (since there'll be only 8, it's hardcoded in here)\n for j in range(1):\n A = guess[j]\n B = guess[j + 1]\n C = guess[j + 2]\n D = guess[j + 3]\n\n newcombos = [f\"{A}{C}{D}{B}\", f\"{A}{D}{B}{C}\", f\"{C}{B}{D}{A}\", 
f\"{D}{B}{A}{C}\", f\"{B}{D}{C}{A}\", f\"{D}{A}{C}{B}\", f\"{B}{C}{A}{D}\", f\"{C}{A}{B}{D}\"]\n newcombos = [item for item in newcombos if item not in usedcombos]\n\n return AIguessing(newcombos)\n\n else:\n for j in range(1):\n A = guess[j]\n B = guess[j + 1]\n C = guess[j + 2]\n D = guess[j + 3]\n\n results = permutations(f\"{A}{B}{C}{D}\", 4)\n newcombos = resulttolist(results)\n newcombos = [item for item in newcombos if item not in usedcombos]\n\n return AIguessing(newcombos)", "def one_turn(number, request):\r\n if not request:\r\n return (False, \"goodbye\")\r\n else:\r\n return check_guess(number, int(request))", "def not_number_rejector(message):\n actual_number = False\n\n while not actual_number:\n guess = str(input(message))\n if guess.isdigit():\n actual_number = True\n return int(guess)\n else:\n print(\"Not a number\")", "def algorithm_loop(self):", "def heuristiek(vierhidden, totalecombinatie, combinatie):\r\n zet = 1\r\n while zet <= 100:\r\n valid = validatecombination(vierhidden, combinatie)\r\n if valid[0] == 4:\r\n return 'De computer heeft jouw geheime code geraden ' + str(zet) + ' zetten!'\r\n else:\r\n mogelijke = verminderen(totalecombinatie, combinatie, valid)\r\n combinatie = mogelijke[random.randint(0, len(mogelijke) - 1)]\r\n zet += 1\r\n if zet > 100:\r\n return 'je hebt gewonnen van de computer.'", "def guessnum3(num):\n low = 1 # lowest number we could guess\n high = 101 # highest number plus 1\n tries = 0\n\n # use a for loop instead of a while\n # guarantees we won't get stuck\n for _ in range(100): # we can replace the i with an '_' because we don't care about using the index\n my_guess = (low+high) // 2 # this is the mean rounded down\n tries += 1\n if my_guess == num:\n return tries # breaks loop\n elif my_guess > num:\n high = my_guess # this readjusts the higher portion of the halving algorithm\n else: # when your guess is lower than the number\n low = my_guess + 1 # readjusts the lower portion of the halving algorithm", "def solution(s):", "def run(self):\n try:\n if self.guess:\n self.from_all()\n return\n\n if self.unix:\n result, indiv_output, combined_output, reason = self.from_unix_sec()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.umil:\n result, indiv_output, combined_output, reason = self.from_unix_milli()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.wh:\n result, indiv_output, combined_output, reason = self.from_win_64_hex()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.whle:\n result, indiv_output, combined_output, reason = self.from_win_64_hexle()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.chrome:\n result, indiv_output, combined_output, reason = self.from_chrome()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.active:\n result, indiv_output, combined_output, reason = self.from_ad()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.uhbe:\n result, indiv_output, combined_output, reason = self.from_unix_hex_32be()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.uhle:\n result, indiv_output, combined_output, reason = self.from_unix_hex_32le()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.cookie:\n result, indiv_output, combined_output, reason = self.from_cookie()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if 
self.oleb:\n result, indiv_output, combined_output, reason = self.from_ole_be()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.olel:\n result, indiv_output, combined_output, reason = self.from_ole_le()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.mac:\n result, indiv_output, combined_output, reason = self.from_mac()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.hfsdec:\n result, indiv_output, combined_output, reason = self.from_hfs_dec()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.hfsbe:\n result, indiv_output, combined_output, reason = self.from_hfs_be()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.hfsle:\n result, indiv_output, combined_output, reason = self.from_hfs_le()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.fat:\n result, indiv_output, combined_output, reason = self.from_fat()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.msdos:\n result, indiv_output, combined_output, reason = self.from_msdos()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.systime:\n result, indiv_output, combined_output, reason = self.from_systime()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.ft:\n result, indiv_output, combined_output, reason = self.from_filetime()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.hotmail:\n result, indiv_output, combined_output, reason = self.from_hotmail()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.pr:\n result, indiv_output, combined_output, reason = self.from_prtime()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.auto:\n result, indiv_output, combined_output, reason = self.from_ole_auto()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.ms1904:\n result, indiv_output, combined_output, reason = self.from_ms1904()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.ios:\n result, indiv_output, combined_output, reason = self.from_ios_time()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.sym:\n result, indiv_output, combined_output, reason = self.from_sym_time()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.gps:\n result, indiv_output, combined_output, reason = self.from_gps_time()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.eitime:\n result, indiv_output, combined_output, reason = self.from_eitime()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.bplist:\n result, indiv_output, combined_output, reason = self.from_bplist()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.gsm:\n result, indiv_output, combined_output, reason = self.from_gsm()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.vm:\n result, indiv_output, combined_output, reason = self.from_vm()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.tiktok:\n result, indiv_output, combined_output, reason = self.from_tiktok()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.twitter:\n result, indiv_output, 
combined_output, reason = self.from_twitter()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.discord:\n result, indiv_output, combined_output, reason = self.from_discord()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.ksuid:\n result, indiv_output, combined_output, reason = self.from_ksuid()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.mastodon:\n result, indiv_output, combined_output, reason = self.from_mastodon()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.meta:\n result, indiv_output, combined_output, reason = self.from_metasploit()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.sony:\n result, indiv_output, combined_output, reason = self.from_sony()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.uu:\n result, indiv_output, combined_output, reason = self.from_uuid()\n if indiv_output is False:\n print(reason)\n else:\n print(indiv_output)\n if self.timestamp:\n self.to_timestamps()\n except Exception:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))", "def test_griewank_works_fine(self):\n\t\thsaba_griewank = self.algo(NP=10, seed=self.seed)\n\t\thsaba_griewankc = self.algo(NP=10, seed=self.seed)\n\t\tAlgorithmTestCase.test_algorithm_run(self, hsaba_griewank, hsaba_griewankc)", "def gguess(x,y,xr,yr,xsgn,ysgn):\n \n # Bad until proven okay \n guesspar = None\n guessx = None \n guessy = None\n \n # Making sure it's the right structure \n #tags = tag_names(*(!gstruc.data)) \n #if (len(tags) != 6) : \n # return guesspar,guessx,guessy \n #comp = (tags == ['X','Y','RMS','NOISE','PAR','SIGPAR']) \n #if ((where(comp != 1))(0) != -1) :\n # return guesspar,guessx,guessy \n \n # Saving the originals \n orig_x = x \n orig_y = y \n\n if xr is None:\n xr = [0,2000]\n if yr is None:\n yr = [0,2000] \n \n # Is the x range continuous??\n # Assume not continuous in general\n cont = 0 \n \n # getting the p3 and p4 positions \n # P3 back in x (l-0.5), same y \n # P4 back in y (b-0.5), same x \n x3,y3 = gincrement(x,y,xr,yr,xsgn=-xsgn,ysgn=-ysgn)\n x4,y4 = gincrement(x,y,xr,yr,xsgn=xsgn,ysgn=-ysgn,p2=True)\n \n # CHECKING OUT THE EDGES \n # AT THE LEFT EDGE, and continuous, Moving RIGHT, use neighbor on other side \n # Use it for the guess, but never move to it directly \n if (x == xr[0]) and (xsgn == 1) and (cont == 1): \n y3 = y \n x3 = xr[1] \n \n # AT THE RIGHT EDGE, and continuous, Moving LEFT \n if (x == xr[1]) and (xsgn == -1) and (cont == 1): \n y3 = y \n x3 = xr[0] \n \n # At the edge, NOT continuous, Moving RIGHT, NO P3 NEIGHBOR \n if (x == xr[0]) and (xsgn == 1) and (cont == 0): \n x3 = None \n y3 = None \n \n # At the edge, NOT continuous, Moving LEFT, NO P3 NEIGHBOR \n if (x == xr[1]) and (xsgn == -1) and (cont == 0): \n x3 = None\n y3 = None\n \n # Have they been visited before? 
\n p3,res3 = gfind(x3,y3)\n p4,res4 = gfind(x4,y4)\n \n # Comparing the solutions \n b34,dbic34 = gbetter(res3,res4)\n \n # selecting the best guess \n if (dbic34<0): # using P3 \n guesspar = res3['par']\n guessx = x3\n guessy = y3 \n if (dbic34>=0): # using P4 \n guesspar = res4['par']\n guessx = x4 \n guessy = y4 \n if np.isfinite(dbic34)==False:\n guesspar = None\n guessx = None \n guessy = None\n \n # Putting the originals back \n x = orig_x \n y = orig_y \n\n return guesspar,guessx,guessy", "def play_a_game(strategy, word):\n guesses = []\n state_of_play = \"\"\n for i in range(len(word)):\n state_of_play = state_of_play + \" \"\n print(state_of_play)\n while state_of_play != word:\n letter = strategy.play_round(state_of_play,guesses)\n guesses.append(letter)\n print(letter)\n guess_success = False\n for i in range(len(word)):\n if letter == word[i]:\n state_of_play = state_of_play[0:i] + letter + state_of_play[i+1:]\n print(\"State of game: \" + state_of_play)\n guess_success = True\n if not guess_success:\n strategy.made_mistake()\n return strategy.mistakes", "def main():\r\n _evaluative_test(5)\r\n _fuzz_test(1)\r\n _fuzz_test(1, 512)\r\n _fuzz_test(1, 1512)\r\n _fuzz_test(1000)\r\n _fuzz_test(1000, 512)\r\n _fuzz_test(1000, 4077)", "def coding():\r\n \r\n key={'reverse_word': False, 'reverse_string': False, 'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd', 'e': 'e', 'f': 'f', 'g': 'g', 'h': 'h',\r\n 'i': 'i', 'j': 'j', 'k': 'j', 'l': 'l', 'm': 'm', 'n': 'n', 'o': 'o', 'p': 'p', 'q': 'q', 'r': 'r', 's': 's', 't': 't', 'u': 'u',\r\n 'v': 'v', 'w': 'w', 'x':'x', 'y': 'y', 'z': 'z'}\r\n x=0 #determine the sliding of the letters\r\n \r\n def isKeyEmpty(k):\r\n \"\"\"Utility Function that checks if key is empty\"\"\"\r\n if k=={'reverse_word': False, 'reverse_string': False, 'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd', 'e': 'e', 'f': 'f', 'g': 'g', 'h': 'h',\r\n 'i': 'i', 'j': 'j', 'k': 'j', 'l': 'l', 'm': 'm', 'n': 'n', 'o': 'o', 'p': 'p', 'q': 'q', 'r': 'r', 's': 's', 't': 't', 'u': 'u',\r\n 'v': 'v', 'w': 'w', 'x':'x', 'y': 'y', 'z': 'z'}:\r\n return True\r\n return False\r\n \r\n def set_key(vars): #vars=[0]num,[1]rWord,[2]rString\r\n \"\"\"Function that set the new key\"\"\"\r\n nonlocal key\r\n nonlocal x\r\n x=vars[0]\r\n if (vars[1]=='yes'):\r\n key['reverse_word']=True\r\n if (vars[2]=='yes'):\r\n key['reverse_string']=True\r\n if (x<-26 or x>26):\r\n x=x%26 #makes x to be in range\r\n if (x==0):\r\n x=random.randrange(-26,26) #random number\r\n for i in range (97,123): #26 ABC letters, ASCII value of 'a' is 97 97+26=123\r\n if(i+x>122):\r\n key[chr(i)]=chr(i-25+x)\r\n elif (i+x<97):\r\n key[chr(i)]=chr(i+26+x)\r\n else:\r\n key[chr(i)]=chr(i+x)\r\n print(\"done\")\r\n \r\n def empty_key():\r\n \"\"\"Function makes current key empty\"\"\"\r\n nonlocal key\r\n nonlocal x\r\n x=0\r\n key={'reverse_word': False, 'reverse_string': False, 'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd', 'e': 'e', 'f': 'f', 'g': 'g', 'h': 'h',\r\n 'i': 'i', 'j': 'j', 'k': 'j', 'l': 'l', 'm': 'm', 'n': 'n', 'o': 'o', 'p': 'p', 'q': 'q', 'r': 'r', 's': 's', 't': 't', 'u': 'u',\r\n 'v': 'v', 'w': 'w', 'x':'x', 'y': 'y', 'z': 'z'}\r\n print(\"done\")\r\n \r\n def export_key():\r\n \"\"\"Function export key\"\"\"\r\n if(isKeyEmpty(key)):\r\n print(\"key empty\")\r\n else:\r\n return key\r\n \r\n def import_key(key2):\r\n \"\"\"Function import key\"\"\"\r\n nonlocal key\r\n if(isKeyEmpty(key2)):\r\n print(\"key is empty\")\r\n else:\r\n key=key2\r\n print(\"done\")\r\n \r\n def encoding(sentence):\r\n \"\"\"function encoding 
given string with the key\"\"\"\r\n sentence=list(sentence)\r\n for i in range(len(sentence)):\r\n if (sentence[i]!=' '):\r\n sentence[i]=key[sentence[i]]\r\n sentence=''.join(sentence)\r\n if(key['reverse_word']==True):\r\n splitT=tuple(sentence.split(' '))\r\n splitT=map(lambda x:x[::-1],splitT)\r\n sentence=' '.join(splitT)\r\n if(key['reverse_string']==True):\r\n splitList=sentence.split(' ')\r\n splitList=splitList[-1::-1]\r\n sentence=' '.join(splitList)\r\n return sentence\r\n \r\n def decoding(sentence):\r\n \"\"\"function decoding given string with the key\"\"\"\r\n if(isKeyEmpty(key)):\r\n return \"key empty\"\r\n helpKey=dict((y,x) for x,y in key.items())\r\n if(key['reverse_word']==True):\r\n splitT=tuple(sentence.split(' '))\r\n splitT=map(lambda x:x[::-1],splitT)\r\n sentence=' '.join(splitT)\r\n if(key['reverse_string']==True):\r\n splitList=sentence.split(' ')\r\n splitList=splitList[-1::-1]\r\n sentence=' '.join(splitList)\r\n sentence=list(sentence)\r\n for i in range(len(sentence)):\r\n if(sentence[i]!=' '):\r\n sentence[i]=helpKey[sentence[i]]\r\n sentence=''.join(sentence)\r\n return sentence\r\n\r\n def dispatch(message,var=None):\r\n \"\"\"dispatch with message passing\"\"\"\r\n if message=='set_key':\r\n set_key(var)\r\n elif message=='empty_key':\r\n empty_key()\r\n elif message=='export_key':\r\n return export_key()\r\n elif message=='import_key':\r\n import_key(var)\r\n elif message=='encoding':\r\n return encoding(var)\r\n elif message=='decoding':\r\n return decoding(var)\r\n else:\r\n print(\"Unknown message\") \r\n return dispatch", "def get_input(self, guess):\r\n print\r\n print \"The player guessed = \", guess\r\n result = self.process_player_input(guess)\r\n print result\r\n if ((self.remaining_guesses == 0) or ( result == self.correctguess_message)):\r\n # Start a new game, with same range\r\n self.init(self.num_range)\r\n return result", "def go(self, comp):\n self.attempt = 1\n number = random.randint(1, 20)\n\n comp.call(util.Confirm('I choose a number between 1 and 20. Try to guess it'))\n\n while True:\n x = comp.call(util.Ask('Try #%d: ' % self.attempt))\n if not x.isdigit():\n continue\n\n x = int(x)\n\n if x > number:\n comp.call(util.Confirm('Choose a lower number'))\n\n if x < number:\n comp.call(util.Confirm('Choose a greater number'))\n\n if x == number:\n comp.call(util.Confirm(self.final_text % self.attempt))\n break\n\n self.attempt += 1", "def start():\n\n word = input(\"Please enter the word to be guessed: \").lower()\n while ( len(word) < MIN_WORD_LENGTH ) or ( len(word) > MAX_WORD_LENGTH ):\n word = input(\"Error, word must be > 5 and < 11 letters. 
Enter again: \").lower()\n\n game_end = False\n guess_letter = \"\"\n found_list = [\"\"] * len(word) # correct guesses\n guess_list = [] # failed guesses (all other guesses)\n hang_state = 0\n while not game_end: #while game is running\n game_end = gui(word,found_list, guess_list, hang_state)\n if game_end == False:\n guess_letter = input(\"Guess the next letter: \").lower() # Guess a letter\n while(len(guess_letter) > 1 or len(guess_letter) < 1):\n print(\"The letter must be length 1\")\n guess_letter = input(\"Guess the next letter: \").lower() # Guess a letter\n while guess_letter in guess_list or guess_letter in found_list:\n print(\"You already guessed this letter!\")\n guess_letter = input(\"Guess the next letter: \").lower() # Guess a letter\n for charnum in range(len(word)): #for each letter in the word\n \n if guess_letter == word[charnum]:\n if found_list[charnum] == \"\":\n found_list[charnum] = guess_letter.lower()\n else:\n if charnum == (len(word) - 1) and guess_letter not in found_list:\n print(\"That letter does not exist in the word...\")\n guess_list.append(guess_letter.lower())\n hang_state += 1\n finalString = ''.join(found_list)\n if str(finalString) == str(word):\n print(\"You got it! The word was \" + word + \"!\")\n game_end = True", "def new_round(guesses, letters_guessed = letters_guessed):\n\n # print(get_guessed_word(secret_word, letters_guessed) )\n print(\"You have \" + str(guesses) + \" guesses left.\")\n print(\"Available letters: \" + get_available_letters(letters_guessed))\n ans = input(\"Please guess a letter: \")\n if ans.isalpha():\n return ans.lower()\n else:\n return None", "def dance(self):\n if not self.safe_to_dance():\n return False #shutdown\n for x in range(4): \n self.shuffle()\n self.skipp()\n self.spin_dizzy()\n self.for_back()\n self.break_neck()\n self.swiggly()\n self.break_neck()\n self.backward_shimmey()", "def chooseAttack(opponents_board):\r\n while True:\r\n guess = int(raw_input(\"choose a number between 0 through 8 to attack::\"))\r\n if guess < 0 or guess >8:\r\n continue\r\n result = checkIfHitOrMiss(guess, opponents_board)\r\n\r\n\r\n if result == \"hit\" or result == \"miss\":\r\n break\r\n\r\n if checkIfSunk(opponents_board):\r\n return \"sunk\"\r\n\r\n return result", "def get_choice(attempt):\n try:\n user_text=''\n\n if attempt ==1:\n user_text ='Guess a number between 0 and 99:'\n \n choice = int(input(user_text))\n except ValueError:\n return get_choice()\n return choice", "def check_if_guessed(the_guess, word_to_guess):\n\treturn word_to_guess.find(the_guess)", "def get_guess():\n letter = input(\"Please input a letter to check\").lower()\n if len(letter) != 1:\n print(\"Please input a single letter\")\n get_guess()\n elif letter not in \"abcdefghijklmnopqrstuvxyz\":\n print (\"Only input letters\")\n get_guess()\n else:\n return letter", "def playGameplus(wordList):\n #选择游戏模式\n global la_st2\n n = 0\n print '请选择你想进行的模式:a:单人 c:人机 e: 退出游戏'\n while True:\n order9 = raw_input('>>>').lower()\n if (order9 == 'a') or (order9 =='c'):\n moudl = True\n break\n elif order9 == 'e':\n moudl = False\n print '游戏已退出'\n print ' = ' * 20\n break\n else:\n print '命令有误,请重新输入'\n if moudl:\n print 'n:新的游戏 r:重开上局 e:退出'\n order8 = raw_input('>>>').lower() \n while True:\n if order8 == 'n':\n while True:\n n = raw_input('你想获取的字母数(大于4个):')\n while True:\n try:\n n = int(n)\n if n > 4:\n break\n except ValueError,e:\n print '输入有误!'\n if order9 == 'a':\n hand = dealHand(n)\n la_st = copy.deepcopy(hand)\n playHand(hand, wordList, n)\n elif order9 
== 'c':\n hand = dealHand(n)\n la_st = copy.deepcopy(hand)\n playHandplus(hand, wordList, n)\n if order8 == 'r':\n if la_st2 and (order9 == 'a'):\n playHand(la_st, wordList, n)\n elif la_st2 and (order9 == 'c'):\n playHandplus(hand, wordList, n)\n elif not la_st2:\n print '您没有上局存档,请重新输入指令:'\n order8 = raw_input('>>>').lower()\n if order8 == 'e':\n print '游戏结束'\n break\n if not order8 in ['r','n','e'] or order8 == '':\n print '请重新输入指令:'\n order8 = raw_input('>>>').lower()", "def exercise_b2_53():\r\n pass", "def main():\n print(\"\\tWelcome to 'Guess My Number (Edit)'!\")\n print(\"\\nI'm thinking of a number between 1 and 100.\")\n print(\"Try to guess it in as few attempts as possible.\\n\")\n\n # set the initial values\n the_number = random.randint(1, 100)\n guess = ask_number(\"Take a guess: \", 1, 100)\n tries = 10\n\n # guessing loop\n while guess != the_number:\n tries -= 1\n if tries <= 0:\n break\n if guess > the_number:\n print(\"Lower...\")\n else:\n print(\"Higher...\")\n guess = ask_number(\"Take a guess: \", 1, 100)\n\n if guess == the_number:\n print(\"You guessed it! The number was\", the_number)\n print(\"And it only took you\", tries, \"tries!\\n\")\n\n elif tries <= 0:\n print(\"\\nSorry, you're out of tries. Better luck next time!.\")", "def play_hangman(self):\n while self.stage < 6:\n self.display_hangman()\n guess = input(f'{Fore.YELLOW}Choose a letter: {Style.RESET_ALL}').lower().strip() # noqa\n print('\\n')\n if guess.isalpha() and len(guess) == 1:\n if guess not in self.word:\n if guess in self.guessed_letters:\n print(f'You already guessed {guess}, try again')\n print('\\n')\n else:\n print(f'{Fore.RED}{guess} is not in the word, try again{Style.RESET_ALL}') # noqa\n print('\\n')\n self.stage += 1\n self.guessed_letters.append(guess)\n elif guess.isalpha() and guess in self.word:\n if guess in self.guessed_letters:\n print(f'You already guessed {guess}, try again')\n print('\\n')\n else:\n print(f'{Fore.GREEN}{guess} is in the word!{Style.RESET_ALL}') # noqa\n print('\\n')\n self.guessed_letters.append(guess)\n # code for replacing dashes with letters adapted from # noqa\n # https://github.com/kiteco/python-youtube-code/blob/master/build-hangman-in-python/hangman.py\n word_as_list = list(self.progress)\n indices = [i for i, letter in enumerate(self.word) if letter == guess] # noqa\n for index in indices:\n word_as_list[index] = guess\n self.progress = \"\".join(word_as_list)\n if \"-\" not in self.progress:\n print(f'{Fore.GREEN}Congrats! You correctly guessed the answer: {self.word}{Style.RESET_ALL}') # noqa\n print('\\n')\n self.games_won += 1\n break\n\n elif guess.isalpha() and guess == self.word:\n print(f'{Fore.GREEN}Congrats! You correctly guessed the answer: {self.word}{Style.RESET_ALL}') # noqa\n print('\\n')\n self.games_won += 1\n break\n\n elif guess.isalpha() and guess not in self.word and guess in self.guessed_words: # noqa\n print(f'You already guessed {guess}, try again')\n print('\\n')\n\n elif guess.isalpha() and guess not in self.word and guess not in self.guessed_words: # noqa\n print(f'{Fore.RED}{guess} is not the word, try again{Style.RESET_ALL}') # noqa\n print('\\n')\n self.stage += 1\n self.guessed_words.append(guess)\n print('\\n')\n else:\n print('Invalid input \\n')\n if self.stage >= 6:\n print(Fore.CYAN + HANGMAN_PICS[self.stage])\n print('\\n')\n print(f'{Fore.RED}Game Over! The word was {self.word}{Style.RESET_ALL}') # noqa\n print('\\n')\n self.play_again()" ]
[ "0.6238723", "0.62207544", "0.5964192", "0.594622", "0.5904978", "0.5902772", "0.585103", "0.581829", "0.5811916", "0.57857025", "0.5780752", "0.57535255", "0.57274103", "0.5718406", "0.57046616", "0.5671099", "0.5660733", "0.5626499", "0.56193167", "0.56131303", "0.5610858", "0.5606759", "0.5593693", "0.55798304", "0.5575135", "0.5518858", "0.5505174", "0.55022997", "0.55021596", "0.54712075", "0.546668", "0.54634744", "0.54623306", "0.54552037", "0.54463005", "0.54327375", "0.5429117", "0.542562", "0.5424466", "0.5417775", "0.5404893", "0.5395461", "0.5388907", "0.53782326", "0.53705376", "0.5366631", "0.53533775", "0.53399706", "0.5337086", "0.53300226", "0.5318004", "0.5295682", "0.52941024", "0.528812", "0.5283988", "0.5269931", "0.52426976", "0.524078", "0.5234313", "0.5233008", "0.5227922", "0.52272964", "0.5225665", "0.52218705", "0.5220627", "0.5220211", "0.52188635", "0.5212902", "0.5212804", "0.5212736", "0.5211798", "0.52069265", "0.52059186", "0.5202448", "0.5201077", "0.5197005", "0.5193895", "0.5188811", "0.51834655", "0.51823497", "0.51791763", "0.51770467", "0.5173501", "0.5166146", "0.5164817", "0.5164048", "0.5164011", "0.5163543", "0.5162683", "0.51613766", "0.5153554", "0.51504034", "0.51487267", "0.51431346", "0.51426935", "0.51322085", "0.5128117", "0.5125054", "0.51137304", "0.511281", "0.51119787" ]
0.0
-1
Given the IP ADDRESS of the camera to which you are connected and the ACQUISITION MODE into which you want to put the camera, this command will send the corresponding request to the camera.
def command(mode, ip, log):
    logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
                        level=logging_config[log])  # Using the default dict to get a valid format string no matter what
    phantom_socket = PhantomSocket(ip)
    phantom_socket.connect()
    click.echo('CONNECTED TO THE PHANTOM CAMERA')

    mode_identifier = _modes[mode]
    phantom_socket.set_mode(mode_identifier)
    click.echo('PHANTOM WILL TRANSIT INTO THE MODE "%s" NOW!' % mode_identifier)
    click.echo('THIS WILL CAUSE A REBOOT OF THE CAMERA, SO PLEASE HAVE PATIENCE')
    click.echo('IN CASE A CONNECTION CANNOT BE ESTABLISHED EVEN AFTER SOME TIME, HARD RESET THE CAMERA')
    click.echo('AFTER THE HARD RESET, THE MODE SHOULD BE CHANGED')
    phantom_socket.disconnect()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def camera_control(camera_host, camera_port, camera_user, camera_pass, q):\n\n try:\n camera = IPCamera(camera_host, camera_port, camera_user, camera_pass)\n q.put(camera.get_rtsp_url())\n except RuntimeError as exc:\n q.put(exc)\n\n try:\n while True:\n camera.move_to(*q.get())\n except KeyboardInterrupt:\n pass", "def event_btn_confirm_ip(self):\n\n print(\"attempting to open camera\")\n self.change_state(States.ACTIVATE_CAMERA)", "def camstart():\n\n\trespond = send_command('camstart')", "def __init__(self, local_ip, local_port, command_timeout=7, tello_ip='192.168.10.1',\r\n tello_port=8889):\r\n \r\n self.abort_flag = False\r\n self.command_timeout = command_timeout\r\n self.response = None \r\n\r\n self.frame = None # numpy array BGR -- current camera output frame\r\n self.last_frame = None\r\n\r\n # self.cap = cv2.VideoCapture(\"udp://@0.0.0.0:11111\")\r\n\r\n self.tello_ip = tello_ip\r\n self.tello_address = (tello_ip, tello_port)\r\n \r\n # Commands\r\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # socket for sending cmd\r\n self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\r\n self.socket.bind((local_ip, local_port))\r\n\r\n # thread for receiving cmd ack\r\n self.receive_thread = threading.Thread(target=self._receive_thread)\r\n self.receive_thread.daemon = True\r\n self.receive_thread.start()\r\n\r\n self.socket.sendto(b'command', self.tello_address)\r\n\r\n # Video\r\n # self.socket_video = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # socket for receiving video stream\r\n # self.local_video_port = 11111 # port for receiving video stream\r\n # self.socket_video.bind((local_ip, self.local_video_port))\r\n\r\n # thread for receiving video\r\n # self.receive_video_thread = threading.Thread(target=self._receive_video_thread)\r\n # self.receive_video_thread.daemon = True\r\n # self.receive_video_thread.start() \r\n\r\n # to receive video -- send cmd: command, streamon\r\n self.socket.sendto(b'streamon', self.tello_address)\r\n\r\n self.stream_state = True\r\n\r\n # TELLO STATE\r\n self.state = {}\r\n\r\n self.socket_state = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # socket for receiving state\r\n self.state_port = 8890 # port for receiving state\r\n self.socket_state.bind((local_ip, self.state_port))\r\n\r\n # thread for receiving state\r\n self.receive_state_thread = threading.Thread(target=self._receive_state_thread)\r\n self.receive_state_thread.daemon = True\r\n self.receive_state_thread.start() \r\n\r\n self.socket_state.sendto('command'.encode('utf-8'), self.tello_address)", "def camera_start(self):\n mycam = ONVIFCamera(self.__cam_ip, 80, self.__cam_user, self.__cam_password)\n logging.info('Create media service object')\n media = mycam.create_media_service()\n logging.info('Get target profile')\n media_profile = media.GetProfiles()[0]\n logging.info('Camera working!')\n\n self.mycam = mycam\n self.camera_media_profile = media_profile\n self.camera_media = media\n self.mycam = mycam\n\n return self.mycam", "def pibooth_setup_camera(cfg):", "def command(self, value):\n for ii in range(0, len(exposure_mode_names)):\n if value == exposure_mode_names[ii]: break\n self.tcp_comms.tcp_params.exposureMode = ii\n self.tcp_comms.send_exposure_mode(self.tcp_comms.tcp_params.exposureMode)", "def set_camera_module(action):\n\n endpoint = CAMERA_CAPTURE_URL + \"/camera/\" + action\n if DEBUG:\n print(\"Calling endpoint '%s'\" % endpoint)\n\n response = requests.post(endpoint)\n \n if DEBUG:\n print(\"Call to endpoint '%s' returned status 
code %s. Reason: %s\" % (endpoint, str(response.status_code), response.content))", "def change_IP(self,server_IP,MAC):\n content = {'server_IP':server_IP,'MAC_address':MAC}\n content = json.dumps(content)\n headers = {\"Content-Type\":\"application/json\"}\n #address will be given by the api\n r = requests.post(f\"http://{self.webserver_address}/api/camera/update_ip\", data = content,headers = headers,verify=False)\n if(r.status_code == 200):\n return True\n return False", "def setMode(self, request, context):\n \n self.vehicle.mode = VehicleMode(str(request.mode))\n self.vehicle.wait_ready('mode')\n \n return droneconnect_pb2.Null()", "def initialCamera(self, cmd):\n\n pass", "def run_single_camera(cam):\n\n try:\n # Retrieve TL device nodemap and print device information\n #nodemap_tldevice = cam.GetTLDeviceNodeMap()\n\n #result &= print_device_info(nodemap_tldevice)\n\n # Initialize camera\n cam.Init()\n\n # Retrieve GenICam nodemap\n nodemap = cam.GetNodeMap()\n exposures=[2000,4000,8000,16000]\n index=0\n if cam.ExposureAuto.GetAccessMode() != PySpin.RW:\n print(\"Unable to disable automatic exposure. Aborting...\")\n return False\n node_acquisition_mode = PySpin.CEnumerationPtr(nodemap.GetNode(\"AcquisitionMode\"))\n if not PySpin.IsAvailable(node_acquisition_mode) or not PySpin.IsWritable(node_acquisition_mode):\n print(\"Unable to set acquisition mode to continuous (enum retrieval). Aborting...\")\n return False\n\n # Retrieve entry node from enumeration node\n node_acquisition_mode_continuous = node_acquisition_mode.GetEntryByName(\"Continuous\")\n if not PySpin.IsAvailable(node_acquisition_mode_continuous) or not PySpin.IsReadable(node_acquisition_mode_continuous):\n print(\"Unable to set acquisition mode to continuous (entry retrieval). Aborting...\")\n return False\n\n acquisition_mode_continuous = node_acquisition_mode_continuous.GetValue()\n\n node_acquisition_mode.SetIntValue(acquisition_mode_continuous)\n\n print(\"Acquisition mode set to continuous...\")\n\n cam.ExposureAuto.SetValue(PySpin.ExposureAuto_Off)\n '''\n # Set maximum width\n #\n # *** NOTES ***\n # Other nodes, such as those corresponding to image width and height,\n # might have an increment other than 1. In these cases, it can be\n # important to check that the desired value is a multiple of the\n # increment.\n #\n # This is often the case for width and height nodes. However, because\n # these nodes are being set to their maximums, there is no real reason\n # to check against the increment.\n if cam.Width.GetAccessMode() == PySpin.RW and cam.Width.GetInc() != 0 and cam.Width.GetMax != 0:\n cam.Width.SetValue(FRAME_WIDTH)\n print(\"Width set to %i...\" % cam.Width.GetValue())\n\n else:\n print(\"Width not available...\")\n result = False\n\n # Set maximum height\n #\n # *** NOTES ***\n # A maximum is retrieved with the method GetMax(). A node's minimum and\n # maximum should always be a multiple of its increment.\n if cam.Height.GetAccessMode() == PySpin.RW and cam.Height.GetInc() != 0 and cam.Height.GetMax != 0:\n cam.Height.SetValue(FRAME_HEIGHT)\n print(\"Height set to %i...\" % cam.Height.GetValue())\n\n else:\n print(\"Height not available...\")\n result = False\n '''\n print(\"Automatic exposure disabled...\")\n #node_acquisition_framerate = PySpin.CFloatPtr(nodemap.GetNode(\"AcquisitionFrameRate\"))\n\n # if not PySpin.IsAvailable(node_acquisition_framerate) and not PySpin.IsReadable(node_acquisition_framerate):\n # print(\"Unable to retrieve frame rate. 
Aborting...\")\n # return False\n\n # framerate_to_set = node_acquisition_framerate.GetValue()\n\n # print(\"Frame rate to be set to %d...\" % framerate_to_set)\n canvas=np.zeros((FRAME_HEIGHT*2,FRAME_WIDTH*2,3), np.uint8)\n while True:\n exposure=exposures[index]\n \n configure_exposure(cam, exposure)\n # Acquire images\n err, img,width,height = acquire_images(cam, nodemap)\n if err < 0:\n return err\n\n \n img = img.GetData().reshape(height,width,3)\n\n half_height = int(height/2)\n half_width = int(width/2)\n half_frame_height = int(FRAME_HEIGHT/2)\n half_frame_width = int(FRAME_WIDTH/2)\n \n img = img[half_height-half_frame_height:half_height+half_frame_height,half_width-half_frame_width:half_width+half_frame_width]\n #smallimg=cv2.resize(img,(int(FRAME_WIDTH/2),int(FRAME_HEIGHT/2)))\n if index==0:\n #top left\n canvas[0:FRAME_HEIGHT,0:FRAME_WIDTH]=img\n elif index==1:\n #top right\n canvas[0:FRAME_HEIGHT,FRAME_WIDTH:FRAME_WIDTH*2]=img\n elif index==2:\n #bot left\n canvas[FRAME_HEIGHT:FRAME_HEIGHT*2,0:FRAME_WIDTH]=img\n else:\n #bot right\n canvas[FRAME_HEIGHT:FRAME_HEIGHT*2,FRAME_WIDTH:FRAME_WIDTH*2]=img\n index+=1\n if index>=len(exposures):\n index=0\n\n cv2.imshow(\"frame\",canvas)\n if cv2.waitKey(1) &0xff ==ord('q'):\n #stop the feed the 'q'\n break\n cv2.destroyAllWindows()\n # Deinitialize camera\n cam.DeInit()\n\n except PySpin.SpinnakerException as ex:\n print(\"Error: %s\" % ex)\n result = False", "def startCamera(self):\n if self.video == \"camera\":\n self.cap = cv2.VideoCapture(gstreamer_pipeline(\n capture_width=416, capture_height=416, flip_method=0), cv2.CAP_GSTREAMER)\n else:\n video_path = Path(self.video)\n if not video_path.exists():\n raise Exception(\"Video file not found\")\n self.cap = cv2.VideoCapture(str(video_path))", "def cozmo_app(coz_conn):\n coz = coz_conn.wait_for_robot()\n coz.camera.image_stream_enabled = True\n coz_ros = CozmoRos(coz)\n coz_ros.run()", "def camera(ctx, cam_id, analytic_addr, width, height):\n if not analytic_addr:\n analytic_addr = [\"localhost:50051\"]\n db = ctx.obj.db\n client = aceclient.AnalyticMultiClient()\n cap = cv2.VideoCapture(int(cam_id))\n cap.set(cv2.CAP_PROP_FRAME_WIDTH, int(width))\n cap.set(cv2.CAP_PROP_FRAME_HEIGHT, int(height))\n classes = {}\n window_names = []\n f_req = analytic_pb2.FrameRequest()\n for a in analytic_addr:\n analytic = analytic_pb2.AnalyticData()\n analytic.addr = a\n f_req.analytics.append(analytic)\n try:\n while cap.isOpened():\n ret, frame = cap.read()\n if not ret:\n print(\"Stream unavailable. Exiting.\")\n break\n resp = analytic_pb2.CompositeResults()\n resp = client.process_frame(frame, f_req, resp)\n print(len(window_names))\n render(resp, window_names, classes, frame, db)\n finally:\n cv2.destroyAllWindows()\n print(\"Shutting down\")", "def sendCommand(self, command, code):\r\n if self.visprotocol is not None:\r\n self.visprotocol.RequestArm(command.lower(), code)", "def setCameraToCOM(self):\n pass", "def dst_nat_into_vrf():\n\t\n device_params = {\n 'device_type': 'mikrotik_routeros',\n 'port': '11209',\n 'username': 'admin'}\n \t\t\n device_params['ip'] = input('IP Address of managed device: ')\n nd_port = input('SSH port. Blank, if default(11209): ')\n if nd_port:\n device_params['port'] = nd_port\n nd_user = input('Username. 
Blank, if default (admin): ')\n if nd_user:\n device_params['username'] = nd_user\n device_params['password'] = getpass.getpass()\n outside_address = input('Put outside address for dstnat(default - 93.189.145.82): ')\n if not outside_address:\n outside_address = '93.189.145.82'\n #outside_int = input('Put outside interface (default - ether2(DC Kraud outside int)): ')\n #if not outside_port:\n # outside_port = 'ether2'\n outside_port_dstnat = input('Put outside port for dstnat(Public port): ')\n inside_port = input('Put destination port(only port):') \n inside_address = input('Put inside address for dstnat (Inside adress): ')\n commands = []\n commands.append(f'/ip firewall mangle add action=mark-connection chain=prerouting connection-state=new dst-address={outside_address} dst-port={outside_port_dstnat} in-interface=ether2 new-connection-mark=into-vrf passthrough=yes protocol=tcp comment=\"DST_NAT_MANGLE_RULE_BY_SCRIPT FOR LEAKING FROM VRF\"')\n commands.append(f'/ip firewall nat add action=dst-nat chain=dstnat comment=\"DST_NAT_MANGLE_RULE_BY_SCRIPT FOR LEAKING FROM VRF\" dst-address={outside_address} dst-port={outside_port_dstnat} in-interface=ether2 protocol=tcp to-addresses={inside_address} to-ports={inside_port}')\n \n with ConnectHandler(**device_params) as ssh:\n for comm in commands:\n ssh.send_command(comm)\n return print(f'\"{commands[0]}\" and \"{commands[1]}\" are sent to device')", "def start_cam(cam = 'pi1', host = ' ', port = ' '):\n try:\n # using systemd to manage daemons. {space} is for weird systemd escaping\n space = '\\\\\\\\x20'\n remote_command = f\"ssh -f {cam} systemctl --user restart picamera@'{host}.local{space}{port}'\" \n print(remote_command)\n os.system(remote_command)\n except Exception as exc:\n sys.exit(f'SSH connection to {cam} failed with: {exc}')", "def __init__(self):\n self.available_angles = [-30, -15, 0, 15, 30]\n self.ros_service = rospy.Service(\"turn_camera\", TurnCamera, self.send_image)", "def gen():\n global ASK_NAME\n curr_frame = 0\n user_id = None\n mask_on_off = None\n\n cap = cv2.VideoCapture(0)\n # cap = cv2.VideoCapture(\"rtsp://192.168.22.146:8554/mjpeg/1\")\n\n def detect_and_predict_mask(frame, maskNet):\n nonlocal mask_on_off\n face = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n face = cv2.resize(face, (224, 224))\n face = img_to_array(face)\n face = preprocess_input(face)\n\n faces = [face]\n faces = np.array(faces, dtype=\"float32\")\n\n preds = maskNet.predict(faces, batch_size=32)\n # print(preds)\n (mask, withoutMask) = preds[0]\n label = \"Mask\" if mask > withoutMask else \"No Mask\"\n label = \"{}: {:.2f}%\".format(label, max(mask, withoutMask) * 100)\n if mask > withoutMask and mask > 0.9:\n print(label)\n mask_on_off = 1\n r = requests.post(f'http://{BASE_URL}:5000/mask', json={\"mask\": 1})\n if withoutMask > mask and withoutMask > 0.9:\n print(label)\n r = requests.post(f'http://{BASE_URL}:5000/mask', json={\"mask\": 0})\n mask_on_off = 0\n\n def mark_attendance(user_id_detected):\n nonlocal user_id\n print(user_id_detected, 'was seen')\n user_id = user_id_detected\n r = requests.post(\n f'http://{BASE_URL}:5000/face_info', json={\"name\": user_id})\n\n # Read until video is completed\n while CAM_ON:\n # Capture frame-by-frame\n ret, img = cap.read()\n mask_frame = imutils.resize(img, width=400)\n if ret:\n imgS = cv2.resize(img, (0, 0), None, 0.25, 0.25)\n imgS = cv2.cvtColor(imgS, cv2.COLOR_BGR2RGB)\n\n facesCurFrame = face_recognition.face_locations(imgS)\n encodesCurFrame = face_recognition.face_encodings(\n imgS, 
facesCurFrame)\n\n for encodeFace, faceLoc in zip(encodesCurFrame, facesCurFrame):\n matches = face_recognition.compare_faces(\n encodeListKnown, encodeFace)\n faceDis = face_recognition.face_distance(\n encodeListKnown, encodeFace)\n # print(faceDis)\n matchIndex = np.argmin(faceDis)\n\n if matches[matchIndex]:\n # name = classNames[matchIndex].upper()\n name = label_names[faceLabels[matchIndex]]\n y1, x2, y2, x1 = faceLoc\n y1, x2, y2, x1 = y1 * 4, x2 * 4, y2 * 4, x1 * 4\n cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)\n cv2.rectangle(img, (x1, y2 - 35), (x2, y2),\n (0, 255, 0), cv2.FILLED)\n cv2.putText(img, name.upper(), (x1 + 6, y2 - 12),\n cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255), 2)\n detect_and_predict_mask(mask_frame, maskNet)\n mark_attendance(name)\n else:\n if curr_frame < FRAMES_TO_CAPTURE:\n print(\"Saving Frame\")\n cv2.imwrite(f\"{datetime.datetime.now()}.jpg\", img)\n curr_frame += 1\n else:\n ASK_NAME = True\n cap.release()\n\n frame = cv2.imencode('.jpg', img)[1].tobytes()\n yield b'--frame\\r\\n'b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n'\n time.sleep(0.01)\n else:\n break\n if user_id is not None and mask_on_off is not None:\n print(\"Stopping stream...\")\n cap.release()\n break", "def _start_vidmemwriter(self, camType, ip=None, inputres=\"640x480\", outputres=\"640x480\"):\n if not self.__vidmemwriter and not self.__server_mode:\n self.__vidmemwriter = vidmemwriter.VidMemWriter([], [])\n\n if camType in self.__video_sources:\n return True\n\n self.__logger.info(\"I'm starting %s\" % camType)\n\n if ros_pattern.match(camType):\n #The first 4 characters \"ros_\" identify that is a specific ros image\n #The second part *** in \"ros_***/topic\" is the encoding:\n topic = camType[4:]\n encoding = \"passthrough\"\n self.__logger.info(\"camType !!!!!! %s\" % camType)\n if not camType[4] == '/':\n str_list = camType.split(\"_\")\n topic = '_'.join(str_list[2:])\n encoding = str_list[1]\n ros_image_source = rosimage.RosImage(topic, encoding)\n\n if self.__server_mode:\n self.__register_video_source(camType, ros_image_source)\n else:\n self.__vidmemwriter.add_video_source(ros_image_source, camType)\n self.__video_sources.append(camType)\n self.__logger.info(\"rosimage started for topic: %s, with encoding: %s\" % (topic, encoding))\n return True\n elif camType == \"webcam\":\n self.__logger.debug(\"I'm starting webcam\")\n webcamsource = takeimages.TakeImages(self.__camera)\n img = webcamsource.get_image()\n if type(img) is type(\"\"):\n self.__logger.error(\"No camera found. Please check connection!\")\n return False\n\n if webcamsource.Nocamera:\n if self.__camera == -1:\n self.__logger.error(\"No camera found. Please check connection!\")\n else:\n self.__logger.error(\"Camera %d not found. Please check connection!\" % self.__camera)\n return False\n if self.__server_mode:\n self.__register_video_source('webcam', webcamsource)\n else:\n self.__vidmemwriter.add_video_source(webcamsource, \"webcam\")\n self.__video_sources.append(\"webcam\")\n self.__logger.info(\"Webcam started\")\n return True\n elif camType == 'kinect_openni':\n self.__logger.debug(\"I'm starting kinect using openni\")\n import util.openni_kinectvideo as kv\n depth_source = kv.OpenNIKinect(\"depth\")\n rgb_source = kv.OpenNIKinect(\"rgb\")\n\n try:\n depth_source.get_image()\n except:\n self.__logger.error(\"Kinect not found. 
Please check connection!\")\n return False\n\n if self.__server_mode:\n self.__register_video_source('kinect_depth', depth_source)\n self.__register_video_source('kinect_rgb', rgb_source)\n else:\n self.__vidmemwriter.add_video_source(depth_source, \"kinect_depth\")\n self.__vidmemwriter.add_video_source(rgb_source, \"kinect_rgb\")\n\n self.__video_sources.append(\"kinect_depth\")\n self.__video_sources.append(\"kinect_rgb\")\n self.__video_sources.append(\"kinect\")\n self.__video_sources.append(\"kinect_openni\")\n \n self.__logger.info(\"Kinect started\")\n return True\n elif camType == 'kinect' or camType == 'kinect_rgb' or camType == 'kinect_depth':\n if self.__use_openni:\n self.__logger.info(\"I'm starting kinect using openni\")\n import util.openni_kinectvideo as kv\n depth_source = kv.OpenNIKinect(\"depth\")\n rgb_source = kv.OpenNIKinect(\"rgb\")\n\n try:\n depth_source.get_image()\n except:\n self.__logger.error(\"Kinect not found. Please check connection!\")\n return False\n else:\n self.__logger.info(\"I'm starting kinect using freenect\")\n try:\n import util.kinectmemwriter\n except:\n self.__logger.error(\"Could not load kinectmemwriter module. Check modules.\")\n return False\n\n depth_source = util.kinectmemwriter.KinectDepthSource()\n rgb_source = util.kinectmemwriter.KinectRGBSource()\n\n try:\n depth_source.get_image()\n except:\n self.__logger.error(\"Kinect not found. Please check connection!\")\n return False\n\n if self.__server_mode:\n self.__register_video_source('kinect_depth', depth_source)\n self.__register_video_source('kinect_rgb', rgb_source)\n else:\n self.__vidmemwriter.add_video_source(depth_source, \"kinect_depth\")\n self.__vidmemwriter.add_video_source(rgb_source, \"kinect_rgb\")\n\n self.__video_sources.append(\"kinect_depth\")\n self.__video_sources.append(\"kinect_rgb\")\n self.__video_sources.append(\"kinect\")\n \n self.__logger.info(\"Kinect started\")\n return True\n elif camType == \"naovideo\":\n self.__logger.debug(\"I'm starting naovideo\")\n try:\n import util.naovideo as naovideo\n except:\n self.__logger.error(\"Could not load naovideo module. 
Check modules\")\n return False\n #get ip of nao:\n #TODO: fix this dirty hack (it should be read from the config file)\n naoip = \"129.125.178.232\"\n if ip:\n naoip = ip\n \n self.__logger.warn(\"Using input resolution %s and output resolution %s\" % (inputres, outputres))\n #use the naovideo module:\n if self.__camera != 0 and self.__camera != 1:\n self.__camera = 0\n try:\n naocamsource = naovideo.VideoModule(naoip, inputres, outputres, camera=self.__camera)\n naocamsource.get_image()\n except:\n self.__logger.error(\"Something went wrong using the camera of the nao (check connection!)\")\n traceback.print_exc()\n return False\n\n if self.__server_mode:\n self.__register_video_source('naovideo', naocamsource)\n else:\n self.__vidmemwriter.add_video_source(naocamsource, \"naovideo\")\n self.__video_sources.append(\"naovideo\")\n self.__nao_camera = naocamsource\n self.__logger.info(\"Naovideo started\")\n return True\n else:\n self.__logger.warning(\"Invalid video source specified: %s\" % camType)\n return False", "def __init__(self, source, ip='localhost', port=12345):\n self.ip = ip\n self.port = port\n self.frame = 1\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.frame_buffer = PriorityQueue()\n self.capture_thread = Thread(target=self.capture_video)\n self.sending_thread = Thread(target=self.send_video)\n self.capture_thread.setDaemon(True)\n self.sending_thread.setDaemon(True)\n self.capturing = False\n self.source = source\n self.addr = (self.ip, self.port)\n self.encode_param = [1, 90]#[int(cv2.IMWRITE_JPEG_QUALITY), 90]", "def send_rtsp_request(self, request_code):\n last_fileName = ''\n if request_code == self.PLAY:\n last_fileName = self.fileName\n self.fileName = self.movie_name_edit.text()\n if request_code == self.SETUP and self.state == self.INIT:\n threading.Thread(target=self.recv_rtsp_reply).start()\n self.rtsp_seq += 1\n self.rtp_port = int(self.rtp_port_edit.text())\n self.rtcp_port = int(self.rtcp_port_edit.text())\n request = 'SETUP ' + self.fileName + ' RTSP/1.0\\nCSeq: ' + str(\n self.rtsp_seq) + '\\nTransport: RTP/UDP; client_port= ' + str(self.rtp_port) + \\\n '\\nProtect: RTCP/TCP; rtcp_port= ' + str(self.rtcp_port)\n self.rtsp_command_send = request_code\n elif request_code == self.PLAY and self.state == self.READY:\n if last_fileName != self.fileName:\n self.play_seconds = 0\n self.curr_frame = 0\n self.time_label.setText('00:00:00')\n self.rtsp_seq += 1\n self.timer.start(1000)\n\n request = 'PLAY ' + self.fileName + ' RTSP/1.0\\nCSeq: ' + str(self.rtsp_seq) + '\\nlevel: ' + \\\n str(self.video_level) + '\\nSession: ' + str(self.session_id) + '\\nRange: ' + str(self.percent)\n self.rtsp_command_send = request_code\n elif request_code == self.PAUSE and self.state == self.PLAYING:\n self.rtsp_seq += 1\n request = 'PAUSE ' + self.fileName + ' RTSP/1.0\\nCSeq: ' + str(self.rtsp_seq) + '\\nSession: ' + str(\n self.session_id)\n self.rtsp_command_send = request_code\n elif request_code == self.TEARDOWN and not self.state == self.INIT:\n self.rtsp_seq += 1\n request = 'TEARDOWN ' + self.fileName + ' RTSP/1.0\\nCSeq: ' + str(self.rtsp_seq) + '\\nSession: ' + str(\n self.session_id)\n self.rtsp_command_send = request_code\n else:\n return\n self.rtsp_socket.send(request.encode())", "def start_camera(config):\n print(\"Starting {} on {}\".format(config.name, config.path))\n cs = CameraServer.getInstance()\n camera = cs.startAutomaticCapture(name=config.name, path=config.path)\n\n camera.setConfigJson(json.dumps(config.config))\n\n return cs, 
camera", "def cameraOn():\n cap = cv2.VideoCapture(CAM0, cv2.CAP_DSHOW) # use camera to monitor the motor-mirror assemnbly by DirectShow\n while(True):\n # Capture frame-by-frame\n ret, frame = cap.read()\n\n # Display the resulting frame\n cv2.imshow(\" Real-Time Video. Press 'q' to exist.\",frame)\n if cv2.waitKey(8) & 0xFF == ord('q'): #display a frame for 8ms, ~120Hz\n break\n \n cap.release() # release the capture\n cv2.destroyAllWindows()", "def cam():\n\treturn Response(gen(camera),\n\t\t\t\t\tmimetype='multipart/x-mixed-replace; boundary=frame'), 200", "def request_capturing(self):\n self.socket.sendall(pack('B', codes['request_pokemon']))\n self.receive_pokemon_suggestion()", "def homeCameraService(req):\n\n global robot\n\n # home the camera\n robot.camera.reset()\n\n # return status\n return homeCameraResponse(True)", "def __init__(self, name, location, device_id=uuid.uuid4(), ip=None, cam_user=None, cam_password=None,\n capture_path=None, payload=None, authentication = None, cam_ctrl=None):\n Device.__init__(self, name, \"ipcam\", location, device_id)\n self.ip = ip\n self.cam_user = cam_user\n self.cam_password = cam_password\n self.capture_path = capture_path\n self.payload = payload\n self.authentication = authentication\n self.cam_ctrl = cam_ctrl", "def forward(self, srcip, packet): #gets entire packet and srcip of that packet\n # get route to send packet\n best_route = self.get_route(srcip, packet[DEST]) #is a socket\n\n sock = best_route\n\n\n jsonpack = json.dumps(packet)\n sock.sendall(jsonpack.encode())\n # TODO fix src and dest\n return True", "def command(self):\n saw_error = False\n try:\n analog_gain = float(self.value_analog.get())\n except:\n print(\"analog must be floating point value\")\n self.value_analog.set(str(self.tcp_comms.tcp_params.analog_gain_target))\n saw_error = True\n try:\n digital_gain = float(self.value_digital.get())\n except:\n print(\"digital must be floating point value\")\n self.value_digital.set(str(self.tcp_comms.tcp_params.digital_gain_target))\n saw_error = True\n try:\n analog_tol = float(self.value_analog_tol.get())\n except:\n print(\"analog tol must be floating point value\")\n self.value_analog_tol.set(str(self.tcp_comms.tcp_params.analog_gain_tol))\n saw_error = True\n try:\n digital_tol = float(self.value_digital_tol.get())\n except:\n print(\"digital tol must be floating point value\")\n self.value_digital_tol.set(str(self.tcp_comms.tcp_params.digital_gain_tol))\n saw_error = True\n if not saw_error:\n self.tcp_comms.tcp_params.analog_gain_target = analog_gain\n self.tcp_comms.tcp_params.digital_gain_target = digital_gain\n self.tcp_comms.tcp_params.analog_gain_tol = analog_tol\n self.tcp_comms.tcp_params.digital_gain_tol = digital_tol\n self.tcp_comms.send_freeze_exposure(analog_gain, analog_tol, digital_gain, digital_tol)", "def open_camera(self):\n camera_source = self.winOpenCam.camera_source_used()\n if camera_source:\n param_name = select_file(\n \"Select Parameter\", \"../\", \"Parameter Files (*.json)\")\n if param_name:\n self.moildev = Moildev(param_name)\n self.running_video(camera_source)\n self.cam = True", "def testCaptureModeResponse(self):\n message = (mavutil.mavlink.GOPRO_COMMAND_CAPTURE_MODE, mavutil.mavlink.GOPRO_REQUEST_SUCCESS)\n self.mgr.set_response_callback('vehicle','name', message)\n self.mgr.processMsgQueue.assert_called_with()", "def showCamera(self,**kwargs):\n try:\n side = kwargs['side']\n except Exception,e:\n rospy.logerr(\"%s\"%str(e))\n self.mm.neglect()\n return\n 
self.hideGUI(**{'update':False})\n self.baxter.camera.startCamera(side+\"_hand_camera\")\n self.locator.publish_camera = True", "def __init__(self):\n print(\"Starting picam Camera Object\")\n self.cam = picamera.PiCamera()\n self.stream = io.BytesIO()", "def set_up_stream(self,email,api_key,client_ip,server_IP):\n try:\n valid = self.validate.validate_user(email,api_key)\n if (valid):\n try:\n requests.post(f\"http://{server_IP}:6000/sensor_off\")\n except:\n print(\"Sensor was not active\")\n try:\n content = {'email':email,'api_key':api_key}\n content = json.dumps(content)\n headers = {\"Content-Type\":\"application/json\"}\n requests.post(f\"http://{server_IP}:8000/shutdown_stream\", data = content,headers = headers,verify=False)\n\n time.sleep(1)\n except:\n print(\"Video stream was not active\")\n\n # subprocess.call(['python3', '/home/Project_Group_9/model/video_stream.py', str(client_ip), str(PORT)])\n # call(f\"python3 /home/Project_Group_9/model/video_stream.py {client_ip} {PORT}\", shell=True)\n # run(\"/home/Project_Group_9/model/video_stream.py\", client_ip, PORT)\n start_command = f\"sudo python3 /home/Project_Group_9/model/video_stream.py {client_ip} {PORT} {server_IP} &\"\n os.system(start_command)\n time.sleep(5)\n return json.dumps({'port':PORT, 'server_ip': server_IP})\n return '', 403\n except:\n return '', 500", "def camera(self, source):\n self._camera = source\n self.SetActiveCamera(self._camera)\n self.camera_position = CameraPosition(\n scale_point(source, source.position, invert=True),\n scale_point(source, source.focal_point, invert=True),\n source.up,\n )\n self.Modified()\n self.camera_set = True", "def showNaoImage(IP, PORT):\r\n\r\n camProxy = ALProxy(\"ALVideoDevice\", IP, PORT)\r\n resolution = 2 # VGA\r\n colorSpace = 11 # RGB\r\n\r\n videoClient = camProxy.subscribe(\"python_client\", resolution, colorSpace, 5)\r\n\r\n t0 = time.time()\r\n\r\n # Get a camera image.\r\n # image[6] contains the image data passed as an array of ASCII chars.\r\n naoImage = camProxy.getImageRemote(videoClient)\r\n\r\n t1 = time.time()\r\n\r\n # Time the image transfer.\r\n print \"acquisition delay \", t1 - t0\r\n\r\n camProxy.unsubscribe(videoClient)\r\n\r\n\r\n # Now we work with the image returned and save it as a PNG using ImageDraw\r\n # package.\r\n\r\n # Get the image size and pixel array.\r\n imageWidth = naoImage[0]\r\n imageHeight = naoImage[1]\r\n array = naoImage[6]\r\n\r\n # Create a PIL Image from our pixel array.\r\n #im = Image.fromstring(\"RGB\", (imageWidth, imageHeight), array)\r\n im = Image.frombytes(\"RGB\", (imageWidth, imageHeight), array)\r\n\r\n # Save the image.\r\n im.save(\"camImage.png\", \"PNG\")\r\n im=Image.open(\"camImage.png\")\r\n im.mode\r\n 'p'\r\n im=im.convert('RGB')\r\n im.mode\r\n 'RGB'\r\n im.save('camImage_asjpg.jpg',quality=95)\r\n im.show()", "def send_video(self):\n self.send_video_socket = self.start_socket(IP, SEND_VIDEO_PORT)\n self.send_chunk(self.call_name.encode(), self.send_video_socket)\n mes = self.receive_mes(self.send_video_socket)\n print(mes)\n mes = self.receive_mes(self.send_video_socket)\n print(mes)\n while mes == \"wait\":\n time.sleep(TIME_SLEEP)\n mes = self.receive_mes(self.send_video_socket)\n print(mes)\n # print(\"here send\")\n cap = cv.VideoCapture(CAPTURE)\n cap.set(WID, WIDTH)\n cap.set(HIGH, HEIGHT)\n code = 'start'\n code = ('start' + (BUF - len(code)) * 'a').encode('utf-8')\n done = False\n #try:\n while cap.isOpened() and not done:\n try:\n ret, frame = cap.read()\n if ret:\n self.send_chunk(code, 
self.send_video_socket)\n data = frame.tobytes()\n for i in range(RANGE_START, len(data), BUF):\n self.send_chunk(data[i:i + BUF],\n self.send_video_socket)\n time.sleep(TIME_SLEEP)\n else:\n break\n except socket.error as msg:\n print(\"socket failure send video: {}\".format(msg))\n done = True\n #except ConnectionAbortedError as e:\n #print(\"exception send video\")\n self.send_video_socket.close()", "def connect(self):\n logging.info(\"Cam.py: connecting components\")\n self.serv = pyrs.Service()\n self.dev = self.serv.Device(device_id=0, \n streams=[\\\n pyrs.stream.DepthStream(fps=60), pyrs.stream.ColorStream(fps=60)])", "def __init__(self, tello, address):\r\n tello.cap = cv.VideoCapture(address)\r\n self.cap = tello.cap\r\n\r\n if not self.cap.isOpened():\r\n self.cap.open(address)\r\n\r\n self.grabbed, self.frame = self.cap.read()\r\n self.stopped = False", "def capture_camera(mirror=True, size=None):\n # カメラをキャプチャする\n cap = cv2.VideoCapture(0) # 0はカメラのデバイス番号\n #HAAR分類器の顔検出用の特徴量\n cascade_path = \"haarcascade_frontalface_alt.xml\"\n color = (255, 255, 255) #白\n #カスケード分類器の特徴量を取得する\n cascade = cv2.CascadeClassifier(cascade_path)\n\n while True:\n count = 0 #参照フレームのカウント\n # retは画像を取得成功フラグ\n ret, frame = cap.read()\n\n # 鏡のように映るか否か\n if mirror is True:\n frame = frame[:,::-1]\n\n # フレームをリサイズ\n # sizeは例えば(800, 600)\n if size is not None and len(size) == 2:\n frame = cv2.resize(frame, size)\n\n k = cv2.waitKey(1) # 1msec待つ\n\n if k == 13: # Enterキーで保存\n cv2.imwrite(\"test.png\", frame)\n\n if k == 27: # ESCキーで終了\n break\n\n\n if count == 10 or count == 0: # 参照フレーム軽減\n #グレースケール変換\n image_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n #物体認識(顔認識)の実行\n facerect = cascade.detectMultiScale(image_gray, scaleFactor=1.1, minNeighbors=1, minSize=(1, 1))\n count = 1\n else:\n count = count + 1\n #rect = (50,50,50,50)\n image = cv2.imread('lena.jpeg')\n #cv2.rectangle(image), tuple([50,50]), tuple([50,50]), color, thickness=2)\n\n if len(facerect) > 0:\n #if True:\n #検出した顔を囲む矩形の作成\n print (\"face rectangle\")\n print (facerect)\n for rect in facerect:\n cv2.rectangle(image, tuple(rect[0:2]),tuple(rect[0:2]+rect[2:4]), color, thickness=2)\n print('check')\n\n # フレームを表示する\n cv2.imshow('camera capture', frame)\n\n # キャプチャを解放する\n cap.release()\n cv2.destroyAllWindows()", "def camera_operation(self):\r\n ret, self.frame = self.cap.read() #get frame/ read from camera\r\n\r\n #try finding faces\r\n try:\r\n gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)\r\n faces = FACE_CASCADE.detectMultiScale(gray, scaleFactor = 1.5, minNeighbors = 5)\r\n #print(faces)\r\n for(x, y, w, h) in faces:\r\n #print(x, y, w, h) \r\n self.roi_gray = gray[y: y+h, x: x+w] #region of interest is face\r\n #Drawing Rectangle\r\n color = (255, 0, 0)\r\n stroke = 2\r\n end_cord_x = x+w\r\n end_cord_y = y+h\r\n cv2.rectangle(self.frame, (x,y), (end_cord_x, end_cord_y), color, stroke)\r\n self.FACE_FOUND = True\r\n\r\n \"\"\"While training if more than one face detected\"\"\"\r\n if (self.TRAIN_FLAG == True) and (len(faces) > 1):\r\n self.pop_window(title=\"Warning\", msg=\"Training takes only one face. 
\\nMultiple face detected.\")\r\n self.FACE_FOUND = False\r\n\r\n \"\"\"recognize faces, show with name\"\"\"\r\n if self.RECOGNIZE_FLAG == True:\r\n Id, confidence = RECOGNIZER.predict(self.roi_gray)\r\n print(confidence)\r\n \r\n name = self.names[Id-1] #get corresponding name\r\n\r\n \"\"\"if id not found, lock the screen\"\"\"\r\n if (confidence > CONFIDENCE_THRESHOLD) and (self.RECOGNIZE_FLAG == True):\r\n subprocess.call(LOCK_CODE)\r\n print(\"Unknown\")\r\n\r\n \"\"\"put name with face bouding box\"\"\"\r\n #if confidence value less than threshold value,\r\n #the smalller the value the better the accuracy\r\n if (name in self.names) and (confidence < CONFIDENCE_THRESHOLD) and (self.TRAIN_FLAG == False):\r\n cv2.putText(self.frame, name, (x, y+w+20), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (250, 250, 250))\r\n print(Id)\r\n\r\n\r\n\r\n \r\n except:\r\n #self.FACE_FOUND = False\r\n pass #run anyway\r\n \r\n\r\n #_______________________Check record flag____________________________________\r\n #print(self.RECORD_FLAG)\r\n if self.RECORD_FLAG == True:\r\n print(\"Recording man!\")\r\n self.video_writer.write(self.frame)\r\n #notify on image about recording\r\n cv2.putText(self.frame, \"Recording..\", (5, 380), cv2.FONT_HERSHEY_SCRIPT_SIMPLEX, 0.5, (0, 255, 0), 1, cv2.LINE_AA)\r\n\r\n #_______________________Train model with new face____________________________\r\n #print(self.TRAIN_FLAG)\r\n if self.TRAIN_FLAG == True:\r\n #print(\"Training Mode\")\r\n #notify about Training\r\n cv2.putText(self.frame, \"Training Mode\", (5, 75), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1, cv2.LINE_AA)\r\n #put sample number on screen\r\n cv2.putText(self.frame, str(self.sample_num), (10, 300), cv2.FONT_HERSHEY_COMPLEX, 4, (255, 255, 255), 2, cv2.LINE_AA)\r\n \r\n self.counter += 1 #start counter\r\n #print(self.counter)\r\n \r\n if self.sample_num == MAX_SAMPLE_COLLECTION_NUM: #reached max sample number\r\n cv2.putText(self.frame, \"Training, wait!\", (10, 350), cv2.FONT_HERSHEY_COMPLEX, 2, (255, 255, 255), 1, cv2.LINE_AA)\r\n self.update_img_label(self.frame)\r\n self.sample_num = 0 #set sample number to zero\r\n self.TRAIN_FLAG = False #stop saving\r\n self.pop_window(title=\"INFO\", msg=\"Sample images collected, Train?\")\r\n\r\n self.train()\r\n\r\n\r\n elif (self.counter == 12) and (self.FACE_FOUND == True): #after 1 sec and if face found\r\n print(\"saving roi\")\r\n self.sample_num += 1 #increment sample number\r\n cv2.imwrite(f\"{PARENT_PATH}\\\\{DATASET_DIR}\\\\user.{self.id}.{self.sample_num}.jpg\", self.roi_gray)\r\n \r\n self.counter = 0 #make it zero\r\n self.FACE_FOUND = False #False, wait for next face confirmation\r\n\r\n elif self.counter == 12:\r\n print(\"Waiting for face\")\r\n self.counter = 0\r\n \r\n\r\n \r\n #_______________set current frame in QLabel___________________\r\n self.update_img_label(self.frame)", "def _get_camera(self, mode):\n cam_bp = self.blueprint_lib.find(f\"sensor.camera.{mode}\")\n cam_bp.set_attribute(\"image_size_x\", f\"{self.img_x}\")\n cam_bp.set_attribute(\"image_size_y\", f\"{self.img_y}\")\n cam_bp.set_attribute(\"fov\", f\"{self.img_fov}\")\n cam = self.world.spawn_actor(cam_bp, self.transform, attach_to=self.vehicle) # spawing isn't expected to fail\n \n return cam", "def insert_route(src_ip, gre_tunnel):\n import os\n try:\n os.system(\"\"\"\n /usr/bin/sudo /sbin/iptables -t mangle -A PREROUTING -s %s -j MARK --set-mark %s\n \"\"\" % (src_ip, gre_tunnel))\n except:\n raise iptExc(\"Could not insert route from src_ip %s to gre tunnel %s in iptables\" 
% (src_ip, gre_tunnel))\n return True", "def from_rtsp_stream(ip, port):\n url = f\"rtsp://{ip}:{port}/h264_pcm.sdp\"\n vcap = cv2.VideoCapture(url)\n while True:\n ret, frame = vcap.read()\n if ret == False:\n print(\"Frame is empty\")\n break\n else:\n cv2.imshow(\"VIDEO\", frame)\n cv2.waitKey(1)", "def set_addressing_mode(mode):\n send_command(0x20)\n send_command(mode)", "def __init__(\n self,\n hass,\n camera,\n uuid,\n name,\n stream_source,\n recording_mode,\n model,\n up_since,\n last_motion,\n online,\n ):\n super().__init__()\n self.hass = hass\n self._nvr = camera\n self._uuid = uuid\n self._name = name\n self._model = model\n self._up_since = up_since\n self._last_motion = last_motion\n self._online = online\n self._motion_status = recording_mode\n self._stream_source = stream_source\n self._isrecording = False\n self._camera = None\n self._last_image = None\n self._supported_features = SUPPORT_STREAM if self._stream_source else 0\n\n if recording_mode != \"never\" and self._online:\n self._isrecording = True\n\n _LOGGER.debug(\"Camera %s added to Home Assistant\", self._name)", "def deviceClientOn():\n\n # Make the client\n client = makeClient()\n camera = PiCamera()\n camera.start_preview()\n sleep(2)\n\n # Continous looping for user images. If there is a face\n # The loop will pause and the user key will be evaluated.\n while True:\n\n # Take a piture and save it to a specific path\n takePicture(camera, IMGPATH)\n\n # If face detected it will send to AWS IoT Core\n if faceDetection(IMGPATH):\n\n print('face detected... sending to cloud')\n\n # Open the newly aquired face\n with open(IMGPATH, 'rb') as file:\n img = file.read()\n\n # open users for admin messages.\n # Ideally this would be in an MySQL server, but \n # that is a paid service in AWS and time consuming\n with open(PATH_TO_USERS) as users:\n readData = json.load(users)\n \n # We need to encode the image to be in a byte format for MQTT\n data = base64.b64encode(img)\n\n # Create our dictionary to send the data in\n message = {\n \"image\" : data.decode('utf-8'),\n \"faceCollection\" : FACE_COLLECTION_ID,\n \"admin_list\" : readData['admin_list'],\n \"device_id\" : DEVICENAME\n }\n\n # Send to cloud\n client.publish(topic, json.dumps(message), 1)\n\n # Buffer for processing time\n # Sleep for 30 seconds as camera cooldown / as to not spam\n # the AWS service ($$$)\n print('Waiting...')\n sleep(30)\n else: \n # No face was detected, we sleep only for 10 seconds\n print(\"No face found... 
Looking again in 10 seconds\")\n sleep(10)", "def Camera2Run():\n try:\n with open('./Camera2_settings.jsn', 'r') as fp:\n configDict = json.loads(fp.read())\n fp.close()\n except IOError:\n # we will make default settings if we didn't find file\n print (\n 'Unable to open Camera2_settings.jsn, using default settings, please edit new settings.')\n configDict = {'dataPath': '/home/pi/Documents/', 'UDP_Sender': '127.0.0.1',\n 'UDP_IP': '', 'UDP_Port': 5007, 'maxRecSecs': 30.0}\n try:\n camera2 = AHF_Camera(configDict)\n except Exception as anError:\n print (\"Quitting, Camera not initialized..\" + str(anError))\n return\n event = input(\n 'enter \\'e\\' to edit settings, or any other character to start waiting for UDP events\\n:')\n if event == 'e' or event == \"E\":\n editConfig(configDict, camera2)\n\n try:\n # set up UDP port for listening\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.bind((configDict.get('UDP_IP'), configDict.get('UDP_Port')))\n except socket.error:\n print (\"Quitting, Could not make a socket connection.\")\n return\n isCapturing = False\n while True:\n if isCapturing == True:\n sock.settimeout(configDict.get('maxRecSecs'))\n print ('Waiting for a Stop Trigger.')\n else:\n print ('Waiting for a Start Trigger.')\n sock.settimeout(None)\n try:\n data, addr = sock.recvfrom(1024)\n dataStr = data.decode(\"utf-8\")\n addStr = addr[0]\n if addStr == configDict.get('UDP_Sender'):\n if isCapturing == False:\n camera2.start_recording(configDict.get(\n 'dataPath') + dataStr + '.' + configDict.get('format'))\n isCapturing = True\n print ('Capturing \"' + dataStr + '\"...', end=' ')\n elif dataStr == 'Stop':\n camera2.stop_recording()\n isCapturing = False\n print ('Ending Capture...', end=' ')\n\n except socket.error:\n if isCapturing == True:\n camera2.stop_recording()\n isCapturing = False", "def send_request(bytestr, mode, tag=''):\n init = Initializer.create_init()\n queue = init.queue\n\n addr = queue.get()\n client = ipc.HTTPTransceiver(addr, 12345)\n requestor = ipc.Requestor(PROTOCOL, client)\n\n data = dict()\n data['input'] = bytestr\n data['next'] = mode\n data['tag'] = tag\n\n start = time.time()\n requestor.request('forward', data)\n end = time.time()\n\n init.node_timer(mode, end - start)\n\n client.close()\n queue.put(addr)", "def request():\n return face_client.face.detect_with_stream(image=open(\"frame.png\", 'rb'),\n return_face_attributes=[emotion_attribute],\n recognition_model='recognition_02')", "def update_cams(request_body: CamerasModel):\n logger.info(request_body)\n frame_rate = request_body.fps\n ava_is_send = request_body.ava_is_send\n stream_manager.update_streams([cam.id for cam in request_body.cameras])\n n = stream_manager.get_streams_num_danger()\n # frame_rate = onnx.update_frame_rate_by_number_of_streams(n)\n # recommended_fps = onnx.get_recommended_frame_rate(n)\n onnx.set_frame_rate(frame_rate)\n logger.warning(\"update frame rate to {}\".format(frame_rate))\n\n # lva_mode\n\n if request_body.lva_mode:\n lva_mode = request_body.lva_mode\n onnx.set_lva_mode(lva_mode)\n else:\n lva_mode = onnx.lva_mode\n\n for cam in request_body.cameras:\n cam_type = cam.type\n cam_source = cam.source\n cam_id = cam.id\n cam_name = cam.name\n # TODO: IF onnx.part_detection_mode == \"PC\" (PartCounting), use lines to count\n line_info = cam.lines\n zone_info = cam.zones\n\n if cam.aoi:\n aoi = json.loads(cam.aoi)\n has_aoi = aoi[\"useAOI\"]\n aoi_info = aoi[\"AOIs\"]\n logger.info(\"aoi information\")\n else:\n has_aoi = False\n aoi_info = 
None\n\n logger.info(\"Updating camera %s\", cam_id)\n stream = stream_manager.get_stream_by_id(cam_id)\n # s.update_cam(cam_type, cam_source, cam_id, has_aoi, aoi_info, cam_lines)\n # FIXME has_aoi\n recording_duration = int(cam.recording_duration * 60)\n stream.update_cam(\n cam_type,\n cam_source,\n frame_rate,\n recording_duration,\n lva_mode,\n ava_is_send,\n cam_id,\n cam_name,\n has_aoi,\n aoi_info,\n onnx.detection_mode,\n line_info,\n zone_info,\n )\n stream.cascade_name = request_body.cascade_name\n stream.send_video_to_cloud = cam.send_video_to_cloud\n stream.send_video_to_cloud_parts = [\n part.name for part in cam.send_video_to_cloud_parts\n ]\n stream.send_video_to_cloud_threshold = (\n int(cam.send_video_to_cloud_threshold) * 0.01\n )\n stream.use_tracker = cam.enable_tracking\n\n if stream.scenario:\n logger.warning(stream.scenario)\n if stream.model.detection_mode == 'TCC' and cam.counting_end_time != '':\n stream.scenario.set_time(\n cam.counting_start_time, cam.counting_end_time)\n # recording_duration is set in topology, sould be handled in s.update_cam, not here\n # stream.recording_duration = int(cam.recording_duration*60)\n\n logger.info(\"Streams %s\", stream_manager.streams)\n return \"ok\"", "def execute_cmd(self, pose, lm, delay, frame):\n if pose == 'pause_or_play':\n try:\n playback = self.sp_client.current_playback()\n if playback is None or not playback['is_playing']:\n self.sp_client.start_playback()\n else:\n self.sp_client.pause_playback()\n except spotipy.exceptions.SpotifyException as e:\n # print(e)\n # print(\"Trying to find an active device...\")\n devs = self.sp_client.devices()['devices']\n if len(devs) > 0:\n dev_id = devs[0]['id']\n self.sp_client.transfer_playback(dev_id)\n else:\n print(\"Tried to turn the volume up...\")\n print(\"Sorry, user needs to log into a device with Spotify!\")\n\n delay.reset_counter(20)\n delay.set_in_action(True)\n\n elif pose == 'connect_cycle':\n try:\n cur_dev_id = self.sp_client.current_playback()['device']['id']\n devs = self.sp_client.devices()['devices']\n cur_dev_idx = None\n for i, dev in enumerate(devs):\n if dev['id'] == cur_dev_id:\n cur_dev_idx = i\n\n new_dev_idx = cur_dev_idx - 1 # Loop backwards\n\n new_dev_id = devs[new_dev_idx]['id']\n self.sp_client.transfer_playback(new_dev_id)\n except spotipy.exceptions.SpotifyException as e:\n print(\"Tried to change device to connect_speaker (left)...\")\n print(e)\n\n delay.reset_counter(20)\n delay.set_in_action(True)\n\n elif pose == 'next_track':\n try:\n self.sp_client.next_track()\n except spotipy.exceptions.SpotifyException as e:\n print(\"Tried to go to next track...\")\n print(e)\n\n delay.reset_counter(10)\n delay.set_in_action(True)\n\n elif pose == 'previous_track':\n try:\n playback = self.sp_client.current_playback()\n if playback is not None:\n cur_uri = playback['item']['uri']\n cur_pos = playback['progress_ms']\n # Check if we have a valid mark in this track to skip back to\n if self.marked_pos is not None and self.marked_pos < cur_pos \\\n and cur_uri == self.marked_uri:\n self.sp_client.seek_track(self.marked_pos)\n else:\n if cur_pos < 6*1000: # Go to previous track\n self.sp_client.previous_track(self.marked_pos)\n else: # Go back to beginning of track\n self.sp_client.seek_track(0)\n except spotipy.exceptions.SpotifyException as e:\n print(\"Tried to go to previous track...\")\n print(e)\n\n delay.reset_counter(10)\n delay.set_in_action(True)\n\n elif pose == 'volume_slider':\n try:\n playback = self.sp_client.current_playback()\n if 
playback is not None:\n if self.prev_index_finger_tip_y is not None \\\n and self.prev_vol_datetime is not None \\\n and (datetime.now() - self.prev_vol_datetime).total_seconds() < 2.5:\n cur_vol = playback['device']['volume_percent']\n # print(f\"DEBUG: Current volume {cur_vol}.\")\n # print(f\"DEBUG: Landmarks: {lm[8*3+1]}\")\n cur_index_finger_tip_y = lm[8*3+1]\n vol_diff = int((self.prev_index_finger_tip_y - cur_index_finger_tip_y)*200)\n new_vol = max(0, min(100, cur_vol + vol_diff))\n self.sp_client.volume(new_vol)\n # print(f\"DEBUG: New Volume: {new_vol}\")\n self.prev_index_finger_tip_y = lm[8*3+1]\n self.prev_vol_datetime = datetime.now()\n else:\n self.prev_index_finger_tip_y = lm[8*3+1]\n self.prev_vol_datetime = datetime.now()\n # print(f\"DEBUG: Setting volume reference point to {self.prev_index_finger_tip_y}\")\n else:\n print(\"No active playback device... start playing Spotify somewhere.\")\n except spotipy.exceptions.SpotifyException as e:\n print(\"Tried to set volume...\")\n print(e)\n\n delay.reset_counter()\n delay.set_in_action(True)\n\n # E.g. 'skipback_2' or 'skipfwd_5'\n elif pose[:9] == 'skipback_' or pose[:8] == 'skipfwd_':\n n = int(pose[-1]) * (-1 if pose[:9] == 'skipback_' else 1)\n try:\n playback = self.sp_client.current_playback()\n if playback is not None:\n new_pos = max(playback['progress_ms']+int((3*n + 0.3)*1000), 0)\n self.sp_client.seek_track(new_pos)\n # print(f\"DEBUG: Seek {(new_pos - playback['progress_ms'])/1000} seconds.\")\n else:\n print(\"No active playback device... start playing Spotify somewhere.\")\n except spotipy.exceptions.SpotifyException as e:\n print(\"Tried to skipback...\")\n print(e)\n\n self.angle_now = None\n delay.reset_counter()\n delay.set_in_action(True)\n\n elif pose == 'like':\n try:\n playback = self.sp_client.current_playback()\n if playback is not None and playback['is_playing']:\n track_id = playback['progress_ms']\n self.sp_client.current_user_saved_tracks_add(tracks=[track_id])\n except spotipy.exceptions.SpotifyException as e:\n print(\"Tried to like a song...\")\n print(e)\n\n delay.reset_counter(20)\n delay.set_in_action(True)\n\n elif pose == 'mark_pos':\n try:\n playback = self.sp_client.current_playback()\n if playback is not None: # and playback['is_playing']:\n cur_uri = playback['item']['uri']\n if self.marked_uri == 'empty' or self.marked_uri != cur_uri:\n self.marked_pos = playback['progress_ms']\n self.marked_uri = playback['item']['uri']\n print(f\"DEBUG: Position {self.marked_pos} marked.\")\n else: # Delete old mark\n print(f\"DEBUG: Position {self.marked_pos} deleted.\")\n self.marked_pos = None\n self.marked_uri = 'empty'\n\n else:\n print(\"No active playback device... 
start playing Spotify somewhere.\")\n except spotipy.exceptions.SpotifyException as e:\n print(\"Tried to mark_pos...\")\n print(e)\n\n delay.reset_counter(20) # Ignore a few more frames than usual to avoid undoing\n delay.set_in_action(True)", "def on_setup_btn(self):\n if self.state == self.INIT:\n self.send_rtsp_request(self.SETUP)", "def on_play_btn(self):\n if self.state == self.READY:\n self.send_rtsp_request(self.PLAY)", "def invoke(self):\n\n base=\"data_request?id=action\"\n action = \"SetModeTarget\"\n svc = \"urn:upnp-org:serviceId:HVAC_UserOperatingMode1\"\n path = \"%s&DeviceNum=%d&serviceId=%s&action=%s&NewModeTarget=%s&output_format=json\" \\\n % (base, self.device.id, svc, action, self.value)\n status = self.device.vera.get(path)\n\n job = Job()\n job.id = int(status[\"u:SetModeTargetResponse\"][\"JobID\"])\n job.vera = self.device.vera\n return job", "def mcapi_playback(name_interface):\n device = Solo12(name_interface, dt=DT)\n qc = QualisysClient(ip=\"140.93.16.160\", body_id=0) # QualisysClient\n logger = Logger(device, qualisys=qc) # Logger object\n nb_motors = device.nb_motors\n\n # Default position after calibration\n q_init = np.array([0.0, 0.8, -1.6, 0, 0.8, -1.6, 0, -0.8, 1.6, 0, -0.8, 1.6])\n\n # Calibrate encoders\n device.Init(calibrateEncoders=True, q_init=q_init)\n\n # Wait for Enter input before starting the control loop\n put_on_the_floor(device, q_init)\n\n # CONTROL LOOP ***************************************************\n t = 0.0\n t_max = t_switch[-1]\n\n # Parameters of the PD controller\n KP = 2.\n KD = 0.05\n tau_max = 5. * np.ones(12)\n\n while ((not device.hardware.IsTimeout()) and (t < t_max)):\n\n device.UpdateMeasurment() # Retrieve data from IMU and Motion capture\n\n # Desired position and velocity for this loop and resulting torques\n q_desired, v_desired = demo_solo12(t)\n pos_error = q_desired.ravel() - device.q_mes.ravel()\n vel_error = v_desired.ravel() - device.v_mes.ravel()\n tau = KP * pos_error + KD * vel_error\n tau = np.maximum(np.minimum(tau, tau_max), -tau_max)\n\n # Set desired torques for the actuators\n device.SetDesiredJointTorque(tau)\n\n # Call logger\n # logger.sample(device, qualisys=qc)\n\n # Send command to the robot\n device.SendCommand(WaitEndOfCycle=True)\n if ((device.cpt % 100) == 0):\n device.Print()\n\n t += DT\n\n # ****************************************************************\n\n # Whatever happened we send 0 torques to the motors.\n device.SetDesiredJointTorque([0]*nb_motors)\n device.SendCommand(WaitEndOfCycle=True)\n\n if device.hardware.IsTimeout():\n print(\"Masterboard timeout detected.\")\n print(\"Either the masterboard has been shut down or there has been a connection issue with the cable/wifi.\")\n device.hardware.Stop() # Shut down the interface between the computer and the master board\n\n # Save the logs of the Logger object\n # logger.saveAll()", "def set_camera_address(self, address: int):\n\n self.camera_address = address", "def start(self):\n while True:\n requests.get(\"http://localhost:8080/clear\") #clearing the screen on the web browser\n speech=\"Welcome to Smart Mirror !!\"\n requests.get(\"http://localhost:8080/statement?text=%s\" % speech) # calling the text to appear on the browser\n self.speech.synthesize_text(\"hello\"+speech) #synthesizing the text into speech\n speech1=\"Say The launch Phrase .\" #asking the user to say the lauch phrase\n self.speech.synthesize_text(speech1) #speaking of the above line,\n if self.vision.recognize_face(): #checking if\n print \"Face 
Found\"\t\t\t#the person is infront of camera\n if use_launch_phrase:\t\t\t#checking whether to use the launch phrase or not\n recognizer, audio = self.speech.listen_for_audio()\t\t#initializing\n if self.speech.is_call_to_action(recognizer, audio):\t#checking if the audio is recognized\n self.__acknowledge_action()\t\t\t#if it is recognized take action\n self.decide_action()\t\t\t#deciding which action to be taken\n else:\n self.decide_action()\t\t\t#printing the else part", "def publish_camera_frame(self):\n executive.get_camera_orientation()\n self.t.start()\n # Wait for transformation to be published\n rospy.sleep(2)", "def open_video(self):\n\n # start the stream on the bebop\n if (self.is_bebop):\n self.drone_object.start_video_stream()\n\n # we have bypassed the old opencv VideoCapture method because it was unreliable for rtsp\n\n # get the path for the config files\n fullPath = inspect.getfile(DroneVisionGUI)\n shortPathIndex = fullPath.rfind(\"/\")\n if (shortPathIndex == -1):\n # handle Windows paths\n shortPathIndex = fullPath.rfind(\"\\\\\")\n print(shortPathIndex)\n shortPath = fullPath[0:shortPathIndex]\n self.imagePath = join(shortPath, \"images\")\n self.utilPath = join(shortPath, \"utils\")\n print(self.imagePath)\n print(self.utilPath)\n\n if self.is_bebop:\n # generate the streaming-address for the Bebop\n self.utilPath = join(shortPath, \"utils\")\n self.stream_adress = \"%s/bebop.sdp\" % self.utilPath\n else:\n # generate the streaming-address for the Mambo\n self.stream_adress = \"rtsp://192.168.99.1/media/stream2\"\n\n # initialise the vlc-player with the network-caching\n self.player = vlc.MediaPlayer(self.stream_adress, \":network-caching=\" + str(self.network_caching))\n\n # start the buffering\n success = self._start_video_buffering()", "def _open_capture(self):\n\n plat = platform.system()\n if plat == \"Windows\":\n gst = 'rtspsrc location=' + self._rtsp + ' latency=10 ! rtph264depay ! h264parse ! avdec_h264 ! videoconvert ! appsink sync=false'\n self.capture = cv2.VideoCapture(gst, apiPreference=cv2.CAP_GSTREAMER)\n # self.capture = cv2.VideoCapture(self._rtsp, apiPreference=cv2.CAP_FFMPEG)\n elif plat == \"Linux\":\n if platform.machine() == 'aarch64': # Jetson Nano\n gst ='rtspsrc location=' + self._rtsp + ' latency=10 ! rtph264depay ! h264parse ! omxh264dec ! nvvidconv ! appsink sync=false'\n self.capture = cv2.VideoCapture(gst, apiPreference=cv2.CAP_GSTREAMER)\n elif platform.machine() == 'armv6l' or platform.machine() == 'armv7l': # Raspberry Pi\n gst = 'rtspsrc location=' + self._rtsp + ' latency=10 ! queue ! rtph264depay ! h264parse ! v4l2h264dec capture-io-mode=4 ! v4l2convert output-io-mode=5 capture-io-mode=4 ! appsink sync=false'\n # might not need the two queue statements above\n self.capture = cv2.VideoCapture(gst, apiPreference=cv2.CAP_GSTREAMER)\n elif plat == \"MacOS\":\n gst = 'rtspsrc location=' + self._rtsp + ' latency=10 ! rtph264depay ! h264parse ! avdec_h264 ! videoconvert ! appsink'\n self.capture = cv2.VideoCapture(gst, apiPreference=cv2.CAP_GSTREAMER)\n else:\n gst = 'rtspsrc location=' + self._rtsp + ' latency=10 ! rtph264depay ! h264parse ! avdec_h264 ! videoconvert ! 
appsink'\n self.capture = cv2.VideoCapture(gst, apiPreference=cv2.CAP_GSTREAMER)\n\n self.capture_open = self.capture.isOpened() \n if not self.capture_open:\n self.logger.log(logging.CRITICAL, \"Status:Failed to open camera!\")", "def make_call(self, endpoint='compute', origin='45.4842721,9.2368349', destination='45.468359,9.1761419', mode='driving'):\n with warnings.catch_warnings(): # suppresses warnings on unclosed sockets\n warnings.simplefilter(\"ignore\", ResourceWarning)\n return self.client.get(f'/{endpoint}?' + '&'.join(f\"{k}={v}\" for k, v in locals().items()))", "def main_func_video_camera(param_list: list = None) -> bool:\r\n # index of param\r\n # noinspection PyPep8Naming\r\n PORT_RAW_PICT = 0\r\n\r\n # check if param OK\r\n if len(param_list) != 1:\r\n log_error_to_console(\"GET FRAME VIDEO CAPTURE MAIN FUNCTION PARAM NOK\", str(len(param_list)))\r\n return False\r\n else:\r\n port_image = get_port_from_wave(name=param_list[PORT_RAW_PICT])\r\n\r\n try:\r\n # noinspection PyUnresolvedReferences\r\n success, port_image.arr[:] = global_var_handler.VIDEO.read()\r\n if success is True:\r\n port_image.set_valid()\r\n except BaseException as error:\r\n is_error()\r\n # noinspection PyUnresolvedReferences\r\n log_error_to_console('RAW PICTURE NOK TO READ: ' + str(global_var_handler.VIDEO.__str__()), str(error))\r\n port_image.set_invalid()\r\n pass\r\n\r\n # noinspection PyUnresolvedReferences\r\n log_to_file(str(global_var_handler.FRAME))\r\n # noinspection PyUnresolvedReferences\r\n log_to_file(global_var_handler.STR_L0_SIZE)\r\n\r\n return True", "def Get_CameraPicture(self, request, context: grpc.ServicerContext) \\\n -> Ot2Controller_pb2.Get_CameraPicture_Responses:\n out_image_file: str = \"/tmp/tmp_image.jpeg\"\n cmd: str = f\"ffmpeg -y -f video4linux2 -s 640x480 -i /dev/video0 -ss 0:0:1 -frames 1 {out_image_file}\"\n logging.debug(f\"run '{cmd}'\")\n ssh_stdin, ssh_stdout, ssh_stderr = self.ssh.exec_command(cmd)\n run_ret: int = ssh_stdout.channel.recv_exit_status()\n logging.debug(\"run returned '\" + str(run_ret) + \"'\")\n\n scp = SCPClient(self.ssh.get_transport())\n try:\n scp.get(out_image_file, \"/tmp/tmp_image.jpeg\", recursive=False)\n except SCPException as error:\n logging.error(error)\n raise\n finally:\n scp.close()\n\n logging.debug(f\"Downloaded {out_image_file} to /tmp/tmp_image.jpeg\")\n img_bytes = open(\"/tmp/tmp_image.jpeg\", 'rb').read()\n\n ts: datetime = datetime.datetime.now(datetime.timezone.utc)\n timezone = silaFW_pb2.Timezone(hours=0, minutes=0)\n timestamp = silaFW_pb2.Timestamp(year=ts.year,\n month=ts.month,\n day=ts.day,\n hour=ts.hour,\n minute=ts.minute,\n second=ts.second,\n timezone=timezone)\n\n cam_pic_struct = Ot2Controller_pb2.Get_CameraPicture_Responses.CameraPicture_Struct(\n ImageData=silaFW_pb2.Binary(value=img_bytes),\n ImageTimestamp=timestamp)\n\n return Ot2Controller_pb2.Get_CameraPicture_Responses(CameraPicture=cam_pic_struct)", "async def bit(self, instance, value):\n print(f\"Server: {'bit'} Got 'put' request from outside: new value is {value} and type {type(value)}\")\n if self.device is not None:\n self.device.set_bit_client(value)\n else:\n print('device is None')", "def set_mode(self, mode=0, detection_param=0):\r\n return self._arm.set_mode(mode=mode, detection_param=detection_param)", "def camera(ctx, cam_id, verbose):\n client = ctx.obj.client\n cap = cv2.VideoCapture(cam_id)\n frame_num = 1\n classes = {}\n try:\n while True:\n ret, frame = cap.read()\n if not ret:\n print(\"Stream unavailable. 
Exiting.\")\n break\n if verbose:\n print(frame)\n cv2.imshow('Camera Feed', frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n frame_num += 1\n except:\n pass\n\n cap.release()\n cv2.destroyAllWindows()", "def perform_request(mode, config=None, request=None):\n if not isinstance(mode, str):\n raise ValueError(\"Invalid mode, must be a str\")\n\n mode = mode.upper()\n\n if request is None:\n request = EobotRequest()\n elif not isinstance(request, EobotRequest):\n raise ValueError(\"Invalid request, must be a EobotRequet\")\n\n if config is None or isinstance(config, str):\n config = get_config(config)\n elif not isinstance(config, EobotConfig):\n raise ValueError(\"Invalid config, must be a EobotConfig\")\n\n try:\n config.get_authentication(True)\n except NoUserIdError:\n config.set_user_id(get_user_id(config=config, request=request.clone()))\n\n auth = config.get_authentication(False)\n\n current_mode = get_mining_mode(config=config, request=request.clone())\n if current_mode == mode:\n return True\n\n request.set_parameter(\"id\", auth.user_id)\n request.set_parameter(\"email\", auth.email)\n request.set_parameter(\"password\", auth.password)\n request.set_parameter(\"mining\", mode)\n request.perform_request()\n\n new_mode = get_mining_mode(config=config, request=request.clone())\n return new_mode == mode", "def running_video(self, video_source):\n self.video_controller.set_button_enable()\n self.cap = cv2.VideoCapture(video_source)\n self.next_frame_slot()", "def video_thread():\n global last_frame\n # Creating stream capture object\n cap = cv2.VideoCapture('udp://' + drone.tello_ip + ':11111')\n\n while(True):\n _, last_frame = cap.read()\n cap.release()", "def _send(self, what, value, address='localhost:44818', **kwargs):\n\n tag_string = ''\n tag_string = EnipProtocol._tuple_to_cpppo_tag(what, value)\n # print 'DEBUG enip _send tag_string: ', tag_string\n\n cmd = shlex.split(\n self._client_cmd +\n '--log ' + self._client_log +\n '--address ' + address +\n ' ' + tag_string\n )\n # print 'DEBUG enip _send cmd shlex list: ', cmd\n\n # TODO: pipe stdout and return the sent value\n try:\n client = subprocess.Popen(cmd, shell=False)\n client.wait()\n\n except Exception as error:\n print('ERROR enip _send: ', error)", "def openCircuit(srv):", "def open(self):\n if self.hcam is None:\n self.hcam=lib.is_InitCamera(None,self.cam_id)", "def setAdhocParameters(self, host, mode, **params):\n self.mode = mode\n latency = 10\n self.host = host\n #delay = 5 * distance\n try:\n options = dict( params )\n self.interface = options[ 'interface' ]\n except: \n self.interface = 'wlan0'\n \n bandwidth = wifiParameters.set_bw(mode)\n #self.host.cmd(host, \"tc qdisc replace dev %s-%s root netem rate %.2fmbit latency %.2fms delay %.2fms\" % (host, self.interface, rate, latency, delay)) \n self.host.cmd(\"tc qdisc add dev %s-%s root tbf rate %smbit latency %sms burst 1540\" % (str(host), self.interface, bandwidth, latency))", "def __init__(self, camera):\r\n self.planes = []\r\n self.camera = camera", "def setInfraParameters(self, sta, mode, distance):\n station.mode(str(sta), mode)\n \n seconds = 3\n self.src = str(sta)\n try:\n \"\"\"Based on RandomPropagationDelayModel (ns3)\"\"\"\n seconds = abs(mobility.speed[self.src])\n except:\n pass\n self.host = sta\n latency = wifiParameters.latency(distance)\n loss = wifiParameters.loss(distance)\n delay = wifiParameters.delay(distance, seconds)\n bw = wifiParameters.bw(distance, mode) \n self.host.pexec(\"tc qdisc replace dev %s-wlan0 root netem rate 
%.2fmbit loss %.1f%% latency %.2fms delay %.2fms\" % (sta, bw, loss, latency, delay)) \n #os.system('util/m %s tc qdisc replace dev %s-wlan0 root netem rate %.2fmbit latency %.2fms delay %.2fms' % (self.host, self.host, bandwidth, latency, delay))\n #self.host.cmd(\"tc qdisc replace dev %s-wlan0 root tbf rate %.2fmbit latency %.2fms burst 15k\" % (self.host, rate, latency)) \n associate = self.doAssociation(mode, distance)\n if associate == False:\n mobility.handover(self.host)", "def apr(queue):\n # Camera Parameters for the particular camera used computed using calibrate_camera.py\n url = 'http://127.0.0.1:5000/postjson'\n camera_params = (816.5348873614328, 818.5099487197449, 309.06679467676815, 233.04620465593146)\n size = 8.3 # Size of aprilTag\n fourcc = cv2.VideoWriter_fourcc(*'mp4v')\n out = cv2.VideoWriter('up123.mp4', fourcc, 15.0, (640, 480))\n position = {'tag0': None, 'roll': None, 'time': None}\n '''Main function.'''\n\n parser = ArgumentParser(\n description='test apriltag Python bindings')\n\n parser.add_argument('device_or_movie', metavar='INPUT', nargs='?', default=0,\n help='Movie to load or integer ID of camera device')\n\n apriltag.add_arguments(parser)\n options = parser.parse_args()\n cap = cameraTest()\n window = 'Camera'\n cv2.namedWindow(window)\n start_time = time.time()\n detector = apriltag.Detector(options,\n searchpath=apriltag._get_demo_searchpath())\n\n while True:\n success, frame = cap.read()\n if not success:\n break\n\n gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)\n detections, dimg ,tag_id= detector.detect(gray, return_image=True)\n\n num_detections = len(detections)\n overlay = frame // 2 + dimg[:, :, None] // 2\n if num_detections > 0:\n #print(tag_id)\n for i, detection in enumerate(detections):\n pose, e0, e1 = detector.detection_pose(detection,\n camera_params,\n size)\n\n apriltag._draw_pose(overlay,\n camera_params,\n size,\n pose)\n\n\n\n b = numpy.matrix([[0], [0], [0], [1]])\n coordinate = numpy.matmul(pose, b)\n new_coord = coordinate\n position['tag0'] = new_coord\n roll = math.degrees(math.atan2(pose[0][1], pose[0][0]))\n yaw = math.degrees(math.atan((-1 * pose[2][0]) / math.sqrt((pose[2][1]) ** 2 + (pose[2][2]) ** 2)))\n pitch = math.degrees(math.atan(pose[2][1] / pose[2][2]))\n position['roll'] = roll - 90\n\n entry = [format(time.time() - start_time, '.4f'), format(float(new_coord[0]), '.4f'),\n format(float(new_coord[1]), '.4f'), format(float(new_coord[2]), '.4f'), format(roll, '.4f'),\n format(yaw, '.4f'), format(pitch, '.4f')]\n\n cv2.putText(overlay, \"X =\" + str(float(new_coord[0])) + \"cm\", (10, 20), cv2.FONT_HERSHEY_PLAIN, 1,\n (255, 255, 0), 1)\n cv2.putText(overlay, \"Y =\" + str(float(new_coord[1])) + \"cm\", (10, 50), cv2.FONT_HERSHEY_PLAIN, 1,\n (255, 255, 0), 1)\n cv2.putText(overlay, \"Z =\" + str(float(new_coord[2])) + \"cm\", (10, 80), cv2.FONT_HERSHEY_PLAIN, 1,\n (255, 255, 0), 1)\n cv2.putText(overlay, \"ROll =\" + str(roll) + \"cm\", (350, 20), cv2.FONT_HERSHEY_PLAIN, 1,\n (255, 255, 0), 1)\n cv2.putText(overlay, \"Yaw =\" + str(yaw) + \"cm\", (350, 50), cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 0), 1)\n cv2.putText(overlay, \"Pitch =\" + str(pitch) + \"cm\", (350, 80), cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 0),\n 1)\n logger.info(\"\\n X -> {0:.4f} \\n Y -> {1:.4f} \\n Z -> {2:.4f} \\n Roll -> {3:.4f} \\n Yaw -> {4:.4f} \\n Pitch -> {5:.4f} \\n\".format(float(new_coord[0]),\n float(new_coord[1]),\n float(new_coord[2]),\n float(roll), float(yaw),\n float(pitch)))\n #liveCoord = {'Time': 
str(time.ctime(int(time.time()))), 'X':format(float(new_coord[0]), '.4f'), 'Y': format(float(new_coord[1]), '.4f'), 'Z': format(float(new_coord[2]), '.4f'), 'Roll': format(float(roll), '.4f'), 'Yaw':format(float(yaw), '.4f') , 'Pitch': format(float(pitch), '.4f')}\n #requests.post(url, json=liveCoord)\n while not queue.empty():\n queue.get()\n queue.put(entry)\n\n else:\n logger.critical('No tag Detected')\n\n cv2.imshow(window, overlay)\n out.write(overlay)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n logger.warning(\"Camera Exited\")\n break", "def buzz_subroutine(self):\n self.socket.sendall(self.make_packet(\"CMD\", \"BUZZ\"))\n \n response = self.socket.recv(4096)\n\n if response:\n response_hdr, response_msg, response_sdr = self.parse_packet(response)\n \n if response_hdr == \"ACK\" and response_msg == \"BUZZ\":\n # ready to send picture to server\n pic_num = self.camera.take_picture()\n pic_file = open(\"{}.png\".format(pic_num), 'rb')\n\n # if camera is broken uncomment line below\n #pic_file = open(\"1.png\", 'rb')\n \n pic_bytes = pic_file.read(1024)\n \n # Pi 1 is slow so need a loop\n while pic_bytes: \n self.socket.send(pic_bytes)\n pic_bytes = pic_file.read(1024)\n \n pic_file.close()\n \n confirm = self.socket.recv(4096)\n\n if confirm:\n confirm_hdr, confirm_msg, confirm_sdr = self.parse_packet(confirm)\n if confirm_hdr == \"DATA\" and confirm_msg == \"PICTURE RECEIVED\":\n print(\"DEBUG: confirmed receive\")", "def turn_camera_on():\n # Do command\n consoleOutput = exec_console_command(constants.cameraOn + constants.getExitStatus)\n\n if \"2\" in consoleOutput:\n raise IOError(constants.cameraOnScriptNotFound)\n\n # Parse output\n feedbackOutput = constants.cameraSwitchedOn\n\n return feedbackOutput", "def setCamera(self, camPos, camCOI, camUp, camNear = 1.5):\n self.camPos = camPos # Origin of camera space\n self.camNear = camNear\n self.camCOI = camCOI\n self.Q = math3dsol.VectorN.__sub__(self.camPos,self.camCOI)\n #Normalized copy of camera pos\n self.norm_copy = math3dsol.VectorN.normalized_copy(self.camPos)\n self.camz = self.Q\n self.camy = math3dsol.cross(self.camz,self.camPos)\n self.camx = math3dsol.cross(self.camy,self.camz)\n math3dsol.VectorN.normalized_copy(self.camx)\n math3dsol.VectorN.normalized_copy(self.camy)\n math3dsol.VectorN.normalized_copy(self.camz)\n return self.camx, self.camy, self.camz", "def test_camera(self, camera):\n dev = list()\n for room in self.rooms:\n for device in room[\"devices\"]:\n dev.append(device)\n return Response(self.gen_testcamera(dev[int(camera)]),\n mimetype='multipart/x-mixed-replace; boundary=frame')", "def with_allows_camera(self):\n return self.with_allowed_capture_methods(CAMERA)", "def with_allows_camera_and_upload(self):\n return self.with_allowed_capture_methods(CAMERA_AND_UPLOAD)", "def move(self, pos):\n\n try:\n payload = {\"address\":self.address, \"user\": self.user, \"pwd\": self.pswd, \"pos\": map_position(pos)}\n resp = requests.get(\n \"http://{address}/decoder_control.cgi?command={pos}&user={user}&pwd={pwd}\".format(**payload)\n )\n except KeyError:\n raise CamException(\"Position must be within 1 to 16.\")\n if resp.status_code != 200:\n raise CamException(\"Unauthorized. 
Wrong user or password.\")\n return \"ok\"", "def from_http_stream(ip, port):\n # Replace the URL with your own IPwebcam shot.jpg IP:port\n url = f\"http:/{ip}:{port}/shot.jpg\"\n\n while True:\n\n # Use urllib to get the image and convert into a cv2 usable format\n img_arr = np.array(\n bytearray(urllib.request.urlopen(url).read()), dtype=np.uint8\n )\n img = cv2.imdecode(img_arr, -1)\n cv2.imshow(\"IPWebcam\", img)\n\n if cv2.waitKey(1) & 0xFF == ord(\"q\"):\n break\n cv2.destroyAllWindows()", "def reset(runtime, cfg, inputs, state, outputs):\n try:\n connection = int(cfg['connection'])\n except ValueError:\n connection = cfg['connection']\n\n if connection == 'nvarguscamerasrc':\n state['cap'] = cv2.VideoCapture(\n gstreamer_pipeline(\n capture_width = cfg['capture_width'],\n capture_height = cfg['capture_height'],\n display_width = cfg['display_width'],\n display_height = cfg['display_height'],\n framerate = cfg['framerate'],\n flip_method = cfg['flip_method']),\n cv2.CAP_GSTREAMER)\n else:\n state['cap'] = cv2.VideoCapture(connection)", "def create_remote_access(self, info):\n if info:\n response = self._request(\"POST\", [ROUTE_REMOTE_ACCESSES], {\n \"type\": info[\"type\"],\n \"address\": info[\"address\"],\n \"port\": info[\"port\"],\n \"login\": info[\"login\"],\n \"password\": info[\"password\"],\n \"key\": info[\"key\"],\n \"node\": info[\"node\"]\n })\n logging.debug(\"Create connexion remote access::{}\".format(response.text))\n if self.verif_response(response):\n logging.info('remote access successfully created {}'.format(info[\"address\"]))\n return CBWParser().parse_response(CBWRemoteAccess, response)\n\n logging.error(\"Error create connection remote access\")\n return False", "def live():\n m = camera.status.mode\n print \"Hit ^C to exit.\"\n print \"NOTE! After using this command, type: mode('%s') \" % m\n mode('centre')\n try:\n while True:\n f = camera.GetFits()\n camera.status.update()\n setheaders(f)\n camera.status.lastact = time.time() #Record the time that the last image was taken\n xpa.displayimage(f)\n except KeyboardInterrupt:\n logger.error(\"Live mode aborted, dumping image.\")\n finally:\n mode(m) #Restore original camera mode (hopefully)", "def preview_camera(self):\n self.statusbar.clearMessage()\n self.statusbar.showMessage('Previewing the camera. 
Press the Enter key to exit.')\n self.buttonPreview.setText('Press Enter\\nto finish.')\n self.comboCamera.setEnabled(False)\n self.buttonSelectColor.setEnabled(False)\n self.buttonLogSet.setEnabled(False)\n cap = webcam.initiate_camera(self.comboCamera.currentIndex())\n while True:\n _, frame = cap.read()\n frame = np.rot90(frame, self.comboRotation.currentIndex())\n cv2.imshow('Camera Preview', frame)\n key = cv2.waitKey(1) & 0xFF\n if key == ord('\\r'):\n break\n cap.release()\n cv2.destroyAllWindows()\n self.buttonPreview.setText('Preview')\n self.comboCamera.setEnabled(True)\n self.buttonSelectColor.setEnabled(True)\n self.buttonLogSet.setEnabled(True)\n self.statusbar.clearMessage()", "def preview(self,*args,**kwargs):\n self.cam.start_preview(*args,**kwargs)", "def testSetRequest(self):\n self.mgr.sendGoProCommand = Mock()\n value = struct.pack('<HH', 8, 22)\n self.mgr.handlePacket(app_packet.GOPRO_SET_REQUEST, value)\n self.mgr.sendGoProCommand.assert_called_with( 8, (22, 0, 0, 0) )", "async def set_bit(self, instance, value):\n print(f\"Server: {'set_bit'} Got 'put' request from outside: new value is {value} and type {type(value)}\")\n if self.device is not None:\n self.device.set_bit_server(value)\n else:\n print('device is None')", "def update(self):\n # Get frame from video source:\n ret, frame = self.vid.read()\n\n if ret:\n # Convert the captured frame into grayscale\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n gray = cv2.equalizeHist(gray)\n\n # Get all faces from the video frame\n faces = self.faceCascade.detectMultiScale(gray, 1.2,5)\n\n # For each face in faces\n for (x, y, w, h) in faces:\n # Create rectangle around the face\n cv2.rectangle(frame, (x-20,y-20), (x+w+20,y+h+20), (0,255,0), 4)\n\n # Recognize the face belongs to which ID\n Id = self.recognizer.predict(gray[y:y+h,x:x+w])\n\n ### IDENTIFICATION & SOCKET CODE GOES HERE\n if Id[0] == self.user_id:\n # If the target face is found 10 times then access is granted\n self.identification_count += 1\n if self.identification_count > 10:\n self.master.switch_frame(AccessGranted)\n\n name_to_put = self.user_name\n else:\n name_to_put = \"Unknown - Access Denied\"\n\n # Put text describe who is in the picture\n cv2.rectangle(frame, (x-22,y-90), (x+w+22, y-22), (0,255,0), -1)\n cv2.putText(frame, str(name_to_put), (x,y-40), self.font, 2, (255,255,255), 3)\n\n self.after(50, self.update)", "def initialize(acquisition_mode, image_mode, save_path, save_nickname=True):\r\n\r\n # get the system and cameras using library\r\n system = PySpin.System.GetInstance()\r\n cameras = system.GetCameras()\r\n num_cams = cameras.GetSize()\r\n print(\"Cameras detected: \" + str(num_cams))\r\n \r\n serial_numbers = []\r\n # initialize the cameras and get their serial numbers\r\n for i, cam in enumerate(cameras):\r\n cam.Init()\r\n serial_numbers.append(cam.TLDevice.DeviceSerialNumber.GetValue())\r\n print(\"Initialized cameras.\")\r\n del cam\r\n \r\n # set acquisition and format mode \r\n # manual and timed modes acquire images in single frame mode\r\n # note that to use continuous mode in a six camera setup, the packet delay must be set to 10,000 in FlyCap2\r\n for i, cam in enumerate(cameras):\r\n if acquisition_mode == CONTINUOUS_MODE:\r\n cam.AcquisitionMode.SetValue(PySpin.AcquisitionMode_Continuous)\r\n else:\r\n cam.AcquisitionMode.SetValue(PySpin.AcquisitionMode_SingleFrame)\r\n cam.PixelFormat.SetValue(image_mode)\r\n del cam\r\n \r\n # create the folders required for saving\r\n # create a new folder for the date/time\r\n 
timestamp = datetime.datetime.now().strftime('%Y-%m-%d %Hhr %Mmin %Ss')\r\n time_path = save_path + \"\\\\Camera Run \" + timestamp\r\n os.mkdir(time_path)\r\n file_path = time_path\r\n # create the camera folders\r\n # save nicknames is enabled - use the camera numbers\r\n if save_nickname:\r\n camera_numbers = range(1, num_cams+1)\r\n # use the camera serial numbers for naming instead\r\n else:\r\n camera_numbers = serial_numbers\r\n for number in camera_numbers:\r\n cam_path = time_path + \"\\\\Camera_\" + str(number)\r\n os.mkdir(cam_path)\r\n\r\n return (system, cameras, file_path)", "def start_camera(self):\n # create the video capture thread\n self.thread = VideoThread()\n # connect its signal to the update_image slot\n self.thread.change_pixmap_signal.connect(self.update_image)\n # start the thread\n self.thread.start()", "def set_immobilizer(immobilizer_var):\n if immobilizer_var == 1 :\n r = requests.put(HOST + 'immobilizer/engage', cert='pixelcamp.pem')\n if immobilizer_var == 0 :\n r = requests.put(HOST + 'immobilizer/disengage', cert='pixelcamp.pem')\n\n return (r.status_code, get_car())", "def do(self, argin):\n device=self.target\n # Code here\n errs = [] # list of error messages\n receptor_to_vcc = dict([*map(int, pair.split(\":\"))] for pair in\n device._proxy_cbf_controller.receptorToVcc)\n for receptorID in argin:\n try:\n vccID = receptor_to_vcc[receptorID]\n vccProxy = device._proxies_vcc[vccID - 1]\n\n # Update the VCC receptorID attribute:\n\n self.logger.debug( (\"receptorID = {}, vccProxy.receptorID = {}\"\n .format(receptorID, vccProxy.receptorID)))\n\n vccProxy.receptorID = receptorID # TODO - may not be needed?\n\n self.logger.debug( (\"receptorID = {}, vccProxy.receptorID = {}\"\n .format(receptorID, vccProxy.receptorID)))\n\n subarrayID = vccProxy.subarrayMembership\n\n # only add receptor if it does not already belong to a different subarray\n if subarrayID not in [0, device._subarray_id]:\n errs.append(\"Receptor {} already in use by subarray {}.\".format(\n str(receptorID), str(subarrayID)))\n else:\n if receptorID not in device._receptors:\n # change subarray membership of vcc\n vccProxy.subarrayMembership = device._subarray_id\n\n # TODO: is this note still relevant? \n # Note:json does not recognize NumPy data types. 
\n # Convert the number to a Python int \n # before serializing the object.\n # The list of receptors is serialized when the FSPs are \n # configured for a scan.\n\n device._receptors.append(int(receptorID))\n device._proxies_assigned_vcc.append(vccProxy)\n device._group_vcc.add(device._fqdn_vcc[vccID - 1])\n\n # subscribe to VCC state and healthState changes\n event_id_state = vccProxy.subscribe_event(\n \"State\",\n tango.EventType.CHANGE_EVENT,\n device._state_change_event_callback\n )\n\n event_id_health_state = vccProxy.subscribe_event(\n \"healthState\",\n tango.EventType.CHANGE_EVENT,\n device._state_change_event_callback\n )\n\n device._events_state_change_vcc[vccID] = [event_id_state,\n event_id_health_state]\n else:\n log_msg = \"Receptor {} already assigned to current subarray.\".format(\n str(receptorID))\n self.logger.warn(log_msg)\n\n except KeyError: # invalid receptor ID\n errs.append(\"Invalid receptor ID: {}\".format(receptorID))\n\n\n if errs:\n msg = \"\\n\".join(errs)\n self.logger.error(msg)\n # tango.Except.throw_exception(\"Command failed\", msg, \"AddReceptors execution\",\n # tango.ErrSeverity.ERR)\n \n return (ResultCode.FAILED, msg)\n\n message = \"CBFSubarray AddReceptors command completed OK\"\n self.logger.info(message)\n return (ResultCode.OK, message)" ]
[ "0.657208", "0.59614706", "0.5739002", "0.5577688", "0.5528365", "0.5405239", "0.54035485", "0.5352449", "0.5316245", "0.53074706", "0.5200834", "0.5174972", "0.51566505", "0.51535857", "0.50904423", "0.50857383", "0.50825894", "0.5082426", "0.507766", "0.5062548", "0.5052165", "0.50233257", "0.50120944", "0.4998169", "0.49683338", "0.49636835", "0.49560326", "0.49514472", "0.4903696", "0.48935607", "0.4884507", "0.48191416", "0.47896746", "0.47888315", "0.47859153", "0.47834003", "0.47747636", "0.47588435", "0.4739402", "0.4717487", "0.47141853", "0.4711878", "0.4709221", "0.47047928", "0.46752834", "0.46748096", "0.46737975", "0.46635175", "0.4662571", "0.46592867", "0.4658615", "0.4656998", "0.46567327", "0.46543765", "0.46525592", "0.46497935", "0.46448168", "0.4632688", "0.46303445", "0.462601", "0.46246895", "0.46214342", "0.46039996", "0.4603926", "0.45967668", "0.4583521", "0.458275", "0.45805165", "0.45786798", "0.45710203", "0.4562124", "0.45590845", "0.45570797", "0.4556246", "0.45562002", "0.45506337", "0.45456582", "0.4541555", "0.45404083", "0.45382524", "0.45336568", "0.45331734", "0.4523453", "0.4518844", "0.45130566", "0.4503475", "0.4501037", "0.4500724", "0.44865686", "0.4477568", "0.44771907", "0.44698197", "0.44697613", "0.446707", "0.44670555", "0.4466004", "0.44658673", "0.44612074", "0.44575775", "0.4452592" ]
0.689005
0
Attempts to insert the supplied genome. If the genome is inserted, this method will return True, otherwise it will return False.
def try_insert_genome(self, genome):
    raise Exception("called abstract insert_genome method")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def inserted(self):\n return True", "def insert(self, row):\n if not self.loaded:\n print(\"Database is not loaded\")\n return False\n\n self.rows.append(row)\n return True", "def _can_insert(self, index, value):\n return not bool(self._insert_callback(index, value))", "def test_verify_insert(self):\n self._verify([self.applied_commands['insert']])", "def is_insert(self) -> bool:\n return self.statement.is_dml and self.statement.is_insert", "def can_insert(data):\n return hasattr(data, 'read')", "def insert(self, val):\n if val in self.record:\n return False\n \n self.record[val] = len(self.data)\n self.data.append(val)\n return True", "def add_genome(self, genome):\n self.genomes.append(genome)", "def insert(self, val: int) -> bool:", "def insert(self, val):\n if val in self.dic:\n return False\n else:\n self.data.append(val)\n self.dic[val]=len(self.data)-1\n return True", "def insert(self, val: int) -> bool:\n if val not in self.set:\n self.set.add(val)\n return True\n return False", "def insert_node(self, node):\n if self._is_node_reserved(node):\n return False\n\n # Put node in map\n self._node_map[node.get_id()] = node\n return True", "def insert(self, val: int) -> bool:\n if self.d.get(val):\n return False\n else:\n self.d[val] = True\n return True", "def insert(self, val):\n if val in self.d:\n return False\n self.d[val] = len(self.l)\n self.l.append(val)\n return True", "def insert(self, val: int) -> bool:\n if val in self.randomized_hash:\n self.randomized_hash[val].append(len(self.array))\n self.array.append(val)\n return False\n else:\n self.randomized_hash[val] = [len(self.array)]\n self.array.append(val)\n return True", "def test_0_data_insertion(self):\n s = self.fitness.insert_in_database(self.fitness_dict, date_time=self.dt1)\n self.assertTrue(s)", "def insert(self, val: int) -> bool:\n if(val not in self.randomSet):\n self.randomSet[val] = 1\n return True\n else:\n return False", "def test_insert_will_not_duplicate_value(bst_balanced):\n bst_balanced.insert(6)\n assert bst_balanced.size() == 6", "def insert_and_check(self, item) -> bool:\n with Monitor.acquire(self):\n if item in self:\n return False\n self.add(item)\n return True", "def insert(self, val: int) -> bool:\n if val in self.map:\n return False\n index = len(self.keys)\n self.map[val] = index\n self.keys.append(val)\n return True", "def insert(self, val):\n new_item = False\n if val not in self.ds:\n self.ds.add(val)\n self.keys.append(val)\n new_item = True\n return new_item", "def insert(self):\n pass", "def has_insert(self, shape):\n for insert in self.inserts:\n if insert.shape == shape:\n return True\n return False", "def insert(self, val):\n if val in self.map:\n return False\n \n self.nums.append(val)\n self.map[val] = len(self.nums) - 1\n \n return True", "def insert(self, val):\n res = val in self.map\n idx = len(self.vec)\n if res:\n self.map[val].append(idx)\n self.vec.append(val)\n else:\n self.map[val] = [idx]\n self.vec.append(val)\n return not res", "def insert(self, val: int) -> bool:\n \n retVal = True if val not in self.map else False\n if retVal:\n self.map[val] = len(self.arr)\n self.arr.append(val)\n return retVal", "def insert(self, val):\n if val not in self.posFind or self.posFind[val] == -1:\n self.nums.append(val)\n self.posFind[val] = len(self.nums) - 1\n return True\n return False", "def insert(self, val: int) -> bool:\n if self.store_dict.get(val) != None:\n return False\n self.store_list.append(val)\n self.store_dict[val] = len(self.store_list) - 1\n return True", "def 
inject_genome(self, genome: Genome):\n self.population[genome.key] = genome", "def isPostInsert(self):\n raise ProofException.ProofNotImplementedException( \\\n \"IdGenerator.isPostInsert: need to be overrided.\" )", "def insert_chromosome(mutated_genome):\n index = random.randint(0,len(mutated_genome))\n if color_mode == 'RGB':\n color_red = random.randint(0,255)\n color_green = random.randint(0,255)\n color_blue = random.randint(0,255)\n color = (color_red, color_blue, color_green)\n else: #color_mode == 'L':\n color = random.randint(0,255)\n opacity = random.randint(0,255)\n points = []\n mutated_genome.insert(index, [color,opacity,points])", "def insert(tablename: str, data: dict):\n try:\n if (t := tablenameRev[tablename]) not in sequenceTables:\n return False\n db.session.add(t.new(**data))\n db.session.commit()\n updateSequence([t])\n del_cache_for_sequence_table(tablename)\n return True\n except:\n return False", "def can_insert(data):\n return isinstance(data, dict)", "def insert(self, val: int) -> bool:\n if val not in self.set:\n self.nums.append(val);\n self.set.add(val);\n return True;\n return False;", "def place_tiger(self, addr: str) -> bool:\n try:\n self.get_pos(addr).place_tiger()\n self._push_move(f\"T{addr}\")\n return True\n except Exception:\n return False", "def insert(self):\n ret = True\n\n schema = self.schema\n fields = self.depopulate(False)\n\n q = self.query\n q.set_fields(fields)\n pk = q.insert()\n if pk:\n fields = q.fields\n fields[schema.pk.name] = pk\n self._populate(fields)\n\n else:\n ret = False\n\n return ret", "def insert(self, val: int) -> bool:\n if val in self.map:\n return False\n # put in slot\n self.slot.append(val)\n # insert to map\n self.map[val] = len(self.slot) - 1\n return True", "def insert(self, val: int) -> bool:\n if val in self.idx:\n return False\n else:\n # append value into data \n self.data.append(val)\n \n # record the idx of the value in data\n self.idx[val] = len(self.data) - 1\n return True", "def insert(self, val: int) -> bool:\n if val in self.hashmap:\n return False\n self.hashmap[val] = len(self.array)\n self.array.append(val)\n return True", "def insert(self, val: int) -> bool:\n if val in self.l:\n return False\n self.l.add(val)\n return True", "def insert(self, val: int) -> bool:\n if val in self.map:\n return False\n self.array.append(val)\n self.map[val] = len(self.array)-1\n return True", "def insert(self, nucleotide, index):\n if nucleotide not in ['A', 'C', 'T', 'G', 'a', 'c', 't', 'g']:\n raise ValueError\n try:\n temp = list(self.__dna)\n temp[index] = nucleotide\n self.__dna = \"\".join(temp)\n except IndexError as e:\n return f\"error {e}\"", "def insert(self, val):\n if val not in self.table.keys():\n self.table[val] = len(self.ls)\n self.ls.append(val)\n return True\n return False", "def insert(self, val):\r\n if len(self.data) != self.len:\r\n self.data[self.len] = val\r\n else:\r\n self.data.append(val)\r\n if val in self.indices:\r\n self.indices[val].append(self.len)\r\n self.len += 1\r\n return False\r\n else:\r\n self.indices[val] = [self.len]\r\n self.len += 1\r\n return True", "def insert(self, val: int) -> bool:\n if val in self.d:\n return False\n self.d[val] = len(self.arr)\n self.arr.append(val) \n return True", "def insert(self, val: int) -> bool:\n # print(f\"insert {val}\")\n already = val in self.indexes\n self.items.append(val)\n self.indexes[val].add(len(self.items) - 1)\n return not already", "def insert(self, val: int) -> bool:\n if val not in self.value_set:\n self.value_set.add(val)\n 
self.values.append(val)\n return True\n else:\n return False", "def insert(self, val: int) -> bool:\n if val not in self.arr:\n self.arr.append(val)\n index = len(self.arr) - 1\n self.map[val] = index\n return True\n return False", "def insert_fragment_in_place(sequences, fragment):\n # First, check if the fragment fits in any of the existing sequences\n inserted = False\n for i in range(0, len(sequences)):\n sequence = sequences[i]\n LOGGER.debug(\"Looking at sequence #{} of size {}\".format(i, sequence.length()))\n inserted = sequence.insert_if_overlaps(fragment)\n if inserted:\n # If the fragment was inserted in the sequence, we can move on\n break\n\n # If we were unable to insert in an existing sequence, create a new one\n if inserted == False:\n LOGGER.debug(\"Unable to fit this fragment in an existing sequence. Creating a new sequence\")\n sequence = Sequence()\n sequence.append(fragment)\n sequences.append(sequence)\n LOGGER.debug(\"{} sequences now in the array\".format(len(sequences)))\n else:\n LOGGER.debug(\"{} fragments now in the sequence\".format(sequence.length()))", "def insert(self, row, col, value):\n if self.valid_square(row, col, value) or value == 0:\n self.puzzle[row][col] = value\n return True\n return False", "def test_insert_adds_value_to_tree(bst_balanced):\n bst_balanced.insert(15)\n assert bst_balanced.contains(15) is True\n assert bst_balanced.search(15).val == 15", "def insert(self, index, chromosome):\n self.chromosome_list.insert(index, to_chromosome(chromosome))", "def _insert_node(node, before_node=None, after_node=None, in_index=0):\n\n if not before_node and not after_node:\n log.info('Nowhere to insert %s to.' % node.path())\n return False\n\n if before_node:\n log.debug('Inserting %s before %s' % (node.path(), before_node.path()))\n before_node_input = None\n before_node_inputs = before_node.inputs()\n if not before_node_inputs:\n return False\n before_node_input = before_node_inputs[0]\n\n node.setInput(in_index, before_node_input)\n before_node.setInput(in_index, node)\n return True\n\n if after_node:\n log.debug('Inserting %s after %s' % (node.path(), after_node.path()))\n after_node_output = None\n after_node_outputs = after_node.outputs()\n if not after_node_outputs:\n node.setInput(in_index, after_node)\n return True\n\n after_node_output = after_node_outputs[0]\n\n after_node_output.setInput(in_index, node)\n node.setInput(in_index, after_node)\n return True\n\n return False", "def _insert_if_possible(self, query, values):\n try:\n self.cursor.execute(query, values)\n self.cnx.commit()\n return True\n except mysql.connector.errors.IntegrityError:\n self.cnx.rollback()\n return False", "def insert(self, key, value):\n\n if None == self.root:\n self.root = BSTNode(key,value)\n return True\n current_node = self.root\n while current_node:\n if key == current_node.key:\n print(\"The key does exist!\")\n return False\n elif key < current_node.key:\n if current_node.left:\n current_node = current_node.left\n else:\n current_node.left = BSTNode(key, value, current_node)\n return True\n else:\n if current_node.right:\n current_node = current_node.right\n else:\n current_node.right = BSTNode(key,value,current_node)\n return True", "def test_binarytree_insert_exists(empty_list):\n assert empty_list.insert(42)", "def insert(self):\n self.getDbRecord().insert()\n\n return", "def IsInsertedPoint(self, ):\n ...", "def insert(self, val: int) -> bool:\n \n self.items.append(val)\n self.ind[val].add(len(self.items)-1)\n return len(self.ind[val]) == 1", "def 
test_insert_no_value(tree):\n with pytest.raises(TypeError):\n assert tree.insert()", "def insertLast(self, value):\n if not self.isFull():\n self._data.append(value)\n return True\n else:\n return False", "def test_insert_must_be_a_number(bst_empty):\n with pytest.raises(TypeError):\n bst_empty.insert(\"dfsdfadgasdg\")", "def insert(self, item):\r\n if not self.is_full():\r\n for i in range(1,len(self.items)):\r\n if self.items[i] is None:\r\n self.items[i] = item\r\n self.size += 1\r\n self.perc_up(i)\r\n return True\r\n return False", "def insert(self, val: int) -> bool:\n if val in self.data:\n return False\n self.data[val] = None\n self.total += 1\n return True", "def insert(self, val: int) -> bool:\n if val not in self.dict:\n self.dict[val] = len(self.arr)\n self.arr.append(val)\n return True\n return False", "def insert(self, val: int) -> bool:\n self.dict[val].add(len(self.arr))\n self.arr.append(val)\n return len(self.dict[val]) == 1", "def insert_record(self, record, session):\n try:\n session.add(record)\n session.commit()\n session.close()\n return True\n except:\n\n logging.exception(\"http record cannot be added to db \" \":Time: \" + str(datetime.datetime.now()))\n return False", "def insert(self, val):\n if val not in self.table.keys():\n self.table[val] = 0\n return True\n return False", "def insert(self, val: int) -> bool:\n if val not in self.counts:\n self.counts[val] = 1\n return True\n else:\n self.counts[val] += 1\n return False", "def insert(self, val: int) -> bool:\n        if val not in self.hashmap:\n            self.list.append(val)\n            self.hashmap[val]=len(self.list)-1\n            return True\n        else:\n            return False\n            ", "def insert(self, val: int) -> bool:\n if val not in self._dict:\n self._dict[val] = len(self._array)\n self._array.append(val)\n return True\n return False", "def insert(self, data):\r\n pass", "def insert(self, val):\n # if it already exists return error\n if val in self.inds:\n return False\n # record the index and save the number\n self.inds[val] = len(self.nums)\n self.nums.append(val) \n return True", "def insert(self, val: int) -> bool:\n self.elements.append(val)\n self.idx[val].add(len(self.elements) - 1)\n return len(self.idx[val]) == 1", "def insert(self, val: int) -> bool:\n if val in self.val2i: return False\n if self.size == len(self.array): self.array.append(val)\n else: self.array[self.size] = val\n self.val2i[val] = self.size\n self.size += 1\n #print(self.size)\n return True", "def isPriorToInsert(self):\n raise ProofException.ProofNotImplementedException( \\\n \"IdGenerator.isPriorToInsert: need to be overrided.\" )", "def can_insert(data):\n if not isinstance(data, np.ndarray):\n return False\n if data.dtype.char in UNSUPPORTED_NUMERIC_TYPE_CODES:\n return False\n return np.issubdtype(data.dtype, np.number)", "def insert(self, position, insert):\n assert all(new in self.ALPHABET for new in insert)\n if position < 1 or position - 1 > len(self.sequence):\n raise ValueError(f\"Insertion position {position} out of bonds for given sequence.\")\n self.sequence = f\"{self.sequence[: position - 1]}{insert}{self.sequence[position:]}\"\n if \"mutations\" in self.metadata.keys():\n self.metadata[\"mutations\"] += f\" ins{position}{insert}\"\n else:\n self.metadata[\"mutations\"] = f\"ins{position}{insert}\"", "def insert(self, val: int) -> bool:\n value = val not in self.container\n self.container.add(val)\n return value", "def insert_chromosome(self, chromosome, index):\n if chromosome is 
None:\n #No class is schedule in that time slot \n if self.chromo_list[index] is None:\n #Create new list with empty chromosome object\n new_list = [Chromsome()]\n #Assigns new_chromo the empty chromosome object\n new_chromo = new_list[0]\n #Inserts the new list into the master chromosome list\n self.chromo_list.insert(index,new_list)\n \n \n #Class is already scheduled in the time slot\n else:\n #Get the existing list\n exist_list = self.chromo_list[index]\n #Append empty chromosome object to end of list\n exist_list.append(Chromosome())\n #Assigns new_chromo the empty chromosome object\n new_chromo = exist_list[-1]\n #Sets overlap to be true because another class is\n #scheduled at the same time\n new_chromo.overlap = True\n #Reassigns the list in chromo_list\n self.chromo_list[index] = exist_list\n\n #Returns pointer to the inserted chromosome\n return new_chromo\n\n else:\n #No class is schedule in that time slot \n if self.chromo_list[index] is None:\n #Inserts the existing chromosome into an empty list\n new_list = [chromosome]\n #Assigns new_chromo to the inserted object\n new_chromo = new_list[0]\n #Inserts the new list into the master chromosome list\n self.chromo_list[index] = new_list\n\n #Class is already scheduled in the time slot\n else:\n #Gets the existing list\n exist_list = self.chromo_list[index]\n #Adds the existing chromosome to the end of the existing list\n exist_list.append(chromosome)\n #Assigns new_chromo to the newly inserted chromosome\n new_chromo = exist_list[-1]\n #Inserts the existing list back into the master chromosome list\n self.chromo_list[index] = exist_list\n\n #Returns pointer to the chromosome that has been inserted\n return new_chromo", "def test_9_incorrect_database(self):\n fitness = Fitness(None, self.user_id_1)\n s = fitness.insert_in_database(self.fitness_dict, date_time=self.dt1)\n self.assertFalse(s)", "def test_insert(self):\n c = city.City(name=\"Freiburg\")\n p1 = city.Citizen(name=\"Peter\")\n p2 = city.Citizen(name=\"Georg\")\n c.add(p1, p2, rel=city.hasInhabitant)\n\n with DataspaceSession(URI) as session:\n wrapper = city.CityWrapper(session=session)\n wrapper.add(c)\n session.commit()\n\n check_state(self, c, p1, p2, db=DB)", "def insert(self, val: int) -> bool:\n if val not in self.dic:\n self.lst.append(val)\n self.dic[val] = len(self.lst) - 1\n return True", "def insert(self, val: int) -> bool:\n if val in self.dict:\n return False\n self.dict[val] = len(self.list)\n self.list.append(val)\n return True", "def insert(self, val: int) -> bool:\n if val in self.dict:\n return False\n self.dict[val] = len(self.list)\n self.list.append(val)\n return True", "def insert(self, val: int) -> bool:\n if val in self.dict:\n return False\n self.dict[val] = len(self.list)\n self.list.append(val)\n return True", "def insert(self, val: int) -> bool:\n if val in self.dict:\n return False\n self.dict[val] = len(self.list)\n self.list.append(val)\n return True", "def insert(self, val: int) -> bool:\n if val in self.dict:\n return False\n self.dict[val] = len(self.list)\n self.list.append(val)\n return True", "def add_new_element_to_store(entry_sequence, element, is_propagated_call=False):\n\t\tglobal board, node_id\n\t\tsuccess = False\n\t\ttry:\n\t\t\tboard[int(entry_sequence)] = element\n\t\t\tsuccess = True\n\t\texcept Exception as e:\n\t\t\tprint e\n\t\treturn success", "def test_insert_on_empty(empty_trie):\n empty_trie.insert('hello')\n assert 'h' in empty_trie.root.keys()\n assert empty_trie.size == 1", "def 
test_insert_if_node_value_exist(balanced_3_nodes):\n with pytest.raises(ValueError):\n balanced_3_nodes.insert(10)", "def insertFront(self, value):\n if not self.isFull():\n self._data.insert(0,value)\n return True\n else:\n return False", "def test_double_insert_on_empty(empty_trie):\n empty_trie.insert('hello')\n empty_trie.insert('hello')\n assert empty_trie.size == 1", "def insert(self, val):\n if val not in self.dict_val:\n self.dict_val[val] = len(self.list_val)\n self.list_val.append(val)\n return True\n return False", "def __check_write_success_insert(self, data, path):\n with open(path, 'r') as f:\n for line in f:\n if len(line) > 1:\n last_line = line\n if last_line:\n if data[\"row_id\"] == json.loads(line)[\"row_id\"] and data[\"data\"] == json.loads(line)[\"data\"]:\n return True\n return False", "def push(self):\n return False", "def test_linked_list_insert_exists():\n assert LinkedList.insert", "def insert(self, val: int) -> bool:\n if val in self._dict:\n return False\n \n self._dict[val] = len(self._list)\n self._list.append(val)\n return True", "def _insert_if_new(cursor, table, data, **kwargs):\n pk_only_data = _subdict(METADATA_PRIMARY_KEYS[table], data, enforce_key_presence=True)\n if not _exists(cursor, table, pk_only_data):\n log('inserting new {}...'.format(table), end='')\n result = _insert_dict(cursor, table, data, **kwargs)\n log('done.')\n return result", "def test_insert(self):\n data = [4, 4, 8, 9, 4, 12, 9, 11, 13]\n h = Heap(data)\n\n h.insert(7)\n self.assertTrue(Heap.is_heap(h.data), 'should still be a heap')\n\n h.insert(10)\n self.assertTrue(Heap.is_heap(h.data), 'should still be a heap')\n\n h.insert(5)\n self.assertTrue(Heap.is_heap(h.data), 'should still be a heap')" ]
[ "0.6346422", "0.6159592", "0.60494566", "0.57980555", "0.578149", "0.57695425", "0.5667096", "0.56136864", "0.5602667", "0.5582492", "0.5562881", "0.5556916", "0.55056053", "0.54988134", "0.54925364", "0.54842436", "0.5480843", "0.547767", "0.54687303", "0.54569113", "0.5446923", "0.5444414", "0.5444406", "0.54370433", "0.54211825", "0.54141146", "0.53800344", "0.5377163", "0.5376591", "0.5349561", "0.53468335", "0.53269964", "0.53218925", "0.5319786", "0.531225", "0.5306135", "0.5292535", "0.5290208", "0.5289491", "0.52837366", "0.52833873", "0.52796686", "0.5261663", "0.52601665", "0.5255249", "0.52418387", "0.5226477", "0.52220863", "0.521538", "0.5210022", "0.5205227", "0.52004963", "0.51920974", "0.519013", "0.51708615", "0.51701075", "0.5168588", "0.51645947", "0.5160515", "0.5153928", "0.51486164", "0.5140982", "0.51306534", "0.512757", "0.51246345", "0.5107014", "0.5104891", "0.5099149", "0.5096284", "0.5095433", "0.50894344", "0.5086636", "0.50809485", "0.50723046", "0.5069095", "0.50684685", "0.5068143", "0.50657964", "0.5057685", "0.50574565", "0.5054159", "0.50371265", "0.50148135", "0.49851677", "0.49851677", "0.49851677", "0.49851677", "0.49851677", "0.49811393", "0.4978569", "0.49747798", "0.49740845", "0.49557152", "0.49500284", "0.49469474", "0.4938115", "0.4934616", "0.49250734", "0.49143305", "0.4910251" ]
0.8167088
0
list of necessary cipher objects
def ciphers_obj(self):
    if self.esp_enc_alg == "ENCR_AES_GCM_16_IIV":
        ## BEGIN code to update
        return [ AES.new(self.esp_enc_key, AES.MODE_GCM, nonce=self.nonce)]
        ## END code to update
    raise UnsupportedEncAlgError(sa.esp_enc_alg, "unsupported")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_all_cipher():\n return OpenSSL.cipher_algo.keys()", "def ciphers(self):\n return self._ciphers", "def ciphers(self) -> Sequence[str]:\n return pulumi.get(self, \"ciphers\")", "def ciphers(self) -> Sequence[str]:\n return pulumi.get(self, \"ciphers\")", "def list_ciphers():\n global AVAILABLE_CIPHERS\n print(\"[!] Available ciphers: \")\n for i in range(len(AVAILABLE_CIPHERS)):\n print(\" ----> %s.%s\"%(i+1, AVAILABLE_CIPHERS[i]))\n exit()", "def get_all_ciphers(method):\n ssl_method = getattr(SSL, method.replace('.', '_') + '_METHOD')\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n try:\n context = SSL.Context(ssl_method)\n context.set_cipher_list(\"ALL:COMPLEMENTOFALL\")\n sock = SSL.Connection(context, sock)\n ciphers = sock.get_cipher_list()\n except SSL.Error:\n ciphers = []\n finally:\n sock.close()\n\n return ciphers", "def test_result(self):\n connection = Connection(Context(SSLv23_METHOD), None)\n ciphers = connection.get_cipher_list()\n assert isinstance(ciphers, list)\n for cipher in ciphers:\n assert isinstance(cipher, str)", "def ciphers(self, ciphers):\n\n self._ciphers = ciphers", "def cipher_feedback(self):", "def test_set_cipher_list(self, context, cipher_string):\n context.set_cipher_list(cipher_string)\n conn = Connection(context, None)\n\n assert \"AES128-SHA\" in conn.get_cipher_list()", "def _sanity_check_ciphers(self, other):\n if not cipherfactory.tripleDESPresent:\n other.cipherNames = other.cipherNames[:]\n self._remove_all_matches(other.cipherNames, \"3des\")\n\n if not other.cipherNames:\n raise ValueError(\"No supported ciphers\")", "def operate_cipher(self):", "def __init__(self):\n self._profile = []\n self._ciphers = ''", "def org_apache_felix_https_jetty_ciphersuites_included(self) -> ConfigNodePropertyArray:\n return self._org_apache_felix_https_jetty_ciphersuites_included", "def __init__(self):\n super(BasicAuthenticationSuite, self).__init__()\n self._protocol = ssl.PROTOCOL_TLSv1\n self._ciphers = ':'.join((\n 'AES128-SHA',\n 'DES-CBC3-SHA',\n 'AES256-SHA',\n 'DHE-DSS-DES-CBC3-SHA',\n 'DHE-RSA-DES-CBC3-SHA',\n 'DH-DSS-AES128-SHA',\n 'DH-RSA-AES128-SHA',\n 'DHE-DSS-AES128-SHA',\n 'DHE-RSA-AES128-SHA',\n 'DH-RSA-AES256-SHA',\n 'DHE-DSS-AES256-SHA',\n 'DHE-RSA-AES256-SHA',\n ))", "def test_set_cipher_list_no_cipher_match(self, context):\n with pytest.raises(Error) as excinfo:\n context.set_cipher_list(b\"imaginary-cipher\")\n assert excinfo.value.args[0][0] in [\n # 1.1.x\n (\n \"SSL routines\",\n \"SSL_CTX_set_cipher_list\",\n \"no cipher match\",\n ),\n # 3.0.x\n (\n \"SSL routines\",\n \"\",\n \"no cipher match\",\n ),\n ]", "def test_preferred_cipher(host, method):\n ssl_method = getattr(SSL, method.replace('.', '_') + '_METHOD')\n context = SSL.Context(ssl_method)\n context.set_cipher_list(\"ALL:COMPLEMENTOFALL\")\n\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n try:\n sock = SSL.Connection(context, sock)\n sock.connect(host.address)\n\n headers = make_request(sock, host.server)\n\n preferred = sock.cipher()\n host.report_preferred(method, preferred[0], preferred[2])\n except SSL.Error as e:\n pass\n finally:\n sock.close()", "def test_get_cipher_bits_before_connect(self):\n ctx = Context(SSLv23_METHOD)\n conn = Connection(ctx, None)\n assert conn.get_cipher_bits() is None", "def get_cipher_bits(sock):\n cipher = binding_lib.SSL_get_current_cipher(sock._ssl)\n if cipher == binding_ffi.NULL:\n return None\n\n return binding_lib.SSL_CIPHER_get_bits(cipher, binding_ffi.NULL)", "def __init__(self):\n 
super(TLS12AuthenticationSuite, self).__init__()\n self._protocol = ssl.PROTOCOL_TLSv1_2\n self._ciphers = ':'.join((\n 'AES128-SHA256',\n 'AES256-SHA256',\n 'DH-DSS-AES256-SHA256',\n 'DH-DSS-AES128-SHA256',\n 'DH-RSA-AES128-SHA256',\n 'DHE-DSS-AES128-SHA256',\n 'DHE-RSA-AES128-SHA256',\n 'DH-DSS-AES256-SHA256',\n 'DH-RSA-AES256-SHA256',\n 'DHE-DSS-AES256-SHA256',\n 'DHE-RSA-AES256-SHA256',\n 'ECDH-ECDSA-AES128-SHA256',\n 'ECDH-ECDSA-AES256-SHA256',\n 'ECDHE-ECDSA-AES128-SHA256',\n 'ECDHE-ECDSA-AES256-SHA384',\n 'ECDH-RSA-AES128-SHA256',\n 'ECDH-RSA-AES256-SHA384',\n 'ECDHE-RSA-AES128-SHA256',\n 'ECDHE-RSA-AES256-SHA384',\n 'ECDHE-ECDSA-AES128-GCM-SHA256',\n 'ECDHE-ECDSA-AES256-GCM-SHA384',\n 'ECDHE-ECDSA-AES128-SHA256',\n 'ECDHE-ECDSA-AES256-SHA384',\n ))", "def print_cipher_certificate(secure_client):\n cert = secure_client.getpeercert()\n #print(\"Ciphers offered to the Mail Server During Negotiations: {}\\r\\n\".format(secure_client.shared_ciphers()))\n print(\"Cipher in use for this TLS Connection: {} \\r\\n\".format(secure_client.cipher()))\n print(\"Certificate is Issued By: {} \\r\\n\".format(cert[\"issuer\"]))\n print(\"Certificate covers the following Domains: {}\\r\\n\".format(cert[\"subjectAltName\"]))", "def __init__(self):\n self._init_key_settings()\n self._init_misc_extensions()\n self.minVersion = (3, 1)\n self.maxVersion = (3, 4)\n self.versions = [(3, 4), (3, 3), (3, 2), (3, 1)]\n self.cipherNames = list(CIPHER_NAMES)\n self.macNames = list(MAC_NAMES)\n self.keyExchangeNames = list(KEY_EXCHANGE_NAMES)\n self.cipherImplementations = list(CIPHER_IMPLEMENTATIONS)", "def keyed_wheel_cipher(key, pool=None):\n if pool is None:\n pool = ascii_uppercase + digits\n original_pool = {}\n original_pool = list(pool)\n keyed_pool = makealphabet(key)\n # print(keyed_pool)\n return dict(zip(keyed_pool, original_pool))", "def select_cipher(supported: dict, proposed: dict) -> Tuple[str, int]:\n\n common_ciphers = set(supported.keys()).intersection(proposed.keys())\n\n cipher = None\n key_size = -1\n\n if common_ciphers != set():\n for c in common_ciphers:\n current_keysize = max(\n # -1 will be the max value if the intersection is empty\n set([-1]).union(set(supported.get(c)).intersection(proposed.get(c))))\n if current_keysize > key_size:\n key_size = current_keysize\n cipher = c\n\n if not cipher or key_size == -1:\n raise ValueError(\n 'Could not agree on a cipher')\n\n return (cipher, key_size)", "def get_session_algorithms(self): # real signature unknown; restored from __doc__\n return \"\"", "def choose_cipher(cls):\n while True:\n\n crypt = input(\"Would you like to encrypt or decrypt?\").lower()\n print(crypt)\n if (crypt != \"encrypt\") and (crypt != \"decrypt\"):\n crypt = 0\n print(\"Invalid Selection\")\n else:\n break\n\n while True:\n\n cipher_choice = input(\"Select Cipher: \\n\"\n \"A) Affine\\n\"\n \"B) Atbash\\n\"\n \"C) Keyword\\n\"\n ).lower()\n\n if cipher_choice == (\"a\" or \"a)\" or \"affine\"):\n cipher_choice = \"affine\"\n break\n elif cipher_choice == (\"b\" or \"b)\" or \"atbash\"):\n cipher_choice = \"atbash\"\n break\n elif cipher_choice == (\"c\" or \"c)\" or \"keyword\"):\n cipher_choice = \"keyword\"\n break\n\n else:\n print(\"Invalid Selection\")\n while True:\n message = input(\"Input your message: \")\n if (len(message) >= 1):\n break\n else:\n print(\"Invalid Message\")\n while True:\n otp = input(\"Enter one time pad: \")\n if crypt == \"encrypt\" or crypt == \"e\":\n if (len(message) % 5):\n otp_length = (len(message) + (5 - (len(message) % 5)))\n 
else:\n otp_length = (len(message))\n if len(otp) >= otp_length:\n break\n else:\n print(\"otp for this message must be \"\n \"{} characters long\".format(otp_length))\n else:\n break\n return cls(crypt, cipher_choice, otp, message)", "def poodle():\n with settings(hide('everything'), warn_only=True):\n check_accept = re.compile(r'\\bCipher\\sis\\b')\n check_cypher = re.compile(r'\\bCipher\\s*:\\s0000\\b|'\n r'\\bCipher\\s*:\\s\\(NONE\\)\\b')\n try:\n result = 'Poodle '\n timeout = run('which timeout')\n ssl2test = run('%s 3 openssl s_client -ssl2 -connect localhost:443'\n % timeout)\n if check_accept.search(ssl2test):\n check_ssl2 = check_cypher.search(ssl2test)\n if check_ssl2:\n result += 'SSLv2: OK'\n else:\n result += 'SSLv2: VULNERABLE'\n else:\n result += 'SSLv2: OK'\n ssl3test = run('%s 3 openssl s_client -ssl3 -connect localhost:443'\n % timeout)\n if check_accept.search(ssl2test):\n check_ssl3 = check_cypher.search(ssl3test)\n if check_ssl3:\n result += ' / SSLv3: OK'\n else:\n result += ' / SSLv3: VULNERABLE'\n else:\n result += ' / SSLv3: OK'\n print(\"%s: %s\" % (env.host, result))\n logging.warning(\"%s: %s\" % (env.host, result))\n except Exception as e:\n logging.warning('%s: Error: %s' % (env.host, e.message))", "def cipher_suite(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"cipher_suite\")", "def ssl_cipher(self) -> str:\n return pulumi.get(self, \"ssl_cipher\")", "def create_encryptors():\r\n\r\n chars = list('0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!#$%&\\()*+-:;<=>@[]^_`{}~.,?')\r\n almost = ''.join(random.choice(chars) for _ in range(0, random.randint(3, 6)))\r\n if almost[-1] == \".\" or almost[-1] == \"?\" or almost[-1] == \",\":\r\n almost = almost[:-1]\r\n almost += random.choice([\"'}\", \"{@\"])\r\n return almost", "def _copy_cipher_settings(self, other):\n other.cipherNames = self.cipherNames\n other.macNames = self.macNames\n other.keyExchangeNames = self.keyExchangeNames\n other.cipherImplementations = self.cipherImplementations\n other.minVersion = self.minVersion\n other.maxVersion = self.maxVersion\n other.versions = self.versions", "def get_cipher_block(cipher_text): # 4 Blocks 16 bit each\n cipher_block = []\n [cipher_block.append(int(cipher_text[i:i + 4], 16)) for i in range(0, len(cipher_text), 4)]\n return cipher_block", "def org_apache_felix_https_jetty_ciphersuites_excluded(self) -> ConfigNodePropertyArray:\n return self._org_apache_felix_https_jetty_ciphersuites_excluded", "def Generar_Claves():\n salida=Keypp()\n savekey(salida)\n savecomp(salida)", "def _available_algorithms(**_: str) -> Set[str]:\n avail = set()\n pass2 = set()\n for algo in hashlib.algorithms_available:\n lalgo = algo.lower()\n if \"with\" in lalgo:\n continue # skip apparently redundant ones\n if lalgo != algo:\n pass2.add(algo)\n else:\n avail.add(lalgo)\n for algo in pass2:\n if algo.lower() not in avail:\n avail.add(algo)\n return avail", "def get_next_conf_keys(self):\n C_List = []\n for key in self.Poss_Tree:\n key_c = int(str(key)[-1])\n for choice in self.Poss_Tree[key]:\n if choice == key_c:\n C_List.append(int(construct_pass(key, choice)))\n return C_List", "def break_cipher( self, ciphertexts ):\n\n\t\ttruncated = self.__truncate(ciphertexts)\n\t\tciphertexstring = b''.join( truncated )\n\n\t\tkeystream_size = len( truncated[0] )\n\n\t\ttransposed = self.__transpose( ciphertexstring, keystream_size )\n\n\t\txor_key = b''\n\t\tfor block in transposed:\n\t\t\thex_block = hexlify( block )\n\t\t\txor_key += bytes( [ ord( 
break_xor_single_byte_by_freq(hex_block) ) ] )\n\n\t\treturn xor_key", "def __get_cipher(self):\n return Fernet(open(self.__key_file, 'rb').read())", "def test_get_cipher_name_before_connect(self):\n ctx = Context(SSLv23_METHOD)\n conn = Connection(ctx, None)\n assert conn.get_cipher_name() is None", "def certificates(self):\n if self.user.is_superuser:\n return Certificate.objects.all()\n else:\n return Certificate.objects.filter(licensee__in=self.licensees.all())", "def test_compatibility(cipher, mode):\n\n chiper_obj = cipher_params(cipher, os.urandom(length_by_cipher[cipher]))[0] #need to be object, not interface, to validate_for_algorithm work\n if chiper_obj.name == \"ChaCha20\":\n return True\n mode_object = None\n if mode == 'CBC':\n mode_object = modes.CBC(os.urandom(16))\n elif mode == 'GCM':\n mode_object = modes.GCM(os.urandom(16), os.urandom(16))\n else:\n return False\n\n return default_backend().cipher_supported(chiper_obj, mode_object)", "def list_encryption_type(self, search_opts=None):\n return [self._unify_encryption_type(encryption_type)\n for encryption_type in self._impl.list_encryption_type(\n search_opts=search_opts)]", "def eul59():\n cipher = [c.rstrip('\\n') for c in open('resources/p059_cipher.txt')]\n cipher = [int(c) for c in cipher[0].split(\",\")]\n for key in itertools.product(range(97, 123), repeat=3):\n msg = [x ^ y for x, y in zip(cipher, itertools.cycle(key))]\n if ' the ' in ''.join(map(chr, msg)):\n return sum(msg)", "def list_cas():\n cas = []\n for ca in settings.ACM_PRIVATE_CA_SETTINGS:\n _ca = get_ca(ca)\n cas.append(_ca.get_certificate_authority_certificate())\n return cas", "def caesar_cipher(self):\n chars = list(self.CHARS * 2)\n data = \"\"\n crypt_operator = add if self.crypt_type == \"encrypt\" else sub\n\n for character in self.file_data:\n try:\n index = crypt_operator(chars.index(character), self.key)\n data += chars[index]\n except (ValueError, TypeError) as e:\n logger.error(e)\n print(f\"Invalid character '{character}' in file {self.file}.\")\n logger.info(f\"data: {data}\")\n return data", "def circuit_list(self):\r\n return self.circuits.itervalues()", "def test_possible_keys(ciphertext):\n\n\ttop_keys = []\n\n\tfor num in range(0, 255, 1):\n\t\tdecrypted_str = xor_decrypt(ciphertext, num)\n\t\tscore = count_score(decrypted_str)\n\t\tif score > 80:\n\t\t\ttop_keys.append(chr(num))\n\n\treturn top_keys", "def test_ssl_object_attributes(self) -> None:\n self.start_dummy_server()\n\n sock = socket.create_connection((self.host, self.port))\n with SSLTransport(\n sock, self.client_context, server_hostname=\"localhost\"\n ) as ssock:\n cipher = ssock.cipher()\n assert type(cipher) == tuple\n\n # No chosen protocol through ALPN or NPN.\n assert ssock.selected_alpn_protocol() is None\n assert ssock.selected_npn_protocol() is None\n\n shared_ciphers = ssock.shared_ciphers()\n # SSLContext.shared_ciphers() changed behavior completely in a patch version.\n # See: https://github.com/python/cpython/issues/96931\n assert shared_ciphers is None or (\n type(shared_ciphers) is list and len(shared_ciphers) > 0\n )\n\n assert ssock.compression() is None\n\n validate_peercert(ssock)\n\n ssock.send(sample_request())\n response = consume_socket(ssock)\n validate_response(response)", "def encryptor(iv = os.urandom(16), key = os.urandom(32), bc = backend,key_type = 'AES128',mode='CBC'):\n\tif key_type == 'AES128':\n\t\talgo = algorithms.AES(key)\n\telif key_type == 'ChaCha20':\n\t\talgo = 
algorithms.ChaCha20(key,nonce=os.urandom(32))\n\telse:\n\t\traise('Error algorithm ' + key_type + ' not supported!')\n\tif mode == 'CBC':\n\t\tmode = modes.CBC(iv)\n\telif mode == 'GCM':\n\t\tmode = modes.GCM(iv)\n\telse :\n\t\traise('Error mode ' + mode + ' not supported!')\n\tcipher = Cipher(algo,mode,backend = bc)\n\treturn iv,key,cipher.encryptor()", "def test_kyc_get_legal_list(self):\n pass", "def get_cryptomatte_names(self):\n return [self.cryptomattes[x][\"name\"] for x in self.cryptomattes]", "def cbc_encrypt(pt_bin_list, keys, rounds):\n bsize = len(pt_bin_list[0])\n ivector = generate_random_binary(bsize) # Initialization Vector\n enc_result = []\n msg = pt_bin_list\n\n enc_result.append(feistel_encrypt(xor_compare(msg[0],ivector),keys[0],rounds))\n if len(msg) > 1:\n for i in range(1,len(msg)):\n enc_result.append(feistel_encrypt(xor_compare(msg[i], enc_result[i-1]),keys[i],rounds))\n enc_result.insert(0,ivector) # Store IV to the start of ciphertext\n return enc_result", "def get_product_courses(product):\n if product.content_type.model == CONTENT_TYPE_MODEL_COURSERUN:\n return [product.content_object.course]\n elif product.content_type.model == CONTENT_TYPE_MODEL_COURSE:\n return [product.content_object]\n elif product.content_type.model == CONTENT_TYPE_MODEL_PROGRAM:\n return list(\n product.content_object.courses.all().order_by(\"position_in_program\")\n )", "def find_ecb_cipher(ctexts, bsize):\n rank = dict()\n for c in ctexts:\n blocks = [c[i:i+bsize] for i in range(0, len(c), bsize)]\n count = Counter(blocks)\n top = count.most_common()[0][1]\n rank[top] = c\n return rank[max(rank.keys())]", "def compare_cow_transport_algorithms():\n all_cows = load_cows('PS1\\ps1_cow_data.txt')\n\n start = time.time()\n greedy_list = greedy_cow_transport(all_cows)\n end = time.time()\n total = end-start\n print (total, 'sec')\n print('Greedy Transport:', greedy_list)\n print('# of trips:', len(greedy_list))\n\n start = time.time()\n power_list = brute_force_cow_transport(all_cows)\n best = len(all_cows)\n for ledger in power_list:\n if len(ledger) < best:\n best = len(ledger)\n best_trip = ledger\n \n end = time.time()\n total = end-start\n print (total, 'sec')\n print('Best trip:', best_trip)\n print('# of trips:',best)", "def _get_checksum_algorithm_set(payload_info_list):\n return {d[\"checksum_algorithm\"] for d in payload_info_list}", "def _list_contexts(self):\r\n return sorted(list(self._bbreader.cache.keys()))", "def call_cipher(self):\n if self.cipher_choice == \"affine\":\n\n if self.crypt == \"encrypt\":\n encrypted_message = Affine().encrypt(self.message.upper())\n otp_encrypted = OneTimePad().encrypt(\n encrypted_message, self.otp.upper())\n return (otp_encrypted)\n\n elif self.crypt == \"decrypt\":\n otp_decrypted = OneTimePad().decrypt(\n self.message.upper(), self.otp.upper())\n decrypted_message = Affine().decrypt(otp_decrypted)\n return (decrypted_message.lower())\n\n elif self.cipher_choice == \"atbash\":\n\n if self.crypt == \"encrypt\":\n encrypted_message = Atbash().encrypt(self.message.upper())\n otp_encrypted = OneTimePad().encrypt(\n encrypted_message, self.otp.upper())\n return (otp_encrypted)\n\n elif self.crypt == \"decrypt\":\n otp_decrypted = OneTimePad().decrypt(\n self.message.upper(), self.otp.upper())\n decrypted_message = Atbash().decrypt(otp_decrypted)\n return (decrypted_message.lower())\n\n elif self.cipher_choice == \"keyword\":\n\n if self.crypt == \"encrypt\":\n encrypted_message = KeywordCipher().encrypt(self.message.upper())\n otp_encrypted = 
OneTimePad().encrypt(\n encrypted_message, self.otp.upper())\n return (otp_encrypted)\n\n elif self.crypt == \"decrypt\":\n otp_decrypted = OneTimePad().decrypt(\n self.message.upper(), self.otp.upper())\n decrypted_message = KeywordCipher().decrypt(otp_decrypted)\n return (decrypted_message.lower())", "def available_characterizations(self):\n return self.characterization_results.keys()", "def my_courses(self, signer):\n return list(chain(*[p.user_courses(signer=signer) for p in self.providers]))", "def tunnel1_phase2_encryption_algorithms(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"tunnel1_phase2_encryption_algorithms\")", "def keysAll():", "def test_single_cipher(host, method, cipher):\n ssl_method = getattr(SSL, method.replace('.', '_') + '_METHOD')\n context = SSL.Context(ssl_method)\n context.set_cipher_list(cipher)\n\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n try:\n sock = SSL.Connection(context, sock)\n sock.connect(host.address)\n\n headers = make_request(sock, host.server)\n\n bits = get_cipher_bits(sock)\n host.report_cipher(method, cipher, bits, HostInfo.CIPHER_ACCEPTED)\n except SSL.Error as e:\n host.report_cipher(method, cipher, -1, HostInfo.CIPHER_FAILED)\n finally:\n sock.close()", "def generate_keys(self):\n self.keys = []\n key = string_to_bit_array(self.passwd)\n key = self.permutation(key, CP_1) # Perform initial permutation on the key\n g, d = split_into_n(key, 28) # Split into g (LEFT) & d (RIGHT)\n for i in range(16): # Apply the 16 rounds\n g, d = self.shift(g, d, ROUND_KEY_SHIFT[i]) # Shift the key according to the round\n tmp = g + d # Merge them\n self.keys.append(self.permutation(tmp, CP_2)) # Perform the permutation to get the Ki", "def tunnel2_phase1_encryption_algorithms(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"tunnel2_phase1_encryption_algorithms\")", "def compare_cow_transport_algorithms():\n # TODO: Your code here\n pass", "def test_set_cipher_list_wrong_type(self, context):\n with pytest.raises(TypeError):\n context.set_cipher_list(object())", "def credential_list():\n rows = safeisland.list_certificates()\n certs = []\n for row in rows:\n# certs.append(row[\"cert\"])\n certs.append({\"uuid\": row[\"uuid\"], \"cert\": row[\"cert\"]})\n\n return {\"payload\": certs}", "def test_set_client_ca_list_errors(self):\n ctx = Context(SSLv23_METHOD)\n with pytest.raises(TypeError):\n ctx.set_client_ca_list(\"spam\")\n with pytest.raises(TypeError):\n ctx.set_client_ca_list([\"spam\"])", "def test_get_cipher_bits(self):\n server, client = loopback()\n server_cipher_bits, client_cipher_bits = (\n server.get_cipher_bits(),\n client.get_cipher_bits(),\n )\n\n assert isinstance(server_cipher_bits, int)\n assert isinstance(client_cipher_bits, int)\n\n assert server_cipher_bits == client_cipher_bits", "def _cipher_bytes(self, cipher):\n if cipher == \"\":\n return \"00\"\n\n bytes_list = [b\"\\x00\\x04\", b\"\\x00\\x05\", b\"\\x00\\x07\", b\"\\x00\\x0a\",\n b\"\\x00\\x16\", b\"\\x00\\x2f\", b\"\\x00\\x33\", b\"\\x00\\x35\",\n b\"\\x00\\x39\", b\"\\x00\\x3c\", b\"\\x00\\x3d\", b\"\\x00\\x41\",\n b\"\\x00\\x45\", b\"\\x00\\x67\", b\"\\x00\\x6b\", b\"\\x00\\x84\",\n b\"\\x00\\x88\", b\"\\x00\\x9a\", b\"\\x00\\x9c\", b\"\\x00\\x9d\",\n b\"\\x00\\x9e\", b\"\\x00\\x9f\", b\"\\x00\\xba\", b\"\\x00\\xbe\",\n b\"\\x00\\xc0\", b\"\\x00\\xc4\", b\"\\xc0\\x07\", b\"\\xc0\\x08\",\n b\"\\xc0\\x09\", b\"\\xc0\\x0a\", b\"\\xc0\\x11\", b\"\\xc0\\x12\",\n b\"\\xc0\\x13\", b\"\\xc0\\x14\", 
b\"\\xc0\\x23\", b\"\\xc0\\x24\",\n b\"\\xc0\\x27\", b\"\\xc0\\x28\", b\"\\xc0\\x2b\", b\"\\xc0\\x2c\",\n b\"\\xc0\\x2f\", b\"\\xc0\\x30\", b\"\\xc0\\x60\", b\"\\xc0\\x61\",\n b\"\\xc0\\x72\", b\"\\xc0\\x73\", b\"\\xc0\\x76\", b\"\\xc0\\x77\",\n b\"\\xc0\\x9c\", b\"\\xc0\\x9d\", b\"\\xc0\\x9e\", b\"\\xc0\\x9f\",\n b\"\\xc0\\xa0\", b\"\\xc0\\xa1\", b\"\\xc0\\xa2\", b\"\\xc0\\xa3\",\n b\"\\xc0\\xac\", b\"\\xc0\\xad\", b\"\\xc0\\xae\", b\"\\xc0\\xaf\",\n b\"\\xcc\\x13\", b\"\\xcc\\x14\", b\"\\xcc\\xa8\", b\"\\xcc\\xa9\",\n b\"\\x13\\x01\", b\"\\x13\\x02\", b\"\\x13\\x03\", b\"\\x13\\x04\",\n b\"\\x13\\x05\"]\n\n counter = 1\n for bytes_values in bytes_list:\n bytes_values_str = str(bytes_values.hex())\n if cipher == bytes_values_str:\n break\n counter += 1\n\n hexvalue = str(hex(counter))[2:]\n # This part must always be two bytes.\n if len(hexvalue) < 2:\n return \"0\" + hexvalue\n else:\n return hexvalue", "def test_get_cipher_version_before_connect(self):\n ctx = Context(SSLv23_METHOD)\n conn = Connection(ctx, None)\n assert conn.get_cipher_version() is None", "def decrypt(ciphertexts):\n e_x = []\n\n for i in range(3):\n c1, c2 = ciphertexts[i][x[i]]\n dec = elgamal.decrypt(c1, c2, secret_keys[i])\n e_x.append(str(bin(dec))[2:].zfill(16))\n\n return e_x", "async def ccdenylist(self, ctx):\n no_channels_msg = \"Chatchart is currently allowed everywhere in this server.\"\n channel_list = await self.config.guild(ctx.guild).channel_deny()\n if not channel_list:\n msg = no_channels_msg\n else:\n msg = \"Chatchart is not allowed in:\\n\"\n remove_list = []\n for channel in channel_list:\n channel_obj = self.bot.get_channel(channel)\n if not channel_obj:\n remove_list.append(channel)\n else:\n msg += f\"{channel_obj.mention}\\n\"\n if remove_list:\n new_list = [x for x in channel_list if x not in remove_list]\n await self.config.guild(ctx.guild).channel_deny.set(new_list)\n if len(remove_list) == len(channel_list):\n msg = no_channels_msg\n await ctx.send(msg)", "def hash_cipher(self):\n return self._digest_cipher", "def __create_cipher(self, nonce=None, iv=None):\r\n cipher = None\r\n if self.__encryption_method == EncryptionMethod.AES:\r\n if nonce is not None:\r\n cipher = AES.new(self.__encryption_key, _block_mode_dict[self.__block_mode], nonce=nonce)\r\n elif iv is not None:\r\n cipher = AES.new(self.__encryption_key, _block_mode_dict[self.__block_mode], iv=iv)\r\n else:\r\n cipher = AES.new(self.__encryption_key, _block_mode_dict[self.__block_mode])\r\n elif self.__encryption_method == EncryptionMethod.DES3:\r\n if nonce is not None:\r\n cipher = DES3.new(self.__encryption_key, _block_mode_dict[self.__block_mode], nonce=nonce)\r\n elif iv is not None:\r\n cipher = DES3.new(self.__encryption_key, _block_mode_dict[self.__block_mode], iv=iv)\r\n else:\r\n cipher = DES3.new(self.__encryption_key, _block_mode_dict[self.__block_mode])\r\n elif self.__encryption_method == EncryptionMethod.DES:\r\n if nonce is not None:\r\n cipher = DES.new(self.__encryption_key, _block_mode_dict[self.__block_mode], nonce=nonce)\r\n elif iv is not None:\r\n cipher = DES.new(self.__encryption_key, _block_mode_dict[self.__block_mode], iv=iv)\r\n else:\r\n cipher = DES.new(self.__encryption_key, _block_mode_dict[self.__block_mode])\r\n elif self.__encryption_method == EncryptionMethod.SHIFT:\r\n if not self.__block_mode == BlockMode.ECB:\r\n raise Exception(\"Shift only supports ECB\")\r\n cipher = SimpleShiftCipher(self.__encryption_key)\r\n elif self.__encryption_method == EncryptionMethod.XOR:\r\n if not 
self.__block_mode == BlockMode.ECB:\r\n raise Exception(\"XOR only supports ECB\")\r\n cipher = SimpleXorCipher(self.__encryption_key)\r\n else:\r\n raise Exception(\"Unknown encryption method \" + str(self.__encryption_method))\r\n return cipher", "def get_codecs_list():\n for codec in CODECS_IN_FILE.iterkeys():\n print codec", "def __Cipher(self, selector):\n assert selector in self.OP_TYPES, 'Invalid selector :%s' % selector\n if selector == self.OP_ACTIVE and (len(self.ciphers.keys()) > 1 or\n not len(self.ciphers.keys())):\n assert 0, 'If both encryption and decryption used then selector must \\\n be OP_ENCRYPT or OP_DECRYPT and at least 1 must be active'\n\n cipher = None\n if selector == self.OP_ACTIVE:\n # should only be one cipher active\n cipher = self.ciphers.values()[0]\n else:\n cipher = self.ciphers.get(selector)\n # have we been created a cipher for this selector yet?\n if not cipher:\n # no, so set it up as requested\n\n # convert between AES and EVP modes\n # NOTE: AES auto-selects based on key size using the same mode, but\n # EVP requires different mode strings for each key size (in bits)\n mode = 'aes_%s_cbc' % (self.key_size * 8)\n cipher = EVP.Cipher(alg=mode,\n key=self.key_bytes,\n iv=self.IV,\n op=selector,\n padding=0)\n self.ciphers[selector] = cipher\n return cipher", "def _init_misc_extensions(self):\n self.certificateTypes = list(CERTIFICATE_TYPES)\n self.useExperimentalTackExtension = False\n self.sendFallbackSCSV = False\n self.useEncryptThenMAC = True\n self.ecdsaSigHashes = list(ECDSA_SIGNATURE_HASHES)\n self.more_sig_schemes = list(SIGNATURE_SCHEMES)\n self.usePaddingExtension = True\n self.useExtendedMasterSecret = True\n self.requireExtendedMasterSecret = False\n # PSKs\n self.pskConfigs = []\n self.psk_modes = list(PSK_MODES)\n # session tickets\n self.ticketKeys = []\n self.ticketCipher = \"aes256gcm\"\n self.ticketLifetime = 24 * 60 * 60\n self.max_early_data = 2 ** 14 + 16 # full record + tag\n # send two tickets so that client can quickly ramp up number of\n # resumed connections (as tickets are single-use in TLS 1.3\n self.ticket_count = 2\n self.record_size_limit = 2**14 + 1 # TLS 1.3 includes content type", "def __init__(self, crypt, cipher_choice, otp, message):\n self.crypt = crypt\n self.cipher_choice = cipher_choice\n self.otp = otp\n self.message = message", "def tunnel1_phase1_encryption_algorithms(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"tunnel1_phase1_encryption_algorithms\")", "def get_courses(self, *args):\n courses = []\n user = self.context['user']\n modules = user.profile.purchased_modules.all()\n for module in modules:\n course_id = self.course_in_courses(module.course.mnemo, courses)\n if course_id:\n courses[course_id[0]]['modules'].append({'mnemo': module.mnemo})\n else:\n courses.append({\n 'mnemo': module.course.mnemo,\n 'modules': [{'mnemo': module.mnemo}]\n })\n return courses", "def __init__(self, tls_1_2=None, tls_1_1=None, tls_1_0=None, ssl_3_0=None):\n self.tls_1_2 = tls_1_2\n self.tls_1_1 = tls_1_1\n self.tls_1_0 = tls_1_0\n self.ssl_3_0 = ssl_3_0", "def get_all_candidates(self) -> list:", "def tunnel2_phase2_encryption_algorithms(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"tunnel2_phase2_encryption_algorithms\")", "def assembly_courses(wall):\n courses = []\n vertices = set(wall.nodes())\n base = set(wall.nodes_where({'is_support': True}))\n\n if base:\n courses.append(list(base))\n\n seen = set()\n seen.update(base)\n\n vertices -= base\n\n while 
vertices:\n nbrs = set(nbr for key in courses[-1] for nbr in wall.neighbors(key))\n course = list(nbrs - seen)\n courses.append(course)\n seen.update(nbrs)\n vertices -= nbrs\n\n return courses", "async def test_setup_ssl_ciphers(\n hass: HomeAssistant, ssl_cipher_list: str, ssl_cipher_list_expected: SSLCipherList\n) -> None:\n with patch(\n \"homeassistant.components.rest.data.create_async_httpx_client\",\n return_value=MagicMock(request=AsyncMock(return_value=respx.MockResponse())),\n ) as httpx:\n assert await async_setup_component(\n hass,\n SENSOR_DOMAIN,\n {\n SENSOR_DOMAIN: {\n \"platform\": DOMAIN,\n \"resource\": \"http://localhost\",\n \"method\": \"GET\",\n \"ssl_cipher_list\": ssl_cipher_list,\n }\n },\n )\n await hass.async_block_till_done()\n httpx.assert_called_once_with(\n hass,\n verify_ssl=True,\n default_encoding=\"UTF-8\",\n ssl_cipher_list=ssl_cipher_list_expected,\n )", "def show_all_certifications():\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n\n \n certs = Cert.query.all()\n ## all possible certs...\n \n return render_template(\"certs_display.html\", certs = certs)", "def multi_encrypt(m, keys):\n pattern = []\n cipher = []\n\n for c in m:\n k = random.choice(keys)\n pattern.append(k)\n cipher.append(move_char(c, k))\n\n return \"\".join(cipher), pattern", "def get_celestrak_sections():\n return CelestrakDatabase.CELESTRAK_SELECT_SECTIONS", "def _server_select_certificate(self, settings, client_hello,\n cipher_suites, cert_chain,\n private_key, version):\n\n last_cert = False\n possible_certs = []\n\n # Get client groups\n client_groups = client_hello. \\\n getExtension(ExtensionType.supported_groups)\n if client_groups is not None:\n client_groups = client_groups.groups\n\n # If client did send signature_algorithms_cert use it,\n # otherwise fallback to signature_algorithms.\n # Client can also decide not to send sigalg extension\n client_sigalgs = \\\n client_hello. \\\n getExtension(ExtensionType.signature_algorithms_cert)\n if client_sigalgs is not None:\n client_sigalgs = \\\n client_hello. \\\n getExtension(ExtensionType.signature_algorithms_cert). \\\n sigalgs\n else:\n client_sigalgs = \\\n client_hello. \\\n getExtension(ExtensionType.signature_algorithms)\n if client_sigalgs is not None:\n client_sigalgs = \\\n client_hello. \\\n getExtension(ExtensionType.signature_algorithms). \\\n sigalgs\n else:\n client_sigalgs = []\n\n # Get all the certificates we can offer\n alt_certs = ((X509CertChain(i.certificates), i.key) for vh in\n settings.virtual_hosts for i in vh.keys)\n certs = [(cert, key)\n for cert, key in chain([(cert_chain, private_key)], alt_certs)]\n\n for cert, key in certs:\n\n # Check if this is the last (cert, key) pair we have to check\n if (cert, key) == certs[-1]:\n last_cert = True\n\n # Mandatory checks. 
If any one of these checks fail, the certificate\n # is not usuable.\n try:\n # Find a suitable ciphersuite based on the certificate\n ciphers = CipherSuite.filter_for_certificate(cipher_suites, cert)\n for cipher in ciphers:\n if cipher in client_hello.cipher_suites:\n break\n else:\n if client_groups and \\\n any(i in range(256, 512) for i in client_groups) and \\\n any(i in CipherSuite.dhAllSuites\n for i in client_hello.cipher_suites):\n raise TLSInsufficientSecurity(\n \"FFDHE groups not acceptable and no other common \"\n \"ciphers\")\n raise TLSHandshakeFailure(\"No mutual ciphersuite\")\n\n # Find a signature algorithm based on the certificate\n try:\n sig_scheme, _, _ = \\\n self._pickServerKeyExchangeSig(settings,\n client_hello,\n cert,\n key,\n version,\n False)\n except TLSHandshakeFailure:\n raise TLSHandshakeFailure(\n \"No common signature algorithms\")\n\n # If the certificate is ECDSA, we must check curve compatibility\n if cert and cert.x509List[0].certAlg == 'ecdsa' and \\\n client_groups and client_sigalgs:\n public_key = cert.getEndEntityPublicKey()\n curve = public_key.curve_name\n for name, aliases in CURVE_ALIASES.items():\n if curve in aliases:\n curve = getattr(GroupName, name)\n break\n\n if version <= (3, 3) and curve not in client_groups:\n raise TLSHandshakeFailure(\n \"The curve in the public key is not \"\n \"supported by the client: {0}\" \\\n .format(GroupName.toRepr(curve)))\n\n if version >= (3, 4):\n if GroupName.toRepr(curve) not in \\\n ('secp256r1', 'secp384r1', 'secp521r1'):\n raise TLSIllegalParameterException(\n \"Curve in public key is not supported \"\n \"in TLS1.3\")\n\n # If all mandatory checks passed add\n # this as possible certificate we can use.\n possible_certs.append((cipher, sig_scheme, cert, key))\n\n except Exception:\n if last_cert and not possible_certs:\n raise\n continue\n\n # Non-mandatory checks, if these fail the certificate is still usable\n # but we should try to find one that passes all the checks\n\n # Check if every certificate(except the self-signed root CA)\n # in the certificate chain is signed with a signature algorithm\n # supported by the client.\n if cert:\n cert_chain_ok = True\n for i in range(len(cert.x509List)):\n if cert.x509List[i].issuer != cert.x509List[i].subject:\n if cert.x509List[i].sigalg not in client_sigalgs:\n cert_chain_ok = False\n break\n if not cert_chain_ok:\n if not last_cert:\n continue\n break\n\n # If all mandatory and non-mandatory checks passed\n # return the (cert, key) pair, cipher and sig_scheme\n return cipher, sig_scheme, cert, key\n\n # If we can't find cert that passed all the checks, return the first usable one.\n return possible_certs[0]", "def cipher_feedback_mode_encode(msg, CEK, IV = int(0).to_bytes(8, 'big')):\n assert(len(CEK) == 32)\n assert(len(IV) == 8)\n last_block = IV\n res = b''\n for i in range(0, len(msg), 8):\n gamma = GOST2814789ECB_encode(last_block, CEK)\n block = msg[i: min(i + 8, len(msg))]\n encrypted_block = b''\n for j in range(len(block)):\n encrypted_block += int(block[j] ^ gamma[j]).to_bytes(1, 'big')\n res += encrypted_block\n last_block = encrypted_block\n return res", "def encrypt_caesar(plaintext):\n return ''.join([plain_to_cipher[old] for old in plaintext.upper()])", "def __init__(self, key, msg0503):\n enkey1 = map(ord, AES.new(key).encrypt(msg0503[:16]))\n self.cipher = AES.new(\"\".join(\n map(chr, (enkey1[i] ^ ord(msg0503[i + 16]) for i in range(16)))))\n self.encrypt_seq = random.randint(0, 0xffff)", "def main():\n # key = random(1024)\n # 
ciphertexts = [encrypt(key, msg) for msg in MSGS]\n\n # Get key and secret message\n knownPlain2 = \"The nice thing about Keeyloq is now we cryptographers can drive a lot of fancy cars - Dan Boneh\"\n key = strxor(ciphertexts[2], knownPlain2)\n secret = strxor(target, key)\n\n print \"Key: \" + key\n print \"Key (Hex): \" + key.encode(\"hex\")\n print \"Secret: \" + secret", "def tunnel1_phase2_encryption_algorithms(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"tunnel1_phase2_encryption_algorithms\")", "def tunnel1_phase2_encryption_algorithms(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"tunnel1_phase2_encryption_algorithms\")", "def get_charset_list(self):\n lst = []\n\n _lib.caca_get_dither_color_list.argtypes = [_Dither]\n _lib.caca_get_dither_color_list.restype = ctypes.POINTER(ctypes.c_char_p)\n\n for item in _lib.caca_get_dither_color_list(self):\n if item is not None and item != \"\":\n lst.append(item)\n else:\n #memory occurs otherwise\n break\n\n return lst", "def to_list(self):\n import tc\n opts_list = []\n for k, v in self.__class__.__dict__.iteritems():\n if isinstance(v, tc.TC):\n opts_list.append((k, v))\n opts_list = sorted(opts_list)\n return opts_list", "def tunnel2_phase1_encryption_algorithms(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"tunnel2_phase1_encryption_algorithms\")" ]
[ "0.7565985", "0.7461285", "0.7372469", "0.7372469", "0.7340704", "0.66134185", "0.6335098", "0.63059235", "0.60204273", "0.59899426", "0.58400655", "0.5778174", "0.57402366", "0.5726471", "0.5650689", "0.56054634", "0.56048346", "0.5511376", "0.55048597", "0.5406176", "0.54003966", "0.5320266", "0.52777284", "0.5270038", "0.5221054", "0.51939213", "0.5183579", "0.51547176", "0.5147698", "0.5108888", "0.5071378", "0.5067851", "0.5064703", "0.50567377", "0.50521994", "0.50279826", "0.50217795", "0.49925968", "0.4979558", "0.49612695", "0.4954029", "0.4953171", "0.49527574", "0.49510705", "0.4948413", "0.4943252", "0.4940333", "0.4938578", "0.4938523", "0.49308705", "0.4920115", "0.4919393", "0.49189234", "0.49177033", "0.4916559", "0.49087974", "0.49082455", "0.4907284", "0.49066883", "0.48764175", "0.48730782", "0.4856628", "0.4851023", "0.4842438", "0.4840754", "0.48296228", "0.48147684", "0.48096487", "0.48006922", "0.4796095", "0.47951576", "0.4786035", "0.4784025", "0.47750583", "0.47729534", "0.47693384", "0.47689354", "0.4757671", "0.47508687", "0.47439378", "0.47429743", "0.47376546", "0.4731896", "0.47264838", "0.47227797", "0.47215903", "0.47206515", "0.4714976", "0.46924955", "0.46919498", "0.46790016", "0.46746957", "0.46552357", "0.46538663", "0.46529675", "0.4651101", "0.4651101", "0.46501356", "0.46407074", "0.46399385" ]
0.7322964
5
Creates useful information compiled from protein dictionary
def __init__(self, proteins_dict):
    self.proteins_dict = proteins_dict
    self.proteins = list(proteins_dict.keys())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def define_info_dict():\n\n d = {\n \"PRED\": {\n \"COLUMN\": [\"predicted_class\"],\n \"Number\": \"1\",\n \"Type\": \"String\",\n \"Description\": \"Predicted class: somatic, germline, artifact\",\n },\n \"PROB\": {\n \"COLUMN\": [\"prob_s\", \"prob_g\", \"prob_a\"],\n \"Number\": \"3\",\n \"Type\": \"Float\",\n \"Description\": \"Prediction probability of \"\n \"being somatic, germline, artifact in this order\",\n },\n \"SNP\": {\n \"COLUMN\": [\"is_on_db\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n \"Description\": \"Present on SNP database (modified dbSNP/gnomAD (default) or user-provided database)\",\n },\n \"ANNO\": {\n \"COLUMN\": [\"annotation\"],\n \"Number\": \".\",\n \"Type\": \"String\",\n \"Description\": \"Indel annotation formatted as \"\n \"GeneSymbol|RefSeqAccession|CodonPos|IndelEffect\"\n \"Delimited by comma for multiple isoforms\",\n },\n \"COSMIC_CNT\": {\n \"COLUMN\": [\"cosmic_cnt\"],\n \"Number\": \"1\",\n \"Type\": \"Integer\",\n \"Description\": \"COSMIC count in v89\",\n },\n \"MAXMAF\": {\n \"COLUMN\": [\"max_maf\"],\n \"Number\": \"1\",\n \"Type\": \"Float\",\n \"Description\": \"Maximum minor allele frequency (MAF) \"\n \"reported in dbSNP, ClinVar and gnomAD non-cancer population\",\n },\n \"COMMON\": {\n \"COLUMN\": [\"is_common\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n \"Description\": \"Common in dbSNP or MAXMAF > 0.01\",\n },\n \"CLIN\": {\n \"COLUMN\": [\"clin_info\"],\n \"Number\": \"1\",\n \"Type\": \"String\",\n \"Description\": \"ClinVar annotation formatted as ClinicalSignificance|Condition\",\n },\n \"ICP\": {\n \"COLUMN\": [\"indel_complexity\"],\n \"Number\": \"1\",\n \"Type\": \"Integer\",\n \"Description\": \"Indel complexity: mismatches around the indel measured by edit distance\",\n },\n \"DSM\": {\n \"COLUMN\": [\"dissimilarity\"],\n \"Number\": \"1\",\n \"Type\": \"Float\",\n \"Description\": \"Dissimilarity: edit distance between indel and flanking sequences\",\n },\n \"ISZ\": {\n \"COLUMN\": [\"indel_size\"],\n \"Number\": \"1\",\n \"Type\": \"Integer\",\n \"Description\": \"Indel size\",\n },\n \"REP\": {\n \"COLUMN\": [\"repeat\"],\n \"Number\": \"1\",\n \"Type\": \"Integer\",\n \"Description\": \"Repeat: count of the indel-sequence repeats in flanking region\",\n },\n \"UQM\": {\n \"COLUMN\": [\"is_uniq_mapped\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n \"Description\": \"Supported by uniquely mapped reads\",\n },\n \"NEB\": {\n \"COLUMN\": [\"is_near_boundary\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n \"Description\": \"Near exon boundary\",\n },\n \"EQX\": {\n \"COLUMN\": [\"equivalence_exists\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n \"Description\": \"Equivalent alignments exist for the indel\",\n },\n \"BID\": {\n \"COLUMN\": [\"is_bidirectional\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n \"Description\": \"Supported by forward and reverse reads\",\n },\n \"MTA\": {\n \"COLUMN\": [\"is_multiallelic\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n \"Description\": \"Multialleleic\",\n },\n \"FRM\": {\n \"COLUMN\": [\"is_inframe\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n \"Description\": \"In-frame indel\",\n },\n \"SPL\": {\n \"COLUMN\": [\"is_splice\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n \"Description\": \"Located in splice region\",\n },\n \"TRN\": {\n \"COLUMN\": [\"is_truncating\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n \"Description\": \"Truncating indel\",\n },\n \"CDD\": {\n \"COLUMN\": [\"is_in_cdd\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n \"Description\": 
\"Located in conserved domain\",\n },\n \"LOC\": {\n \"COLUMN\": [\"indel_location\"],\n \"Number\": \"1\",\n \"Type\": \"Float\",\n \"Description\": \"Relative indel location within the transcript coding region\",\n },\n \"NMD\": {\n \"COLUMN\": [\"is_nmd_insensitive\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n \"Description\": \"Insensitive to nonsense mediated decay\",\n },\n \"IPG\": {\n \"COLUMN\": [\"ipg\"],\n \"Number\": \"1\",\n \"Type\": \"Float\",\n \"Description\": \"Indels per gene\",\n },\n \"LEN\": {\n \"COLUMN\": [\"cds_length\"],\n \"Number\": \"1\",\n \"Type\": \"Float\",\n \"Description\": \"Coding sequence length. Median value if multiple isoforms exist\",\n },\n \"LC\": {\n \"COLUMN\": [\"lc\"],\n \"Number\": \"1\",\n \"Type\": \"Float\",\n \"Description\": \"Linguistic complexity: diversity of k-mers in flanking 50-bp region\",\n },\n \"LLC\": {\n \"COLUMN\": [\"local_lc\"],\n \"Number\": \"1\",\n \"Type\": \"Float\",\n \"Description\": \"Local linguistic complexity: diversity of k-mers in flanking 6-bp region\",\n },\n \"GC\": {\n \"COLUMN\": [\"gc\"],\n \"Number\": \"1\",\n \"Type\": \"Float\",\n \"Description\": \"GC-content in flanking 50-bp region\",\n },\n \"LGC\": {\n \"COLUMN\": [\"local_gc\"],\n \"Number\": \"1\",\n \"Type\": \"Float\",\n \"Description\": \"Local GC-content in flanking 6-bp region\",\n },\n \"SG\": {\n \"COLUMN\": [\"strength\"],\n \"Number\": \"1\",\n \"Type\": \"Float\",\n \"Description\": \"DNA bond strength of 2-mers in flanking 50-bp region\",\n },\n \"LSG\": {\n \"COLUMN\": [\"local_strength\"],\n \"Number\": \"1\",\n \"Type\": \"Float\",\n \"Description\": \"Local DNA bond strength of 2-mers in flanking 6-bp region\",\n },\n \"INS\": {\n \"COLUMN\": [\"is_ins\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n \"Description\": \"Insertion\",\n },\n \"ATI\": {\n \"COLUMN\": [\"is_at_ins\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n \"Description\": \"Single insertion of A or T\",\n },\n \"ATD\": {\n \"COLUMN\": [\"is_at_del\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n \"Description\": \"Single deletion of A or T\",\n },\n \"GCI\": {\n \"COLUMN\": [\"is_gc_ins\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n \"Description\": \"Single insertion of G or C\",\n },\n \"GCD\": {\n \"COLUMN\": [\"is_gc_del\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n \"Description\": \"Single deletion of G or C\",\n },\n \"ALTC\": {\n \"COLUMN\": [\"alt_count\"],\n \"Number\": \"1\",\n \"Type\": \"Integer\",\n \"Description\": \"Alt count: count of unique reads supporting ALT allele\",\n },\n \"REFC\": {\n \"COLUMN\": [\"ref_count\"],\n \"Number\": \"1\",\n \"Type\": \"Integer\",\n \"Description\": \"Ref count: count of unique reads supporting REF allele\",\n },\n \"RCF\": {\n \"COLUMN\": [\"reclassified\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n \"Description\": \"Reclassification applied\",\n },\n \"RQB\": {\n \"COLUMN\": [\"filtered\", \"rescued\"],\n \"Number\": \"1\",\n \"Type\": \"String\",\n \"Description\": \"Indel used to rescue this entry formatted as CHROM:POS:REF:ALT\",\n },\n }\n\n return d", "def infotodict(seqinfo):\n\n t1 = create_key('anat/sub-{subject}_run-{item:02d}_T1w')\n rest_fmri_ap = create_key('func/sub-{subject}_dir-ap_task-rest_run-{item:02d}_bold')\n rest_topup_ap = create_key('func/sub-{subject}_dir-ap_run-{item:02d}_bold')\n rest_topup_pa = create_key('func/sub-{subject}_dir-pa_run-{item:02d}_bold')\n fmap_rest_magnitude1 = create_key('fmap/sub-{subject}_run-{item:02d}_magnitude1')\n fmap_rest_phasediff = 
create_key('fmap/sub-{subject}_run-{item:02d}_phasediff')\n\n # Create an empty dictionary called info for each key\n\n info = {t1: [],\n rest_fmri_ap: [],\n rest_topup_ap: [],\n rest_topup_pa: [],\n fmap_rest_magnitude1: [],\n fmap_rest_phasediff: [],\n }\n\n # Loop over each sequence. Use if statements to determine which sequences should be linked to which key\n\n for idx, s in enumerate(seqinfo):\n\n if (('MPRAGE_GRAPPA2' in s.series_id) and\n ('tfl3d1_16ns' in s.sequence_name) and\n (s.dim3 == 192) and\n (s.dim4 == 1)):\n info[t1] = [s.series_id]\n\n if (('BOLD_resting 4X4X4 A>>P' in s.series_id) and\n ('epfid2d1_64' in s.sequence_name) and\n (s.dim3 == 35) and\n (s.dim4 == 190)):\n info[rest_fmri_ap] = [s.series_id]\n\n if (('rest_topup_A>>P' in s.series_id) and\n ('epse2d1_64' in s.sequence_name) and\n (s.dim3 == 140) and\n (s.dim4 == 1)):\n info[rest_topup_ap] = [s.series_id]\n\n if (('rest_topup_P>>A' in s.series_id) and\n ('epse2d1_64' in s.sequence_name) and\n (s.dim3 == 140) and\n (s.dim4 == 1)):\n info[rest_topup_pa] = [s.series_id]\n\n if (('Field_mapping 4X4X4 A>>P' in s.series_id) and\n ('fm2d2r' in s.sequence_name) and\n (s.dim3 == 35) and\n (s.dim4 == 1) and\n (s.TE == 4.92)):\n info[fmap_rest_magnitude1] = [s.series_id]\n\n if (('Field_mapping 4X4X4 A>>P' in s.series_id) and\n ('fm2d2r' in s.sequence_name) and\n (s.dim3 == 35) and\n (s.dim4 == 1) and\n (s.TE == 7.38)):\n info[fmap_rest_phasediff] = [s.series_id]\n\n return info", "def infotodict(seqinfo):\n \n \"\"\"\n MCF Pilot Protocol acquired on Friday April 13th\n \n >>> hdc_look.py -s mfc001 -ss 1\n series_id sequence_name series_description dim1 dim2 dim3 dim4 TR TE is_derived is_motion_corrected\n 0 1-localizer *fl2d1 localizer 192 192 3 1 0.020 5.00 False False\n 1 2-pre_Neutral1_A>>P Resting 4X4X4 *epfid2d1_64 pre_Neutral1_A>>P Resting 4X4X4 64 64 35 148 2.000 25.00 False False\n 2 3-pre_topup_A>>P *epse2d1_64 pre_topup_A>>P 64 64 140 1 2.400 38.00 False False\n 3 4-pre_topup_P>>A *epse2d1_64 pre_topup_P>>A 64 64 140 1 2.400 38.00 False False\n 4 5-Field_mapping 4X4X4 A>>P *fm2d2r Field_mapping 4X4X4 A>>P 64 64 35 1 0.488 4.92 False False\n 5 6-Field_mapping 4X4X4 A>>P *fm2d2r Field_mapping 4X4X4 A>>P 64 64 35 1 0.488 7.38 False False\n 6 7-pre+heat1_A>>P 4X4X4 *epfid2d1_64 pre+heat1_A>>P 4X4X4 64 64 35 148 2.000 25.00 False False\n 7 8-pre_Neutral2_A>>P Resting 4X4X4 *epfid2d1_64 pre_Neutral2_A>>P Resting 4X4X4 64 64 35 148 2.000 25.00 False False\n 8 9-pre+heat2_A>>P 4X4X4 *epfid2d1_64 pre+heat2_A>>P 4X4X4 64 64 35 148 2.000 25.00 False False\n 9 10-MPRAGE_GRAPPA2 *tfl3d1_16ns MPRAGE_GRAPPA2 256 240 192 1 2.300 2.98 False False\n 10 11-post_Neutral3_A>>P Resting 4X4X4 *epfid2d1_64 post_Neutral3_A>>P Resting 4X4X4 64 64 35 148 2.000 25.00 False False\n 11 12-post+heat3_A>>P 4X4X4 *epfid2d1_64 post+heat3_A>>P 4X4X4 64 64 35 148 2.000 25.00 False False\n 12 13-post_Neutral4_A>>P Resting 4X4X4 *epfid2d1_64 post_Neutral4_A>>P Resting 4X4X4 64 64 35 148 2.000 25.00 False False\n 13 14-post+heat4_A>>P 4X4X4 *epfid2d1_64 post+heat4_A>>P 4X4X4 64 64 35 148 2.000 25.00 False False\n 14 15-post_topup_A>>P *epse2d1_64 post_topup_A>>P 64 64 140 1 2.400 38.00 False False\n 15 16-post_topup_P>>A *epse2d1_64 post_topup_P>>A 64 64 140 1 2.400 38.00 False False\n \n \"\"\"\n\n bids_prefix = 'sub-{subject}/{session}/'\n\n pre_neutral1_ap_fmap = create_key(bids_prefix + 'func/sub-{subject}_{session}_task-preNeutral1_acq-epi_rec-fmap_bold.{item:01d}')\n pre_heat1_ap_fmap = create_key(bids_prefix + 
'func/sub-{subject}_{session}_task-preHeat1_acq-epi_rec-fmap_bold.{item:01d}')\n pre_heat2_ap_fmap = create_key(bids_prefix + 'func/sub-{subject}_{session}_task-preHeat2_acq-epi_rec-fmap_bold.{item:01d}')\n pre_neutral2_ap_fmap = create_key(bids_prefix + 'func/sub-{subject}_{session}_task-preNeutral2_acq-epi_rec-fmap_bold.{item:01d}')\n\n pre_neutral1_ap_topup = create_key(bids_prefix + 'func/sub-{subject}_{session}_task-preNeutral1_acq-epi_rec-topup_bold.{item:01d}')\n pre_heat1_ap_topup = create_key(bids_prefix + 'func/sub-{subject}_{session}_task-preHeat1_acq-epi_rec-topup_bold.{item:01d}')\n pre_heat2_ap_topup = create_key(bids_prefix + 'func/sub-{subject}_{session}_task-preHeat2_acq-epi_rec-topup_bold.{item:01d}')\n pre_neutral2_ap_topup = create_key(bids_prefix + 'func/sub-{subject}_{session}_task-preNeutral2_acq-epi_rec-topup_bold.{item:01d}')\n\n pre_topup_ap = create_key(bids_prefix + 'fmap/sub-{subject}_{session}_acq-preEpi_dir-ap_epi.{item:01d}')\n pre_topup_pa = create_key(bids_prefix + 'fmap/sub-{subject}_{session}_acq-preEpi_dir-pa_epi.{item:01d}')\n\n # The item was commented out for Phase Difference field maps. Conversion did not work correctly. I removed the item number to try to\n # isolate the problem.\n\n pre_fmap_magnitude1 = create_key(bids_prefix + 'fmap/sub-{subject}_{session}_acq-pre_magnitude1.{item:01d}')\n pre_fmap_phasediff = create_key(bids_prefix + 'fmap/sub-{subject}_{session}_acq-pre_phasediff.{item:01d}')\n\n t1w = create_key(bids_prefix + 'anat/sub-{subject}_{session}_T1w')\n\n post_neutral3_ap_fmap = create_key(bids_prefix + 'func/sub-{subject}_{session}_task-postNeutral3_acq-epi_rec-fmap_bold.{item:01d}')\n post_heat3_ap_fmap = create_key(bids_prefix + 'func/sub-{subject}_{session}_task-postHeat3_acq-epi_rec-fmap_bold.{item:01d}')\n post_heat4_ap_fmap = create_key(bids_prefix + 'func/sub-{subject}_{session}_task-postHeat4_acq-epi_rec-fmap_bold.{item:01d}')\n post_neutral4_ap_fmap = create_key(bids_prefix + 'func/sub-{subject}_{session}_task-postNeutral4_acq-epi_rec-fmap_bold.{item:01d}')\n\n post_neutral3_ap_topup = create_key(bids_prefix + 'func/sub-{subject}_{session}_task-postNeutral3_acq-epi_rec-topup_bold.{item:01d}')\n post_heat3_ap_topup = create_key(bids_prefix + 'func/sub-{subject}_{session}_task-postHeat3_acq-epi_rec-topup_bold.{item:01d}')\n post_heat4_ap_topup = create_key(bids_prefix + 'func/sub-{subject}_{session}_task-postHeat4_acq-epi_rec-topup_bold.{item:01d}')\n post_neutral4_ap_topup = create_key(bids_prefix + 'func/sub-{subject}_{session}_task-postNeutral4_acq-epi_rec-topup_bold.{item:01d}')\n\n post_topup_ap = create_key(bids_prefix + 'fmap/sub-{subject}_{session}_acq-postEpi_dir-ap_epi.{item:01d}')\n post_topup_pa = create_key(bids_prefix + 'fmap/sub-{subject}_{session}_acq-postEpi_dir-pa_epi.{item:01d}')\n\n # Create an empty dictionary called info for each key\n\n info = {pre_neutral1_ap_fmap: [],\n pre_heat1_ap_fmap: [],\n pre_heat2_ap_fmap: [],\n pre_neutral2_ap_fmap: [],\n\n pre_neutral1_ap_topup: [],\n pre_heat1_ap_topup: [],\n pre_heat2_ap_topup: [],\n pre_neutral2_ap_topup: [],\n\n pre_topup_ap: [],\n pre_topup_pa: [],\n\n pre_fmap_magnitude1: [],\n pre_fmap_phasediff: [],\n\n t1w: [],\n\n post_neutral3_ap_fmap: [],\n post_heat3_ap_fmap: [],\n post_heat4_ap_fmap: [],\n post_neutral4_ap_fmap: [],\n\n post_neutral3_ap_topup: [],\n post_heat3_ap_topup: [],\n post_heat4_ap_topup: [],\n post_neutral4_ap_topup: [],\n\n post_topup_ap: [],\n post_topup_pa: [],\n\n }\n\n # Loop over each sequence. 
Use if statements to determine which sequences should be linked to which key\n\n for idx, s in enumerate(seqinfo):\n\n if 'pre_Neutral1' in s.series_id:\n info[pre_neutral1_ap_fmap].append([s.series_id])\n info[pre_neutral1_ap_topup].append([s.series_id])\n\n if 'pre+heat1' in s.series_id:\n info[pre_heat1_ap_fmap].append([s.series_id])\n info[pre_heat1_ap_topup].append([s.series_id])\n\n if 'pre+heat2' in s.series_id:\n info[pre_heat2_ap_fmap].append([s.series_id])\n info[pre_heat2_ap_topup].append([s.series_id])\n\n if 'pre_Neutral2' in s.series_id:\n info[pre_neutral2_ap_fmap].append([s.series_id])\n info[pre_neutral2_ap_topup].append([s.series_id])\n\n if 'pre_topup_A>>P' in s.series_id:\n info[pre_topup_ap].append([s.series_id])\n\n if 'pre_topup_P>>A' in s.series_id:\n info[pre_topup_pa].append([s.series_id])\n\n if (('Field_mapping 4X4X4 A>>P' in s.series_id) and\n (s.TE == 4.92)):\n info[pre_fmap_magnitude1].append([s.series_id])\n \n if (('Field_mapping 4X4X4 A>>P' in s.series_id) and\n (s.TE == 7.38)):\n info[pre_fmap_phasediff].append([s.series_id])\n\n if 'MPRAGE_GRAPPA2' in s.series_id:\n info[t1w].append([s.series_id])\n\n if 'post_Neutral3' in s.series_id:\n info[post_neutral3_ap_fmap].append([s.series_id])\n info[post_neutral3_ap_topup].append([s.series_id])\n\n if 'post+heat3' in s.series_id:\n info[post_heat3_ap_fmap].append([s.series_id])\n info[post_heat3_ap_topup].append([s.series_id])\n\n if 'post+heat4' in s.series_id:\n info[post_heat4_ap_fmap].append([s.series_id])\n info[post_heat4_ap_topup].append([s.series_id])\n\n if 'post_Neutral4' in s.series_id:\n info[post_neutral4_ap_fmap].append([s.series_id])\n info[post_neutral4_ap_topup].append([s.series_id])\n\n if 'post_topup_A>>P' in s.series_id:\n info[post_topup_ap].append([s.series_id])\n\n if 'post_topup_P>>A' in s.series_id:\n info[post_topup_pa].append([s.series_id])\n\n return info", "def infotodict(seqinfo):\n\n # data = create_key('run{item:03d}')\n # info = {data: []}\n # last_run = len(seqinfo)\n\n \"\"\"\n The namedtuple `s` contains the following fields:\n\n * total_files_till_now\n * example_dcm_file\n * series_id\n * dcm_dir_name\n * unspecified2\n * unspecified3\n * dim1\n * dim2\n * dim3\n * dim4\n * TR\n * TE\n * protocol_name\n * is_motion_corrected\n * is_derived\n * patient_id\n * study_description\n * referring_physician_name\n * series_description\n * image_type\n \"\"\"\n\n t1w = create_key('sub-{subject}/{session}/anat/sub-{subject}_{session}_T1w')\n t2w = create_key('sub-{subject}/{session}/anat/sub-{subject}_{session}_T2w')\n func_rest = create_key('sub-{subject}/{session}/func/sub-{subject}_{session}_task-rest_bold')\n dwi_ap = create_key('sub-{subject}/{session}/dwi/sub-{subject}_{session}_acq-AP_dwi')\n dwi_pa = create_key('sub-{subject}/{session}/dwi/sub-{subject}_{session}_acq-PA_dwi')\n t2star = create_key('sub-{subject}/{session}/dwi/sub-{subject}_{session}_T2star')\n t2w_fatsat = create_key('sub-{subject}/{session}/anat/sub-{subject}_{session}_acq-fatsat_T2w')\n \n info = {t1w: [], t2w: [], func_rest: [], dwi_ap: [], dwi_pa: [], t2star: [], t2w_fatsat: []}\n\n for idx, s in enumerate(seqinfo):\n if (s.example_dcm_file == 'mp_rage_1_mm-00001.dcm'):\n info[t1w].append(s.series_id)\n if ('edti_2mm_cdif45_AP' in s.series_description):\n info[dwi_ap].append(s.series_id)\n if ('edti_2mm_cdif45_PA' in s.series_description):\n info[dwi_pa].append(s.series_id)\n if (s.series_description == 'Sag CUBE T2'):\n info[t2w].append(s.series_id)\n if (s.series_description == 'ORIG Sag CUBE T2'):\n 
info[t2w_orig].append(s.series_id)\n if ('T2_1.7mm_fat_sat' in s.series_description): \n info[t2w_fatsat].append(s.series_id)\n if (s.series_description == 'Reverse blip EPI 3mm iso'):\n info[t2star].append(s.series_id) \n if (s.series_description == 'Resting EPI 3mm iso RS') and (s.dim3 == 12300):\n info[func_rest].append(s.series_id)\n return info", "def pro_code_dict(code=False, inverse=False, return_all=False):\n\n pro_code_dict = {\"0500\": \"Date\",\n \"0501\": \"height [> 0: top, < 0: bottom of elem.] (cm)\",\n \"0502\": \"element density (kg m-3)\",\n \"0503\": \"element temperature (degC)\",\n \"0504\": \"element ID (1)\",\n \"0506\": \"liquid water content by volume (%)\",\n \"0508\": \"dendricity (1)\",\n \"0509\": \"sphericity (1)\",\n \"0510\": \"coordination number (1)\",\n \"0511\": \"bond size (mm)\",\n \"0512\": \"grain size (mm)\",\n \"0513\": \"grain type (Swiss Code F1F2F3)\",\n \"0514\": \"grain type, grain size (mm), and density (kg m-3) of SH at surface\",\n \"0515\": \"ice volume fraction (%)\",\n \"0516\": \"air volume fraction (%)\",\n \"0517\": \"stress in (kPa)\",\n \"0518\": \"viscosity (GPa s)\",\n \"0519\": \"soil volume fraction (%)\",\n \"0520\": \"temperature gradient (K m-1)\",\n \"0521\": \"thermal conductivity (W K-1 m-1)\",\n \"0522\": \"absorbed shortwave radiation (W m-2)\",\n \"0523\": \"viscous deformation rate (1.e-6 s-1)\",\n \"0531\": \"deformation rate stability index Sdef\",\n \"0532\": \"natural stability index Sn38\",\n \"0533\": \"stability index Sk38\",\n \"0534\": \"hand hardness either (N) or index steps (1)\",\n \"0535\": \"optical equivalent grain size (mm)\",\n \"0540\": \"bulk salinity (g/kg)\",\n \"0541\": \"brine salinity (g/kg)\",\n \"0601\": \"snow shear strength (kPa)\",\n \"0602\": \"grain size difference (mm)\",\n \"0603\": \"hardness difference (1)\",\n \"0604\": \"ssi\",\n \"0605\": \"inverse texture index ITI (Mg m-4)\",\n \"0606\": \"critical cut length (m)\", }\n\n if inverse:\n inverse = {value: key for key, value in pro_code_dict.items()}\n return(inverse[code])\n if code:\n return (pro_code_dict[code])\n if return_all:\n return (pro_code_dict)", "def main():\n\n # Define the names of required input files, and other main configuration variables\n protein_w_underscores = os.getcwd().split('/')[-1]\n protein = protein_w_underscores.replace('_', ' ')\n pdbfile = 'pdb_structure.pdb' # the name of the PDB file\n pdbchain = None # chain in pdbfile -- there is only one chain, so not relevant here\n seqfile = 'protseq.txt' # file containing the protein sequence\n ddgdatafile = 'ddG_data.txt' # file containing the literature-culled ddG values\n ddgdatafile_warning = False # warn if ddgdatafile has conflicting ddG values for a mutation\n alignment_file = \"uniref_alignment-gaps_lt_0.1-identities_gt_0.5.fasta\" # file with aligned sequences\n phylip_path = '/Users/bloom/phylip-3.67/exe/' # path to phylip phylogeny program\n\n # Define the names of files that will be created by the script if they do not already exist\n cupsatfile = 'CUPSAT_ddGs.txt' # contains the ddG values from CUPSAT\n treefile = \"tree.newick\" # phylogenetic tree created by phylip\n phylipsequencefile = \"phylip_sequence_file\" # phylip input sequence file\n phylipdistancefile = \"phylip_distance_file\" # phylip distance matrix\n pipsddgsfile = \"pips_ddgs.txt\" # pips ddgs file\n regularizingpriorpipsddgsfile = 'pips_ddgs_with_regularizing_priors.txt' # pips ddgs file calculated with regularizing priors\n hydrophobicitypriorpipsddgsfile = 
'pips_ddgs_with_hydrophobicity_priors.txt' # pips ddgs file calculated with hydrophobicity priors\n\n # Begin execution of the program\n seq = open(seqfile).read().strip() # read in protein sequence\n\n # Get the ddG values from CUPSAT and store in the dictionary cupsat_ddgs. Note that\n # in this and all subsequent ddG dictionaries, the first residue is numbered as 0.\n print \"\\nObtaining CUPSAT ddG values...\"\n sys.stdout.flush()\n if os.path.isfile(cupsatfile): # ddG values already obtained, just read from file\n (datetime, cupsat_ddgs) = pips.ddg_inference.ReadDDGs(cupsatfile)\n print \"Read the stored CUPSAT values from %s from the file %s.\" % (datetime, cupsatfile)\n else: # we need to obtain the ddG values from the CUPSAT webserver\n datetime = time.asctime()\n print \"Beginning to calculate and download CUPSAT ddGs at %s...\" % datetime\n sys.stdout.flush()\n cupsat_ddgs = pips.cupsat.RunCUPSAT(pdbfile, seq, pdbchain)\n pips.ddg_inference.WriteDDGs(cupsat_ddgs, cupsatfile, datetime)\n print \"Completed download of CUPSAT ddG values, stored in the file %s.\" % cupsatfile\n rescaled_cupsat_ddgs = pips.ddg_inference.RescaleDDGs(cupsat_ddgs, 10.0, '10TH_TO_90TH', recenter=5.0, min_max=(-3.0, 13.0)) \n\n # Read the literature-culled ddG data from ddgdatafile and store in the dictionary ddg_data\n print \"\\nReading the literature-culled ddG data from %s...\" % ddgdatafile\n sys.stdout.flush()\n ddgmatch = re.compile(\"^(?P<wt>[A-Y])(?P<r>\\d+)(?P<mut>[A-Y])\\s+(?P<ddg>\\-{0,1}\\d+\\.\\d+)$\")\n ddg_data = {}\n for r in range(len(seq)):\n rdict = {}\n wt = seq[r]\n for aa in pips.ddg_inference.AminoAcids():\n if aa != wt:\n rdict[aa] = []\n ddg_data[r] = (wt, rdict)\n for line in open(ddgdatafile).readlines(): # loop over all lines in ddgdatafile\n if line[0] == '#':\n continue # line is a comment\n m = ddgmatch.search(line.strip()) # match the ddG value\n if not m:\n raise ValueError, \"Cannot read ddG value of %s\" % line\n (wt, r, mut, ddg) = (m.group('wt'), int(m.group('r')), m.group('mut'), float(m.group('ddg')))\n r -= 1 # we decrement r because we are calling the first residue 0\n if seq[r] != wt:\n raise ValueError, \"Wildtype residue does not match protein sequence in %s\" % line\n ddg_data[r][1][mut].append(ddg) \n nddgs = 0\n ddgslist = []\n for (r, (wt, rddgs)) in ddg_data.iteritems():\n for mut in rddgs.iterkeys():\n if not rddgs[mut]:\n rddgs[mut] = None # no ddG value\n else:\n nddgs += 1\n ddg0 = rddgs[mut][0]\n allthesame = True\n for ddgi in rddgs[mut][1 : ]: # see if all ddG values are the same for mutation\n if ddgi != ddg0:\n allthesame = False\n if allthesame: # all of the ddG values are the same, take this value\n rddgs[mut] = ddg0\n ddgslist.append(ddg0)\n else: # ddG values differ, print warning and take the average value\n ddg = pips.stats.Mean(rddgs[mut])\n if ddgdatafile_warning:\n print \"WARNING: Mutation %s%d%s has multiple ddG values of\" % (wt, r + 1, mut),\n for ddgi in rddgs[mut]:\n print \"%.2f\" % ddgi,\n print \"--- taking the average value of %.2f.\" % ddg\n sys.stdout.flush()\n rddgs[mut] = ddg\n ddgslist.append(ddg)\n print \"Read a total of %d different ddG values from %s. 
The mean value is %.2f, the maximum value is %.2f, and the minimum value is %.2f.\" % (nddgs, ddgdatafile, pips.stats.Mean(ddgslist), max(ddgslist), min(ddgslist))\n\n # Read the aligned sequences (into sequences), give short names for phylip\n sequences = pips.fasta.Read(alignment_file)\n nsequences = len(sequences)\n sequences = [(\"SEQ%d\" % (i + 1), sequences[i][1]) for i in range(nsequences)] # rename \n pips.fasta.Write(sequences, 'renamed_alignment.fasta')\n sequences = pips.align.StripGapsToFirstSequence(sequences) \n print \"\\nThere are %d sequences in the alignment.\" % nsequences\n\n # Construct the phylogenetic tree\n if os.path.isfile(treefile):\n print \"A phylogenetic tree has already been constructed for these sequences, and is being read from %s.\" % treefile\n newick_tree = open(treefile).read()\n else:\n print \"Constructing a phylogenetic tree for these sequences...\"\n sys.stdout.flush()\n pips.phylip.WritePhylipSequenceFile(sequences, phylipsequencefile)\n open(phylipdistancefile, 'w').write(pips.phylip.Protdist(phylipsequencefile, phylip_path))\n newick_tree = pips.phylip.DistanceTree(phylipdistancefile, phylip_path, molecular_clock=True, neighbor_joining=True)\n print \"Finished constructing the phylogenetic tree, writing it to %s.\" % treefile\n sys.stdout.flush()\n open(treefile, 'w').write(newick_tree)\n\n # Perform the pips analysis\n sequences = pips.fasta.UnknownsToGaps(sequences) # replace unknown amino acids with gaps\n random.seed(1) # seed the random number generator to make output predictable\n (datetime, pips_ddgs) = pips.ddg_inference.ReadDDGs(pipsddgsfile)\n\n # Read things in with the new pips\n tree = pips.tree.Tree(newick_tree, tipnames_sequences=sequences) # phylogenetic tree data\n ddgset = pips.ddg_inference.DDGSet(seq, tree, ('TRANSITION_TRANSVERSION_RATIO', 0.5), ('SPECIFIED', pips_ddgs, 0, 0), ('BETA', 3, ('KYTE_DOOLITTLE_HYDROPHOBICITY', 1, 0)), 5.0, underflow=5, runtestcode=False)\n ddgset.MaximizePosterior(nrandomstarts=1, printprogress=True)\n new_pips_ddgs = ddgset.DDGDict()\n pips.ddg_inference.WriteDDGs(new_pips_ddgs, 'new_pips_ddgs.txt', time.asctime())\n\n # Get the consensus ddG\n consensus_ddgs = pips.ddg_inference.ConsensusDDGs(seq, sequences, pseudocounts=1)\n\n sys.exit()\n\n # Perform analysis of correlations, and make pylab plots\n print \"\\nAnalysis of correlations to experimental ddG values...\"\n ddgtypes = ['actual', 'CUPSAT', 'consensus', '\\\\begin{tabular}{c} PIPS with \\\\\\\\ informative prior \\end{tabular}', '\\\\begin{tabular}{c} PIPS with \\\\\\\\ regularizing prior \\end{tabular}', '\\\\begin{tabular}{c} PIPS with \\\\\\\\ hydrophobicity prior \\end{tabular}']\n zippedlists = pips.ddg_inference.ZippedDDGLists(ddg_data, cupsat_ddgs, consensus_ddgs, pips_ddgs, pips_ddgs_regularizing, pips_ddgs_hydrophobicity)\n mutations = zippedlists[0]\n nmutations = len(mutations)\n ddgs = dict([(ddgtypes[i], zippedlists[i + 1]) for i in range(len(ddgtypes))])\n pylab.rc('text', usetex=True)\n nplots = len(ddgtypes) - 1 # number of different plots\n invnplots = 1.0 / nplots\n (xscale, yscale) = (2.8, 2.5) # each plot covers a rectangle of this size, in inches\n bottom = 1.06\n (tmargin, bmargin, lmargin, rmargin) = (0.03, 0, 0.22, 0.03)\n fig = pylab.figure(figsize=(xscale * (1 + lmargin + rmargin), 3 * yscale * (1 + tmargin + bmargin) * bottom))\n figaxes = pylab.axes([0, 0, 1, 1])\n figaxes.axison = False\n iplot = 0\n maxticks = 5\n (xmin, xmax) = (int(round(min(ddgs['actual'])) - 1), int(round(max(ddgs['actual'])) + 1))\n 
xtick = 1\n while (xmax - xmin) / float(xtick) > maxticks:\n xtick += 1\n nxticks = int(math.ceil((xmax - xmin) / float(xtick)))\n xticks = [x for x in range(xmin, xmin + nxticks * xtick + 1, xtick)]\n xticklocator = matplotlib.ticker.FixedLocator(xticks)\n xtickformatter = matplotlib.ticker.FixedFormatter([\"%d\" % x for x in xticks])\n for ddgtype in ddgtypes[1 : ]:\n if ddgtype == ddgtypes[-1]:\n xlabel = 'experimental $\\Delta\\Delta G$ values'\n else:\n xlabel = ''\n (r, p, npoints) = pips.stats.PearsonCorrelation(ddgs['actual'], ddgs[ddgtype])\n axes = pylab.axes([lmargin, 1.0 - invnplots * (1 + iplot + bmargin) / bottom, 1.0 - rmargin - lmargin, invnplots * (1.0 - tmargin - bmargin) / bottom], xlabel=xlabel, ylabel=ddgtype)\n nolabels = matplotlib.ticker.NullFormatter()\n (ymin, ymax) = (int(round(min(ddgs[ddgtype])) - 1), int(round(max(ddgs[ddgtype])) + 1))\n ytick = 1\n while (ymax - ymin) / float(ytick) > maxticks:\n ytick += 1\n nyticks = int(math.ceil((ymax - ymin) / float(ytick)))\n yticks = [y for y in range(ymin, ymin + nyticks * ytick + 1, ytick)]\n yticklocator = matplotlib.ticker.FixedLocator(yticks)\n ytickformatter = matplotlib.ticker.FixedFormatter([\"%d\" % y for y in yticks])\n axes.xaxis.set_major_locator(xticklocator)\n axes.yaxis.set_major_locator(yticklocator)\n axes.yaxis.set_major_formatter(ytickformatter)\n if ddgtype != ddgtypes[-1]:\n axes.xaxis.set_major_formatter(nolabels)\n else:\n axes.xaxis.set_major_formatter(xtickformatter)\n iplot += 1\n pylab.text(0.64, 0.14, '$R^2 = %.2f$' % r**2, transform=axes.transAxes, ha='left', va='top', size=14)\n pylab.scatter(ddgs['actual'], ddgs[ddgtype], figure=fig, axes=axes)\n pylab.savefig(\"%s_vertical_plot.eps\" % protein_w_underscores)\n\n pylab.show()", "def bioinfo():\n\n pass", "def info() -> Dict[str, Any]:", "def _generate_common_proposal_info_in_dict(proposal_info: 'ProposalInfo') -> dict:\n proposal_info_in_dict = {\n \"id\": '0x' + bytes.hex(proposal_info.id),\n \"proposer\": str(proposal_info.proposer),\n \"proposerName\": proposal_info.proposer_name,\n \"status\": hex(proposal_info.status),\n \"startBlockHeight\": hex(proposal_info.start_block_height),\n \"endBlockHeight\": hex(proposal_info.end_block_height),\n \"contents\": {\n \"title\": proposal_info.title,\n \"description\": proposal_info.description,\n \"type\": hex(proposal_info.type),\n \"value\": proposal_info.value\n }\n }\n return proposal_info_in_dict", "def convertXmlToProtein(self, xml):\n\t\t# XML to dictionary\n\t\tproteinObject = Protein()\n\t\t\n\t\tdictionary = xmltodict.parse(xml)\n\t\troot = dictionary[\"uniprot\"]\n\t\tentry = root[\"entry\"]\n\t\t\n\t\tfor element, value in entry.items():\n\t\t\tif element == \"@accession\":\n\t\t\t\tproteinObject.addAttribute(\"id\", \"uniprot\", value)\n\t\t\t\t\n\t\t\tif element == \"name\":\n\t\t\t\tproteinObject.addAttribute(\"proteinShortName\", \"uniprot\", value)\n\t\t\t\t\n\t\t\tif element == \"protein\":\n\t\t\t\tfullname = value[\"recommendedName\"][\"fullName\"]\n\t\t\t\tproteinObject.addAttribute(\"proteinFullName\", \"uniprot\", fullname)\n\t\t\t\t\n\t\t\tif element == \"@created\":\n\t\t\t\tyear,month,day = value.split(\"-\")\n\t\t\t\tproteinObject.addAttribute(\"creationDate\", \"uniprot\", self.convertDateToNative(day,month,year) )\n\t\t\t\t\n\t\t\tif element == \"@modified\":\n\t\t\t\tyear,month,day = value.split(\"-\")\n\t\t\t\tproteinObject.addAttribute(\"modifiedDate\", \"uniprot\", self.convertDateToNative(day,month,year) )\n\t\t\t\n\t\t\tif element == \"comment\":\n\t\t\t\tfor 
comment in entry[\"comment\"]:\n\t\t\t\t\tif \"text\" in comment:\n\t\t\t\t\t\ttext = comment[\"text\"][\"#text\"] if isinstance(comment[\"text\"], OrderedDict) else comment[\"text\"]\n\t\t\t\t\t\tproteinObject.addAttribute(comment[\"@type\"], \"uniprot\",text)\n\t\t\t\t\t\n\t\t\tif element == \"gene\":\n\t\t\t\tgenes = []\n\t\t\t\tfor gene in value[\"name\"]:\n\t\t\t\t\tif \"#text\" in gene and isinstance(gene, OrderedDict):\n\t\t\t\t\t\tgenes.append(gene[\"#text\"])\n\t\t\t\t\t\n\t\t\t\tproteinObject.addAttribute(\"geneName\", \"uniprot\", genes)\n\t\t\t\t\t\n\t\t\tif element == \"organism\":\n\t\t\t\tif isinstance(value[\"name\"], list):\n\t\t\t\t\torganisms = []\n\t\t\t\t\tfor organism in value[\"name\"]:\n\t\t\t\t\t\torganisms.append(organism[\"#text\"])\n\t\t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\tproteinObject.addAttribute(\"organism\", \"uniprot\", value[\"name\"][\"#text\"])\n\t\t\t\t\n\t\t\t\n\t\t\tif element == \"sequence\":\n\t\t\t\tproteinObject.addAttribute(\"sequence\", \"uniprot\",value[\"#text\"].replace(\"\\n\",\"\"))\n\t\t\t\tproteinObject.addAttribute(\"sequencelength\", \"uniprot\",value[\"@length\"].replace(\"\\n\",\"\"))\n\n\n\t\treturn proteinObject", "def build(ctx, inputs, output, cs):\n click.echo('chemdataextractor.dict.build')\n dt = DictionaryTagger(lexicon=ChemLexicon(), case_sensitive=cs)\n names = []\n for input in inputs:\n for line in input:\n tokens = line.split()\n names.append(tokens)\n dt.build(words=names)\n dt.save(output)", "def newPhraseInfo(phrase):\n return {\"count\":0,\n \"ids\":set(),\n \"phrase\":phrase\n }", "def get_meta_information() -> Dict:\n return {'name': 'NAS-Bench-201',\n 'references': ['Xuanyi Dong, Yi Yang',\n 'NAS-Bench-201: Extending the Scope of Reproducible Neural Architecture Search',\n 'https://openreview.net/forum?id=HJxyZkBKDr',\n 'https://github.com/D-X-Y/AutoDL-Projects'],\n }", "def compile_metadata(inventory_dict):\n inventory_meta = {}\n #inventory_meta['InventoryDictionary'] = inventory_dict\n for source, year in inventory_dict.items():\n inventory_meta[source] = stewi.getMetadata(source, year)\n return inventory_meta", "def clean_form_dict(self, dict_):\n clean_dict = {}\n first_pdb_type, first_pdb_id, first_pdb_file = '', '', ''\n second_pdb_type, second_pdb_id, second_pdb_file = '', '', ''\n x1, y1, z1, x2, y2, z2 = '0', '0', '0', '0', '0', '0'\n degXY_1, degYZ_1, degXY_2, degYZ_2 = '0', '0', '0', '0'\n\n num_of_proteins = dict_.get('num_of_proteins')\n user_rand = dict_.get('user_rand')\n first_pdb_type = dict_.get('first_pdb_type')\n if first_pdb_type == 'by_id':\n first_pdb_id = dict_.get('first_pdb_id')\n first_pdb_file = ''\n elif first_pdb_type == 'by_file':\n first_pdb_id = ''\n first_pdb_file = dict_.get('first_pdb_file')\n\n if num_of_proteins == '2':\n second_pdb_type = dict_.get('second_pdb_type')\n if second_pdb_type == 'by_id':\n second_pdb_id = dict_.get('second_pdb_id')\n second_pdb_file = ''\n elif first_pdb_type == 'by_file':\n second_pdb_id = ''\n second_pdb_file = dict_.get('second_pdb_file')\n x2, y2, z2 = dict_.get('x2', 0), dict_.get('y2', 0), dict_.get('z2', 0)\n degXY_2, degYZ_2 = dict_.get('degXY_2', 0), dict_.get('degYZ_2', 0)\n\n x1, y1, z1 = dict_.get('x1', 0), dict_.get('y1', 0), dict_.get('z1', 0)\n degXY_1, degYZ_1 = dict_.get('degXY_1', 0), dict_.get('degYZ_1', 0)\n\n temperature_scale = dict_.get('temperature_scale', '')\n temperature = dict_.get('temperature', '')\n time_step_number = dict_.get('time_step_number', '')\n\n clean_dict['user_rand'] = user_rand\n 
clean_dict['num_of_proteins'] = num_of_proteins\n clean_dict['first_pdb_type'] = first_pdb_type\n clean_dict['first_pdb_id'] = first_pdb_id\n clean_dict['first_pdb_file'] = first_pdb_file\n clean_dict['second_pdb_type'] = second_pdb_type\n clean_dict['second_pdb_id'] = second_pdb_id\n clean_dict['second_pdb_file'] = second_pdb_file\n clean_dict['x1'] = x1\n clean_dict['y1'] = y1\n clean_dict['z1'] = z1\n clean_dict['x2'] = x2\n clean_dict['y2'] = y2\n clean_dict['z2'] = z2\n clean_dict['degXY_1'] = degXY_1\n clean_dict['degYZ_1'] = degYZ_1\n clean_dict['degXY_2'] = degXY_2\n clean_dict['degYZ_2'] = degYZ_2\n clean_dict['temperature_scale'] = temperature_scale\n clean_dict['temperature'] = temperature\n clean_dict['time_step_number'] = time_step_number\n\n return clean_dict", "def info(self) -> dict:", "def Translate(self):\n dna_to_protein = {\n 'ATA':'I', 'ATC':'I', 'ATT':'I', 'ATG':'M',\n 'ACA':'T', 'ACC':'T', 'ACG':'T', 'ACT':'T',\n 'AAC':'N', 'AAT':'N', 'AAA':'K', 'AAG':'K',\n 'AGC':'S', 'AGT':'S', 'AGA':'R', 'AGG':'R',\n 'CTA':'L', 'CTC':'L', 'CTG':'L', 'CTT':'L',\n 'CCA':'P', 'CCC':'P', 'CCG':'P', 'CCT':'P',\n 'CAC':'H', 'CAT':'H', 'CAA':'Q', 'CAG':'Q',\n 'CGA':'R', 'CGC':'R', 'CGG':'R', 'CGT':'R',\n 'GTA':'V', 'GTC':'V', 'GTG':'V', 'GTT':'V',\n 'GCA':'A', 'GCC':'A', 'GCG':'A', 'GCT':'A',\n 'GAC':'D', 'GAT':'D', 'GAA':'E', 'GAG':'E',\n 'GGA':'G', 'GGC':'G', 'GGG':'G', 'GGT':'G',\n 'TCA':'S', 'TCC':'S', 'TCG':'S', 'TCT':'S',\n 'TTC':'F', 'TTT':'F', 'TTA':'L', 'TTG':'L',\n 'TAC':'Y', 'TAT':'Y', 'TAA':'*', 'TAG':'*',\n 'TGC':'C', 'TGT':'C', 'TGA':'*', 'TGG':'W',\n }\n \n length = self.length\n reading = {}\n for i in range(3):\n reading['frame_'+str(i+1)] = tuple([dna_to_protein[self.sequence[index:index+3]] for index in range(i,length-2,3)])\n reverse_strand = Analyze_DNA_Sequence.Complementary(self,'5-3')\n for i in range(3):\n reading['frame_'+str(i+4)] = tuple([dna_to_protein[reverse_strand[index:index+3]] for index in range(i,length-2,3)])\n\n return reading", "def get_info_dict(self):\n return {\n 'bidi': self.bidi,\n 'code': self.code,\n 'name': self.name,\n 'name_local': self.name_local\n }", "def _build_single_proc_data_info(proc_data_id, data_type, samples):\n proc_data = Artifact(proc_data_id)\n proc_info = {'processed_date': str(proc_data.timestamp)}\n proc_info['pid'] = proc_data_id\n proc_info['data_type'] = data_type\n proc_info['processed_date'] = str(proc_info['processed_date'])\n params = proc_data.processing_parameters.values\n del params['input_data']\n ref = Reference(params.pop('reference'))\n proc_info['reference_name'] = ref.name\n proc_info['taxonomy_filepath'] = basename(ref.taxonomy_fp)\n proc_info['sequence_filepath'] = basename(ref.sequence_fp)\n proc_info['tree_filepath'] = basename(ref.tree_fp)\n proc_info['reference_version'] = ref.version\n proc_info['algorithm'] = 'sortmerna'\n proc_info['samples'] = sorted(proc_data.prep_templates[0].keys())\n proc_info.update(params)\n\n return proc_info", "def add_prot(code, target, xtal_path, xtal, input_dict):\n\n # Split code by : before the get or create operation and use the first part of the name (split[0])\n # code is normally the xtal directory in the aligned folder, but this may have been modified to have\n # an alternate name added to it - in the form 'directory:alternate_name'.\n code_first_part = code.split(\":\")[0]\n proteins = Protein.objects.filter(code__contains=code_first_part, target_id=target)\n if proteins.exists():\n new_prot = proteins.first()\n logger.debug(\"Pre-existing Protein (%s)\", 
new_prot)\n else:\n new_prot = Protein.objects.get_or_create(code=code, target_id=target)\n logger.debug(\"New Protein (code='%s' target_id='%s')\", code, target)\n new_prot = new_prot[0]\n\n new_prot.apo_holo = True\n\n # Check filepaths of all associated files.\n filepaths = {\n 'pdb_info': ('pdbs', get_path_or_none(xtal_path, xtal, input_dict, \"APO\")),\n 'bound_info': ('bound', get_path_or_none(xtal_path, xtal, input_dict, \"BOUND\")),\n 'cif_info': ('cifs', get_path_or_none(xtal_path, xtal, input_dict, \"CIF\")),\n 'mtz_info': ('mtzs', get_path_or_none(xtal_path, xtal, input_dict, \"MTZ\")),\n 'map_info': ('maps', get_path_or_none(xtal_path, xtal, input_dict, \"PMAP\")),\n 'sigmaa_info': ('maps', get_path_or_none(xtal_path, xtal, input_dict, \"SIGMAA\")),\n 'diff_info': ('maps', get_path_or_none(xtal_path, xtal, input_dict, \"DIFF\")),\n 'event_info': ('maps', get_path_or_none(xtal_path, xtal, input_dict, \"EVENT\")),\n 'trans_matrix_info': ('trans', get_path_or_none(xtal_path, xtal, input_dict, \"TRANS\")),\n 'pdb_header_info': ('pdbs', get_path_or_none(xtal_path, xtal, input_dict, \"HEADER\")),\n 'apo_desolve_info': ('pdbs', get_path_or_none(xtal_path, xtal, input_dict, \"DESOLV\")),\n }\n\n to_unpack = {k: v for k, v in filepaths.items() if v[1] is not None}\n\n for key in to_unpack.keys():\n save_path = os.path.join(to_unpack[key][0], to_unpack[key][1].split('/')[-1])\n path = default_storage.save(save_path, open(to_unpack[key][1], 'rb'))\n setattr(new_prot, key, path)\n\n new_prot.save()\n return new_prot", "def audit_process():\n st_types, pc_types = audit(OSMFILE)\n #pprint.pprint(dict(st_types))\n #pprint.pprint(dict(pc_types))\n\n correct_name = {}\n for st_type, ways in st_types.iteritems():\n for name in ways:\n better_name = update_name(name, mapping)\n correct_name[name] = better_name\n #print name, \"=>\", better_name\n \n correct_code = {}\n for _, pc_type in pc_types.iteritems():\n for code in pc_type:\n better_code = update_postalcode(code)\n correct_code[code] = better_code\n #print code, \"=>\", better_code\n \n return correct_name, correct_code", "def main():\n\n args = get_args()\n codons = {\n 'A': 4, 'C': 2, 'D': 2, 'E': 2, 'F': 2, 'G': 4, 'H': 2, 'I': 3,\n 'K': 2, 'L': 6, 'M': 1, 'N': 2, 'P': 4, 'Q': 2, 'R': 6, 'S': 6,\n 'T': 4, 'V': 4, 'W': 1, 'Y': 2, '*': 3,\n }\n print(product(map(codons.get, args.protein + '*')) % args.modulo)", "def setupdict(parfile):\n pardict = {}\n with open(parfile,'r+') as f:\n for line in f:\n flags = line[56:65].split(' ')\n try:\n flags = [int(f) for f in flags]\n except:\n continue\n # if we found res pars\n if( all(flags) <= 3 ):\n # if any varied pars\n if( any(flags) > 0 ):\n # energies are dict keys\n estring = endf_float_str(float(line[0:11]))\n pardict[estring] = []\n pars = [float(line[0+11*i:11+11*i]) for i in range(len(flags))]\n for i,flag in enumerate(flags):\n if( flag > 0 ):\n pardict[estring].append((i,pars[i]))\n return pardict", "def generate_cons_pos_all_info(cons_pos_all,all_gpcrs_info):\n for prot_info in all_gpcrs_info:\n cons_pos_prot = prot_info[4]\n for gpcr_class, cons_class_lists in cons_pos_prot.items():\n if cons_class_lists:\n list_num=0 # list 0 or 1\n while list_num < len(cons_class_lists):\n cons_pos_li=cons_class_lists[list_num]\n cons_pos_num = 0\n while cons_pos_num < len(cons_pos_li):\n cons_pos_info=cons_pos_li[cons_pos_num]\n if cons_pos_info[2] != \"None\":\n cons_pos_all[gpcr_class][list_num][cons_pos_num][2]+=(cons_pos_info[2]+\",\")\n cons_pos_num +=1\n list_num+=1\n show_class={}\n for 
gpcr_class, cons_pos_class in cons_pos_all.items():\n for cons_pos_li in cons_pos_class:\n for cons_pos in cons_pos_li:\n if cons_pos[2]:\n cons_pos[2]=cons_pos[2].rstrip(\",\")\n else:\n cons_pos[1]=\"Position not found.\"\n cons_pos[2]=\"None\"\n show_class[gpcr_class]=True\n active_class_all= {'A': ['', ''], 'C': ['', ''], 'F': ['', ''], 'B': ['', '']}\n classes=sorted(cons_pos_all)\n active_class_all[classes[0]]=['active', 'in active']\n return (cons_pos_all,show_class,active_class_all)", "def _makeimap(self):\n self.map_['source'] = 'NAOJ'\n self.map_['provider'] = 'NRO'\n self.map_['instrument'] = 'NORH'\n self.map_['phyobs'] = ''", "def extract_uniprot4protein_keys(self, proteins_dict):\n\t\treturn {key.split(\"|\")[1]: value for (key, value) in proteins_dict.items()}", "def infotodict(seqinfo):\n\n last_run = len(seqinfo)\n\n info = {\n t1w: [], t2w: [], epi_fmap_AP: [], epi_fmap_PA: [],\n\n rest_ap_run1: [], rest_pa_run2: [],\n rest_ap_run3: [], rest_pa_run4: [],\n rest_ap_run1_sbref: [], rest_pa_run2_sbref: [],\n rest_ap_run3_sbref: [], rest_pa_run4_sbref: [],\n\n dwi_ap_run1: [], dwi_pa_run2: [],\n dwi_ap_run3: [], dwi_pa_run4: [],\n dwi_ap_run1_sbref: [], dwi_pa_run2_sbref: [],\n dwi_ap_run3_sbref: [], dwi_pa_run4_sbref: []\n }\n\n def get_latest_series(key, s):\n # if len(info[key]) == 0:\n info[key].append(s.series_id)\n # else:\n # info[key] = [s.series_id]\n\n for s in seqinfo:\n if \"abort\" in s.protocol_name.lower():\n continue\n\n if s.protocol_name == 'SpinEchoFieldMap_AP':\n get_latest_series(epi_fmap_AP, s)\n\n elif s.protocol_name == 'SpinEchoFieldMap_PA':\n get_latest_series(epi_fmap_PA, s)\n\n elif s.protocol_name == 'rfMRI_REST_AP_Run1':\n if s.dim3 > 1:\n get_latest_series(rest_ap_run1, s)\n else:\n get_latest_series(rest_ap_run1_sbref, s)\n\n elif s.protocol_name == 'rfMRI_REST_PA_Run2':\n if s.dim3 > 1:\n get_latest_series(rest_pa_run2, s)\n else:\n get_latest_series(rest_pa_run2_sbref, s)\n\n elif s.protocol_name == 'rfMRI_REST_AP_Run3':\n if s.dim3 > 1:\n get_latest_series(rest_ap_run3, s)\n else:\n get_latest_series(rest_ap_run3_sbref, s)\n\n elif s.protocol_name == 'rfMRI_REST_PA_Run4':\n if s.dim3 > 1:\n get_latest_series(rest_pa_run4, s)\n else:\n get_latest_series(rest_pa_run4_sbref, s)\n\n # dMRI naming conventions switch half-way through. 
Some end with _RunX\n elif s.protocol_name.startswith('dMRI_dir98_AP'):\n if s.dim3 > 1:\n get_latest_series(dwi_ap_run1, s)\n else:\n get_latest_series(dwi_ap_run1_sbref, s)\n\n elif s.protocol_name.startswith('dMRI_dir98_PA'):\n if s.dim3 > 1:\n get_latest_series(dwi_pa_run2, s)\n else:\n get_latest_series(dwi_pa_run2_sbref, s)\n\n elif s.protocol_name.startswith('dMRI_dir99_AP'):\n if s.dim3 > 1:\n get_latest_series(dwi_ap_run3, s)\n else:\n get_latest_series(dwi_ap_run3_sbref, s)\n\n elif s.protocol_name.startswith('dMRI_dir99_PA'):\n if s.dim3 > 1:\n get_latest_series(dwi_pa_run4, s)\n else:\n get_latest_series(dwi_pa_run4_sbref, s)\n\n elif s.protocol_name == 'T1w_MPR':\n get_latest_series(t1w, s)\n\n elif s.protocol_name == 'T2w_SPC':\n get_latest_series(t2w, s)\n\n else:\n print(\"Series not recognized!: \", s.protocol_name, s.dcm_dir_name)\n return info", "def present_map(cmdb_ci_types, db_ci_types, cmdb_rel_types, db_rel_types, cmdb_ci_attributes, db_ci_attributes, cmdb_rel_attributes, db_rel_attributes, similar_ci, similar_rel, similar_attr_ci, similar_attr_rel):\n print(\"\\n===============================================================================================================================================================================\")\n print(blue + \"CONFIGURATION ITEMS MAPPING\" + reset)\n print(\"===============================================================================================================================================================================\")\n print()\n data = []\n for db_ci in similar_ci:\n cmdb_ci = list(similar_ci[db_ci].keys())[0]\n sim = similar_ci.get(db_ci).get(cmdb_ci)\n row = [cmdb_ci, cmdb_ci_types.get(\n cmdb_ci), db_ci, db_ci_types.get(db_ci), sim]\n data.append(row)\n print(tabulate(data, headers=[\n \"CI in CMDB\", \"Description\", \"CI in DB\", \"Description\", \"Similarity Coeficient\"]))\n print()\n\n for db_ci in similar_ci:\n data = []\n cmdb_ci = list(similar_ci[db_ci].keys())[0]\n print(\"**************************************************************************************************\")\n print(\n green + str(cmdb_ci) + \" Attributes Mapping\" + reset)\n print(\"**************************************************************************************************\")\n print()\n atrs = similar_attr_ci.get(cmdb_ci)\n if atrs != None:\n for cmdb_at in atrs:\n db_at = list(atrs.get(cmdb_at).keys())[0]\n sim = atrs.get(cmdb_at).get(db_at)\n row = [cmdb_at, cmdb_ci_attributes.get(\n cmdb_ci).get(cmdb_at), db_at, db_ci_attributes.get(db_ci).get(db_at), sim]\n data.append(row)\n print(tabulate(data, headers=[\"Attribute in CMDB\", \"Description\",\n \"Attribute in DB\", \"Description\", \"Similarity Coeficient\"]))\n print()\n print()\n\n print(\"===============================================================================================================================================================================\")\n print(blue + \"RELATIONSHIPS MAPPING\" + reset)\n print(\"===============================================================================================================================================================================\")\n print()\n\n data = []\n for db_rel in similar_rel:\n cmdb_rel = list(similar_rel[db_rel].keys())[0]\n sim = similar_rel.get(db_rel).get(cmdb_rel)\n row = [cmdb_rel, cmdb_rel_types.get(\n cmdb_rel), db_rel, db_rel_types.get(db_rel), sim]\n data.append(row)\n atrs = similar_attr_rel.get(cmdb_rel)\n print(tabulate(data, headers=[\n \"Relationship in CMDB\", 
\"Description\", \"Relationship in DB\", \"Description\", \"Similarity Coeficient\"]))\n print()\n\n for db_rel in similar_rel:\n data = []\n cmdb_rel = list(similar_rel[db_rel].keys())[0]\n print(\"**************************************************************************************************\")\n print(green + str(cmdb_rel) + \" Attributes Mapping\" + reset)\n print(\"**************************************************************************************************\")\n print()\n for cmdb_at in atrs:\n db_at = list(atrs.get(cmdb_at).keys())[0]\n sim = atrs.get(cmdb_at).get(db_at)\n cmdb_at_desc = cmdb_rel_attributes.get(cmdb_rel)\n if cmdb_at_desc != None:\n cmdb_at_desc = cmdb_at_desc.get(cmdb_at)\n db_at_desc = db_rel_attributes.get(db_rel)\n if db_at_desc != None:\n db_at_desc = db_at_desc.get(db_at)\n row = [cmdb_at, cmdb_at_desc, db_at,\n db_at_desc, sim]\n data.append(row)\n print(tabulate(data, headers=[\"Attribute in CMDB\", \"Description\",\n \"Attribute in DB\", \"Description\", \"Similarity Coeficient\"]))\n print()", "def generate(self) -> Dict[str, Any]:\n raise NotImplementedError", "def ParserPDB(a):\n\tcontenu=list()\n\tmon_fichier=open(a,\"r\")\n\tfor line in mon_fichier.readlines():\n\t\tcontenu.append(line.strip()) #met le contenu du fichier pdb dans la liste \"contenu\"\n\n\tacidea=dict()\n\t\n\n\n\tfor chain in range(len(contenu)): #On parcourt cette liste contenant tout le fichier pdb\n\t\tif contenu[chain][0:5]==\"MODEL\":\n\t\t\tnewProt = contenu[chain][7:14]\n\t\t\t\n\t\t\tif newProt not in acidea.keys():\n\t\t\t\tacidea[newProt]={}\n\t\t\t\t\n\t\tif contenu[chain][0:4]==\"ATOM\": #Si la ligne commence par \"ATOM\" \n\t\t\tChaine = contenu[chain][21]\n\t\t\t\n\t\t\tif Chaine not in acidea[newProt].keys(): #Si la chaine ( A, B ... 
) existe pas deja \n\t\t\t\tacidea[newProt][Chaine] = {} #creation du dictionnaire qui a pour nom les caractères a la ligne 21 ( Chaine)\n\t\t\t\n\t\t\tPosi = contenu[chain][24:26]\n\t\t\tif Posi not in acidea[newProt][Chaine].keys(): #Si la position pour une chaine n'existe pas deja (ex : -3 dans la chaine A)\n\t\t\t\tacidea[newProt][Chaine][Posi]={} # creation du dictionnaire poisition dans le dictionnaire chaine \n\t\t\t\n\t\t\tresidu = contenu[chain][12:16]\n\t\t\tif residu not in acidea[newProt][Chaine][Posi].keys(): #si le residu n'existe pas deja pour une chaine et une position donnée (ex : un CO de la chaine A a la position -3)\n\t\t\t\tacidea[newProt][Chaine][Posi][residu]= {} #Creation du dictionnaire nom de l'atome, contenu dans le dictionnaire position lui meme contenu dans le dictionnaire chaine\t\n\t\t\t\n\t\t\t#repartition de l'information dans le dictionnaire.\n\t\t\tacidea[newProt][Chaine][Posi][residu][\"x\"] = float(contenu[chain][32:38]) #Mise des information de X dans le dictionnaire atome\n\t\t\tacidea[newProt][Chaine][Posi][residu][\"y\"] = float(contenu[chain][40:46]) #Mise des information de Y dans le dictionnaire atome\n\t\t\tacidea[newProt][Chaine][Posi][residu][\"z\"] = float(contenu[chain][48:54]) #Meme chose pour Z\n\t\t\tacidea[newProt][Chaine][Posi][residu][\"Id\"] = contenu[chain][9:11] #Meme chose pour Identifiant\n\n\treturn( acidea)", "def build_pcfg(self):\n part = 0 # 0 for grammar, 1 for lexicon\n rule = r'(\\d*\\.\\d*)\\ (.*)->(.*)[\\n]*'\n\n with open(self.grammar_txt) as file:\n for line in file:\n if line == 'Grammar\\n':\n continue\n elif line == 'Lexicon\\n':\n part = 1\n else:\n line = [s for s in re.split(rule, line) if s]\n prob, parent, child = line[0], line[1], line[2]\n if part is 0: # Grammar part\n child = tuple(i for i in child.split())\n self.grammar[parent][child] = Decimal(prob)\n else: # Lexicon part\n self.lexicon[parent][child.lower()] = Decimal(prob)\n # print_nested_dict(self.grammar)\n # print_nested_dict(self.lexicon)\n file.close()", "def _CDSinfo(CDS, outfmt, fmtdictCDS = None,\n fmtdictRecord = None,\n parentRecord = None, hashConstructor = None) :\n if fmtdictCDS is None :\n fmtdictCDS = _GB_CDS_FMTDICT\n if fmtdictRecord is None :\n fmtdictRecord = _GB_RECORD_FMTDICT\n CDS.parentRecord = parentRecord\n info = dict()\n for k in [x for x in outfmt if x in fmtdictCDS.keys()] :\n info[k] = fmtdictCDS[k](CDS)\n for k in [x for x in outfmt if x in fmtdictRecord.keys()] :\n info[k] = fmtdictRecord[k](parentRecord)\n if hashConstructor is not None :\n protSeq = fmtdictCDS[\"prot\"](CDS)\n h = hashConstructor()\n h.update(protSeq)\n info[\"hash\"] = h.hexdigest()\n return info", "def _generate_info_dict(meta_path, bands='ugrizy'):\n return DC2DMTractCatalog._generate_info_dict(meta_path, bands)", "def create_model():\n # Get list of all syllables: [\"<s>\", \"AH\", \"</s>\", \"<s>\", \"T\", ...]\n syllabifier = Syllabifier()\n all_syllables = syllabifier.all_syllables()\n\n # Count conditional probabilties of phoneme tuples\n tcf = TrigramCollocationFinder.from_words(all_syllables)\n bcf = BigramCollocationFinder.from_words(all_syllables)\n tri_dict = dict(sorted(tcf.ngram_fd.items(), key=lambda t: (-t[1], t[0])))\n bi_dict = dict(sorted(bcf.ngram_fd.items(), key=lambda t: (-t[1], t[0])))\n\n # Create dictionary to count cond prob all phoneme tuples\n accepted_phonemes = [i[0] for i in cmudict.phones()]\n accepted_phonemes.append('<s>')\n accepted_phonemes.append('</s>')\n phoneme_tups = [p for p in 
itertools.product(accepted_phonemes, repeat=3)]\n cond_probs_dict = dict([(char, 0) for char in phoneme_tups])\n\n for t in tri_dict:\n p1, p2, p3 = t[0], t[1], t[2]\n tri_count = tri_dict[t]\n bi_count = bi_dict[(p1, p2)]\n if bi_count > 1:\n cond_prob = tri_count * 1.0 / bi_count\n else:\n cond_prob = 0.0\n cond_probs_dict[(p1, p2, p3)] = cond_prob\n\n pickle.dump(cond_probs_dict, open(COND_PROBS_PATH, \"wb\"))\n return", "def _makeimap(self):\n self.map_[\"source\"] = \"nasa\"\n self.map_[\"instrument\"] = \"goes\"\n self.map_[\"physobs\"] = \"irradiance\"\n self.map_[\"provider\"] = \"sdac\"", "def construct_dicts(self, path):\n module_dicts = self.read_dict(path, use_superpkg=True)\n\n id_dict = dict()\n name_dict = dict()\n\n for cmd_dict in module_dicts:\n # Create a cmd template object\n cmd_temp = cmd_template.CmdTemplate(\n cmd_dict[self.OP_CODE_FIELD],\n cmd_dict[self.MNEMONIC_FIELD],\n cmd_dict[self.COMPONENT_FIELD],\n cmd_dict[self.ARGS_FIELD],\n cmd_dict[self.DESC_FIELD],\n )\n\n id_dict[cmd_dict[self.OP_CODE_FIELD]] = cmd_temp\n name_dict[cmd_dict[self.MNEMONIC_FIELD]] = cmd_temp\n\n return (id_dict, name_dict)", "def _get_information(self):\n pros_cons = []\n pros_cons_dict = {}\n\n for i, div in enumerate(self._tab.find_all(\"div\")):\n for p in div.find_all(\"p\"):\n pro_con = p.get_text(strip=True)\n pros_cons.append(pro_con)\n pros_cons_dict.update({self._keys_dict[i]: pros_cons})\n pros_cons = []\n\n return pros_cons_dict", "def setpmidInfo():\n DB = PT.DB\n for p in PT.proteins:\n f = DB[p]['PMID_link']\n #print f\n try:\n auth, tit = t.fetchPMIDSummary(f['text'])\n #print 'got info', tit\n except:\n print 'no pmid'\n try:\n f['authors'] = auth\n f['title'] = tit\n print auth, tit\n #print DB[p]['PMID_link']\n except:\n print 'no dict'\n \n return", "def gen_info():\n # Carga la metainfo de departamentos de covidstas y filtramos departamentos de Santa Fe\n covidstats_meta_df = pd.read_csv('covidstats.csv',sep=';')\n covidstats_meta_df['LOCATION']='ARGENTINA/'+covidstats_meta_df['Provincia'].apply(normalize_str)+'/'+covidstats_meta_df['Departamento'].apply(normalize_str)\n covidstats_meta_df=covidstats_meta_df[covidstats_meta_df['LOCATION'].apply(lambda l : l.startswith('ARGENTINA/SANTA FE'))]\n covidstats_meta_df\n\n # Cargamos la info poblacional y chequemos que tengamos toda la info\n info_df=pd.read_csv('info_general.csv')\n s = set(info_df['LOCATION'])\n for l in set(covidstats_meta_df['LOCATION']):\n if l not in s:\n print('FALTA INFO DE: {}'.format(l))\n\n # Cargamos la info geografica y chequemos que tengamos toda la info\n gdf = gpd.read_file('maps_general.geojson')\n gdf=gdf[gdf['LOCATION'].apply(lambda l : l.startswith('ARGENTINA/SANTA FE'))]\n s = set(gdf['LOCATION'])\n for l in set(covidstats_meta_df['LOCATION']):\n if l not in s:\n print('FALTA INFO GEOGRAFICA DE: {}'.format(l))\n return covidstats_meta_df, info_df, gdf", "def metadata(self) -> dict:\n meta = {}\n meta['name'] = self.name\n meta['id'] = self.id\n meta['family'] = self.family\n \n meta['ptd_type'] = []\n meta['pos'] = []\n meta['atype'] = []\n meta['db_vect'] = []\n meta['scale'] = []\n for cp in self.parameters:\n meta['ptd_type'].append(cp.get('ptd_type', None))\n meta['pos'].append(cp.get('pos', None))\n meta['atype'].append(cp.get('atype', None))\n meta['db_vect'].append(cp.get('db_vect', None))\n meta['scale'].append(cp.get('scale', None))\n \n return meta", "def program_info():\n\n print(\n color.GREEN\n + color.UNDERLINE\n + color.BOLD\n + \"Program Info Center:\\n\"\n + color.END\n 
)\n print(\n color.UNDERLINE\n + color.BOLD\n + \"About The Program:\"\n + color.END\n + \" This program works with the Blockchain-19 protocols defined within it's respective project. Blockchain-19 is an adaptation of the cryptocurrency blockchain or the Blockchain game used for education purposes, instead relating the content on the Blockchain to COVID-19. Given patient information the program can calculate the hashes within the Blockchain, creating a solved ledger. The program offers users the option of creating a new ledger or importing a previously exported ledger.\\n\"\n )\n\n print(\n color.UNDERLINE\n + color.BOLD\n + \"Necessary Patient Info:\"\n + color.END\n + \"\\n* Hospital \\n* Patient ID \\n* Current Status\\n\"\n )\n\n print(\n color.UNDERLINE\n + color.BOLD\n + \"Current Patient Status Key:\"\n + color.END\n + \"\\n* A = Admitted \\n* B = Stable \\n* C = Moderate \\n* D = Severe \\n* E = Discharged \\n* F = ICU\\n\\n\"\n )", "def extract_information(preprocessed_sentences):\n parsed = list(map(lambda sentence: nlp(sentence), preprocessed_sentences))\n\n quantities = list(filter(lambda sentence: eh.sentence_has_type(sentence, 'QUANTITY'), parsed))\n dates = list(filter(lambda sentence: eh.sentence_has_type(sentence, 'DATE'), parsed))\n\n hurricane_name = eh.extract_frequent_regex_match(parsed, '[Hh]urricane ([A-Z][a-z]+)').most_common(1)[0][0]\n hurricane_category = eh.extract_frequent_regex_match(parsed, '[Cc]ategory ([0-9]+)').most_common(1)[0][0]\n\n tropical_storm_name = eh.extract_frequent_regex_match(parsed, '[Tt]ropical [Ss]torm ([A-Z][a-z]+)').most_common(1)[0][0]\n formation_date, middle_month = extract_storm_timeline(dates, hurricane_name)\n\n preperation_info = extract_preparation_information(parsed)\n prep_gpes = preperation_info[0].most_common(3)\n\n restore_info = extract_restoration_information(parsed)\n\n landfall_info = extract_landfall_information(parsed)\n\n wind_info = extract_wind_information(quantities)\n rain_info = extract_rain_information(quantities)\n size_info = extract_size_information(parsed)\n\n # formation_info = extract_formation_info(parsed)\n death_info = extract_death_damages_info(parsed)\n\n print(constants.HURRICANE_SENTENCE.format(hurricane_name, middle_month, hurricane_category))\n print(constants.LANDFALL_SENTENCE.format(hurricane_name, landfall_info[2], landfall_info[3], landfall_info[0], landfall_info[1]))\n print(constants.WIND_SENTENCE.format(wind_info[0], wind_info[1], wind_info[2]))\n print(constants.RAIN_SENTENCE.format(hurricane_name, rain_info[1], rain_info[0], rain_info[2]))\n print(constants.FORMATION_SENTENCE.format(formation_date, tropical_storm_name))\n print(constants.PREPARATION_SENTENCE.format(prep_gpes[0][0], prep_gpes[1][0], prep_gpes[2][0], preperation_info[1].\n most_common(1)[0][0]))\n print(constants.SIZE_SENTENCE.format(size_info[0], size_info[1]))", "def info_scrambled(out: Export = Export(\"cwb.encoded_scrambled/data/.info\"),\n sentences: AnnotationCommonData = AnnotationCommonData(\"misc.<sentence>_count\"),\n firstdate: AnnotationCommonData = AnnotationCommonData(\"cwb.datefirst\"),\n lastdate: AnnotationCommonData = AnnotationCommonData(\"cwb.datelast\"),\n resolution: AnnotationCommonData = AnnotationCommonData(\"dateformat.resolution\"),\n protected: bool = Config(\"korp.protected\")):\n create_info_file(sentences, firstdate, lastdate, resolution, protected, out)", "def create_meta_dict_L1(adcp_meta):\n meta_dict = {}\n with open(adcp_meta) as csv_file:\n csv_reader = csv.reader(csv_file, 
delimiter=',')\n line_count = 0\n next(csv_reader, None) # Skip header row\n for row in csv_reader:\n # extract all metadata from csv file into dictionary -- some items not passed to netCDF file but are extracted anyway\n if row[0] == '' and row[1] == '':\n print('Metadata file contains a blank row; skipping this row !')\n elif row[0] != '' and row[1] == '':\n print('Metadata item in csv file has blank value; skipping this row '\n 'in metadata file !')\n else:\n meta_dict[row[0]] = row[1]\n\n # Add conventions metadata to meta_dict\n meta_dict['deployment_type'] = 'Sub Surface'\n meta_dict['flag_meaning'] = 'no_quality_control, good_value, probably_good_value, probably_bad_value, ' \\\n 'bad_value, changed_value, value_below_detection, value_in_excess, ' \\\n 'interpolated_value, missing_value'\n meta_dict['flag_references'] = 'BODC SeaDataNet'\n meta_dict['flag_values'] = '0, 1, 2, 3, 4, 5, 6, 7, 8, 9'\n meta_dict['keywords'] = 'Oceans > Ocean Circulation > Ocean Currents'\n meta_dict['keywords_vocabulary'] = 'GCMD Science Keywords'\n meta_dict['naming_authority'] = 'BODC, MEDS, CF v72'\n meta_dict['variable_code_reference'] = 'BODC P01'\n meta_dict['Conventions'] = \"CF-1.8\"\n\n return meta_dict", "def get_info(self):\n pattern = \"{}-{}-{}\".format(*self.diagram).replace(\"/\", \"|\")\n info = \"\"\n info += \"name: triangle group {}\\n\".format(pattern)\n info += \"cox_mat: {}\\n\".format(self.cox_mat)\n info += \"vertices: {}\\n\".format(self.num_vertices)\n info += \"edges: {}\\n\".format(self.num_edges)\n info += \"faces: {}\\n\".format(self.num_faces)\n info += \"states in the automaton: {}\\n\".format(self.G.dfa.num_states)\n info += \"reflection table:\\n{}\\n\".format(self.G.reftable)\n info += \"the automaton is saved as {}_dfa.png\".format(pattern)\n self.G.dfa.draw(pattern + \"_dfa.png\")\n return info", "def isolate_relevant_information(self):\n\n def get_formula(oncat_formula):\n \"\"\"will need to go from something like\n \"${value/10e11}`\"\n to something more pythonic\n \"{value/10e11}\"\"\"\n regular_expression = r'\\$(?P<formula>.+)\\`'\n m = re.search(regular_expression, oncat_formula)\n if m:\n return m.group('formula')\n else:\n return \"\"\n\n template_information = {}\n for _index, _element in enumerate(self._oncat_default_template):\n _title = _element[\"name\"]\n _path = _element[\"path\"]\n if \"units\" in _element:\n _units = _element[\"units\"]\n else:\n _units = \"\"\n if \"transform\" in _element:\n _formula = get_formula(_element[\"transform\"])\n else:\n _formula = \"\"\n template_information[_index] = {'title': _title,\n 'path': _path,\n 'units': _units,\n 'formula': _formula}\n self.template_information = template_information", "def produce_protein_interaction_dict (inPath, outPath): \n PPIs = pd.read_table(inPath, sep=\"\\t\")\n proteins = set(PPIs[[\"Protein_1\", \"Protein_2\"]].values.flatten())\n proteinPartners = {}\n for protein in proteins:\n partners = set(PPIs.loc[(PPIs[[\"Protein_1\", \"Protein_2\"]]==protein).any(1),\n [\"Protein_1\", \"Protein_2\"]].values.flatten()) - {protein}\n if sum((PPIs[[\"Protein_1\", \"Protein_2\"]]==protein).all(1)) > 0:\n partners.add(protein)\n proteinPartners[protein] = partners\n with open(outPath, 'wb') as fOut:\n pickle.dump(proteinPartners, fOut)", "def dict(dict: Dict[str, Pin], /) -> None:", "def _build_single_study_info(study, info, study_proc, proc_samples):\n PI = StudyPerson(info['principal_investigator_id'])\n status = study.status\n if info['publication_doi'] is not None:\n pmids = 
get_pubmed_ids_from_dois(info['publication_doi']).values()\n info['pmid'] = \", \".join([pubmed_linkifier([p]) for p in pmids])\n info['publication_doi'] = \", \".join([doi_linkifier([p])\n for p in info['publication_doi']])\n\n else:\n info['publication_doi'] = \"\"\n info['pmid'] = \"\"\n if info[\"number_samples_collected\"] is None:\n info[\"number_samples_collected\"] = 0\n info[\"shared\"] = _get_shared_links_for_study(study)\n # raw data is any artifact that is not Demultiplexed or BIOM\n\n info[\"num_raw_data\"] = len([a for a in study.artifacts()\n if a.artifact_type not in ['Demultiplexed',\n 'BIOM']])\n info[\"status\"] = status\n info[\"study_id\"] = study.id\n info[\"pi\"] = study_person_linkifier((PI.email, PI.name))\n del info[\"principal_investigator_id\"]\n del info[\"email\"]\n # Build the proc data info list for the child row in datatable\n info[\"proc_data_info\"] = []\n for data_type, proc_datas in viewitems(study_proc[study.id]):\n info[\"proc_data_info\"].extend([\n _build_single_proc_data_info(pd_id, data_type, proc_samples[pd_id])\n for pd_id in proc_datas])\n return info", "def build_phenotype(phenotype_id, adapter):\n phenotype_obj = {}\n phenotype = adapter.hpo_term(phenotype_id)\n if phenotype:\n phenotype_obj[\"phenotype_id\"] = phenotype[\"hpo_id\"]\n phenotype_obj[\"feature\"] = phenotype[\"description\"]\n return phenotype", "def MakePmapProgram(MaterialInfoList,OutputPath,GasType,GasAtomType,SpecialPairList,GasAtomDictionary,\r\n MaterialAtomDictionary,GridSpacingP,HEPCP,CutOff,Nodes,TaskSuffix,TorqueSetting,MuSiCSetting):\r\n\r\n def MakeAtomAtomFile(PmapOutputPath,MaterialInfo,GasAtomType,SpecialPairList,GasAtomDictionary,MaterialAtomDictionary,CutOff):\r\n\r\n with open('%s/atom_atom_file' % (PmapOutputPath), 'w') as AtomAtomFile:\r\n\r\n AtomAtomFile.write('-'.center(80, '-'))\r\n AtomAtomFile.write('\\n')\r\n\r\n for i in range(len(MaterialInfo[5])):\r\n for j in range(len(MaterialInfo[5])):\r\n if i <= j:\r\n AtomAtomFile.write('%-10s%-10sOFF\\n' % (MaterialInfo[5][i], MaterialInfo[5][j]))\r\n\r\n for k in range(len(GasAtomType)):\r\n for l in range(len(GasAtomType)):\r\n if k <= l:\r\n Key=False\r\n for SpecialPair in SpecialPairList:\r\n if GasAtomType[k] in SpecialPair[0] and GasAtomType[l] in SpecialPair[0] and GasAtomType[k]!=GasAtomType[l]:\r\n Key=True\r\n if Key==False:\r\n num1 = GasAtomDictionary.get(GasAtomType[k])\r\n num2 = GasAtomDictionary.get(GasAtomType[l])\r\n sig1 = str('%.3f' % ((float(num1[0]) + float(num2[0])) / 2))\r\n eps1 = str('%.3f' % ((float(num1[1]) * float(num2[1])) ** 0.5))\r\n AtomAtomFile.write('%-10s%-10s%-10sSIG@%-20sEPS@%-20sHICUT@%-10sLOCUT@0.1000\\n%-10s%-10s%-10sHICUT@%-10sALPHA@0.10\\n'%(GasAtomType[k],GasAtomType[l],'LJ',sig1,eps1,CutOff,GasAtomType[k],GasAtomType[l],'WFCOUL',CutOff))\r\n\r\n for h in range(len(GasAtomType)):\r\n for g in range(len(MaterialInfo[5])):\r\n Key = False\r\n for SpecialPair in SpecialPairList:\r\n if GasAtomType[h] in SpecialPair[0] and MaterialInfo[5][g] in SpecialPair[0]:\r\n Key = True\r\n if Key==False:\r\n num3 = GasAtomDictionary.get(GasAtomType[h])\r\n num4 = MaterialAtomDictionary.get(MaterialInfo[5][g])\r\n sig2 = str('%.3f' % ((float(num3[0]) + float(num4[0])) / 2))\r\n eps2 = str('%.3f' % ((float(num3[1]) * float(num4[1])) ** 0.5))\r\n AtomAtomFile.write('%-10s%-10s%-10sSIG@%-20sEPS@%-20sHICUT@%-10sLOCUT@0.1000\\n%-10s%-10s%-10sHICUT@%-10sALPHA@0.10\\n'%(GasAtomType[h],MaterialInfo[5][g],'LJ',sig2,eps2,CutOff,GasAtomType[h],MaterialInfo[5][g],'WFCOUL',CutOff))\r\n\r\n 
for m in SpecialPairList:\r\n AtomAtomFile.write('%-10s%-10s%-10sSIG@%-20sEPS@%-20sHICUT@%-10sLOCUT@0.1000\\n%-10s%-10s%-10sHICUT@%-10sALPHA@0.10\\n'%(m[0][0],m[0][1],'LJ',m[1][0],m[1][1],CutOff,m[0][0],m[0][1],'WFCOUL',CutOff))\r\n\r\n AtomAtomFile.write('-'.center(80, '-'))\r\n\r\n def MakeIntramolecularFile(PmapOutputPath,MaterialInfo,GasAtomType,GasAtomDictionary):\r\n\r\n with open('%s/intramolecular_file' % (PmapOutputPath), 'w') as IntraFile:\r\n IntraFile.write('Intra: %s'%(MaterialInfo[7]))\r\n for i in GasAtomType:\r\n pseudo = i.split('_')\r\n if pseudo[0] != 'M' and GasAtomDictionary.get(i)[0]!='0':\r\n IntraFile.write('\\nIntra: %s'%(i))\r\n\r\n def MakeMoleMolePmapFile(PmapOutputPath,MaterialInfo,GasAtomType,GasAtomDictionary):\r\n\r\n with open('%s/mole_mole_pmap_file' % (PmapOutputPath), 'w') as MoleMolePmap:\r\n MoleMolePmap.write('''%s %s NCOUL OFF\r\n%s %s COUL OFF\\n\\n'''%(MaterialInfo[7],MaterialInfo[7],MaterialInfo[7],MaterialInfo[7]))\r\n\r\n for i in GasAtomType:\r\n pseudo = i.split('_')\r\n if pseudo[0] != 'M' and GasAtomDictionary.get(i)[0]!='0':\r\n MoleMolePmap.write('''%s %s NCOUL OFF\r\n%s %s COUL OFF\r\n\r\n%s %s NCOUL BASIC LJ FAST\r\n%s %s COUL OFF\\n\\n''' % (i, i, i, i, i,MaterialInfo[7], i, MaterialInfo[7]))\r\n\r\n def MakePmapMaker(PmapOutputPath,MaterialInfo,GasAtomType,GridSpacingP,HEPCP,GasAtomDictionary):\r\n\r\n for i in GasAtomType:\r\n pseudo = i.split('_')\r\n if pseudo[0] != 'M' and GasAtomDictionary.get(i)[0]!='0':\r\n with open('%s/pmap_maker_%s_in_%s.ctr'%(PmapOutputPath,i,MaterialInfo[7]), 'w') as PmapMaker:\r\n PmapMaker.write('''------ General Information ------------------------------------------\r\n%s molecule in %s\r\n1 # No. of iterations\r\n1 # No. of steps between writes to output/log file\r\n2 # No. of steps between writes to crash file\r\n2 # No. of steps between writes to config. file\r\n1 # Start numbering simulations from .\r\n30728 # Iseed\r\n1 # specifies contents of config file\r\n%s_in_%s.res # Restart File to write to\r\n%s_in_%s.con # Configuration File\r\n\r\n------ Atomic Types --------------------------------------------------\r\n%s # number of atomic types\r\n\r\n%s\r\n%s.atm'''%(i,MaterialInfo[7],i,MaterialInfo[7],i,MaterialInfo[7],len(MaterialInfo[5])+1,i,i))\r\n\r\n for j in MaterialInfo[5]:\r\n PmapMaker.write('\\n\\n%s\\n%s.atm' % (j,j))\r\n\r\n PmapMaker.write('''\\n------ Molecule Types -------------------------------------------------\r\n2\r\n\r\n%s\r\n%s.mol\r\n\r\n%s\r\n%s.mol\r\n------ Simulation Cell Information ------------------------------------\r\n%s # Fundamental cell file\r\n%s # No. 
of unit cells in x, y, z direction\r\n1, 1, 1 # (1 = Periodic) in x, y, z\r\n------ Forcefield Information -------------------------------------------\r\nBASIC\r\nMOL\r\natom_atom_file # atom-atom interaction file\r\nmole_mole_pmap_file # sorbate-sorbate interaction file\r\nintramolecular_file # intramolecular interaction file/specification\r\n------ Mapmaker Information -----------------------------------------------\r\n1 # Number of maps to make\r\n\r\n%s # Sorbent to map\r\n%s # Sorbate to probe map with\r\nNCOUL LJ # Interaction type to map\r\n%s # Approxiamte grid spacing (Ang)\r\n%s # High end potential cutoff (kJ/mol)\r\n%s_in_%s.pmap # Map filename or AUTO\r\n------ Configuration Initialization -------------------------------------\r\n%s # Sorbate_Type\r\nMOLECULE NULL\r\n%s # Sorbate_Type\r\nFIXED NULL''' % (i, i,MaterialInfo[7],MaterialInfo[7],MaterialInfo[7],', '.join(MaterialInfo[4]),MaterialInfo[7],i,GridSpacingP,HEPCP,i,MaterialInfo[7],i,MaterialInfo[7]))\r\n\r\n def MakeTorqueFile(PmapOutputPath,Nodes,TaskSuffix,TorqueSetting,MuSiCSetting,GasAtomType,GasAtomDictionary,MaterialInfo,OutputPath):\r\n\r\n Node = random.choice(Nodes)\r\n\r\n with open('%s/run_pmapmaker.pbs' % (PmapOutputPath), 'w') as Torque:\r\n Torque.write('''#!/bin/bash\r\n#PBS -l nodes=%s\r\n#PBS -N MuSiC_pmap.%s\r\n#PBS -o music_pmap_jobs.out\r\n#PBS -j oe\r\n\r\n#\r\n# The number of processors you desire is indicated by replacing\r\n# <nproc> above.\r\n#\r\n\r\n#\r\n# GROMACS path and arguments to mdrun :\r\n#\r\ncd $PBS_O_WORKDIR\r\n\r\n# =============== Environment Setting ============================ #\\n''' % (Node, TaskSuffix))\r\n\r\n for i in TorqueSetting:\r\n Torque.write('%s' % (i))\r\n\r\n Torque.write('''# =============== Don't Change Above Setting ===================== #\r\n\r\necho \"============The computed nodes============\"\r\ncp -f $PBS_NODEFILE NODE.txt\r\necho \"User: \" $USER\r\ncat $PBS_NODEFILE\r\necho \"Job ID: \" $PBS_JOBID\r\necho \"Job Cookie: \" $PBS_JOBCOOKIE\r\necho \"Using executable: \" `which mpirun`\r\necho `date`\r\necho \"============Finished setting==============\"\r\n\r\n# =========== Setting Jobs ============================ #\\n''')\r\n\r\n for j in MuSiCSetting:\r\n Torque.write('%s' % (j))\r\n\r\n Torque.write('''export ATOMSDIR=%s\r\n export MOLSDIR=%s\r\n export PMAPDIR=%s\r\n export EMAPDIR=%s\r\n export SMAPDIR=%s''' % (os.path.join(OutputPath, 'Atoms'), os.path.join(OutputPath, 'Mols'),\r\n os.path.join(OutputPath, 'Maps'), os.path.join(OutputPath, 'Maps'),\r\n os.path.join(OutputPath, 'Maps')))\r\n\r\n Torque.write('''# =========== Setting Jobs ============================ #\r\n\r\n# +++++++++++++++ Start Computing +++++++++++++++++++++ #\r\n\r\nTIME_DIR=$(date '+%Y-%m-%d_%H-%M-%S')\r\nTIME_DIR=\"${USER}_jobs_${TIME_DIR}_${PBS_JOBID}\"\r\nif [ -d /utmp ]; then\r\n TEMP_DIR=/utmp/${USER}/${TIME_DIR}\r\nelse\r\n TEMP_DIR=/temp/${USER}/${TIME_DIR}\r\nfi\r\nmkdir -p ${TEMP_DIR}\r\ncp -rf * ${TEMP_DIR}\r\ncd ${TEMP_DIR}\r\nrm -f music_pmap_jobs.out\r\necho \"The temp direcotry: \" ${TEMP_DIR}\r\necho \"============Finished setting==============\"\r\n\r\necho \"+++++++++++++ Run MuSic ++++++++++++++++++++++++++++\"\\n''')\r\n\r\n for i in GasAtomType:\r\n pseudo = i.split('_')\r\n if pseudo[0] != 'M' and GasAtomDictionary.get(i)[0] != '0':\r\n Torque.write('music_mapmaker pmap_maker_%s_in_%s.ctr > pmap_maker_%s_in_%s.txt\\necho `date`\\n'%(i,MaterialInfo[7],i,MaterialInfo[7]))\r\n\r\n Torque.write('''echo \"+++++++++++++ Finish MuSic 
+++++++++++++++++++++++++\"\r\n\r\ncd $PBS_O_WORKDIR\r\ncp -rf ${TEMP_DIR}/* .\r\nrm -rf ${TEMP_DIR}\r\n\r\n\r\necho \"All files were copied back!\"\r\necho \"The work direcotry: \" $PBS_O_WORKDIR\r\necho `date`\r\necho \"============Finished Job ==============\"''')\r\n\r\n def main():\r\n\r\n for MaterialInfo in MaterialInfoList:\r\n if MaterialInfo[6]==True:\r\n PmapOutputPath='%s/%s/%s/%s'%(OutputPath,'MakePmap','_'.join(GasType),MaterialInfo[7])\r\n if os.path.exists(PmapOutputPath):\r\n pass\r\n else:\r\n os.makedirs(PmapOutputPath)\r\n\r\n MakeAtomAtomFile(PmapOutputPath,MaterialInfo,GasAtomType,SpecialPairList,GasAtomDictionary,MaterialAtomDictionary,CutOff)\r\n MakeMoleMolePmapFile(PmapOutputPath, MaterialInfo, GasAtomType,GasAtomDictionary)\r\n MakePmapMaker(PmapOutputPath,MaterialInfo,GasAtomType,GridSpacingP,HEPCP,GasAtomDictionary)\r\n MakeIntramolecularFile(PmapOutputPath, MaterialInfo, GasAtomType,GasAtomDictionary)\r\n MakeTorqueFile(PmapOutputPath,Nodes,TaskSuffix,TorqueSetting,MuSiCSetting,GasAtomType,GasAtomDictionary,MaterialInfo,OutputPath)\r\n\r\n if __name__ == '__main__':\r\n main()", "def dict() -> Dict[str, Pin]:", "def genomic_tx_data():\n return dict(\n gene=\"BRAF\",\n strand=\"-\",\n tx_pos_range=(2053, 2188),\n alt_pos_range=(140439611, 140439746),\n alt_aln_method=\"splign\",\n tx_exon_id=780496,\n alt_exon_id=1927265,\n pos_change=(92, 43),\n alt_pos_change_range=(140439703, 140439703),\n tx_ac=\"NM_004333.4\",\n alt_ac=\"NC_000007.13\"\n )", "def build_catalog_info(self, catalog_info):\n cat = SourceFactory.build_catalog(**catalog_info)\n catalog_info['catalog'] = cat\n # catalog_info['catalog_table'] =\n # Table.read(catalog_info['catalog_file'])\n catalog_info['catalog_table'] = cat.table\n catalog_info['roi_model'] =\\\n SourceFactory.make_fermipy_roi_model_from_catalogs([cat])\n catalog_info['srcmdl_name'] =\\\n self._name_factory.srcmdl_xml(sourcekey=catalog_info['catalog_name'])\n return CatalogInfo(**catalog_info)", "def _makeimap(self):\n self.map_['source'] = 'GOES'\n self.map_['provider'] = 'NOAA'\n self.map_['instrument'] = 'SUVI'\n self.map_['physobs'] = 'flux'", "def main():\n args = get_args()\n FILE = args.FILE\n annotations = args.annotations\n outfile = args.outfile\n \n \n if not os.path.isfile(FILE):\n die('\"{}\" is not a file'.format(FILE))\n if not os.path.isfile(annotations):\n die('\"{}\" is not a file'.format(annotations))\n if os.path.isfile(FILE) and os.path.isfile(annotations):\n reader = csv.DictReader(open(FILE), delimiter = '\\t', fieldnames = (\"qseqid\", \"sseqid\", \"pident\", \"length\", \"mismatch\", \"gapopen\", \"qstart\", \"qend\", \"sstart\", \"send\", \"evalue\", \"bitscore\"))\n reader_a = csv.DictReader(open(annotations), fieldnames = (\"centroid\", \"domain\", \"kingdom\", \"phylum\", \"class\", \"order\", \"genus\", \"species\"))\n reader_b = csv.reader(open(annotations, 'r'))\n anno_dict = {}\n for row in reader_b:\n key1 = row[0]\n anno_dict[key1] = row[1:]\n\n #print(anno_dict)\n \n \"\"\"for dct in map(dict, reader_a):\n genus = (f\"{dct['genus']}\")\n species = (f\"{dct['species']}\")\n if genus == \"\": \n print(\"NA\")\n else:\n print(genus)\n if species == \"\":\n print(\"NA\")\n else:\n print(species)\"\"\"\n for dct in map(dict, reader):\n seq_id = (f\"{dct['sseqid']}\") \n pident = (f\"{dct['pident']}\")\n #print(seq_id)\n for dct_a in map(dict, reader_a):\n genus = (f\"{dct_a['genus']}\")\n species = (f\"{dct_a['species']}\")\n if any(seq_id == key for key in anno_dict): \n \"\"\"print(seq_id)\n 
print(pident)\n print(genus)\n print(species)\n #find a way to print genus and species of seq_id\n \"\"\"\n \n else:\n warn('Cannot find seq \"{}\" in lookup'.format(seq_id))\n \"\"\"for line_a in reader_a:\n an_id = (line_a['centroid']) \n print('\"{}\" is an_id'.format(an_id)) \n for line in reader:\n seq_id = (line['sseqid'])\n print('\"{}\" is seq_id'.format(seq_id))\n if seq_id == an_id:\n print(\"hi\")\n else:\n warn('Cannot find seq \"{}\" in lookup'.format(seq_id))\n \"\"\"\n #pprint.pprint(dict_list)\n #pprint.pprint(dict_list_a)\n #for key, value in d1.items():\n #if key is 'sseqid':\n #print(value)\n #print(dict_list_a['centroid']) ", "def makeMapping(globalMap):\n \n from memops.xml.Implementation import bool2str, str2bool\n\n # Set up top level dictionaries\n loadMaps = globalMap.get('loadMaps')\n mapsByGuid = globalMap.get('mapsByGuid')\n\n abstractTypes = globalMap.get('ANAP').get('abstractTypes')\n exolinks = globalMap.get('ANAP').get('exolinks')\n\n # DataType GraphicsHandlerType\n currentMap = {}\n abstractTypes['GraphicsHandlerType'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-10-03-11:26:03_00001'] = currentMap\n loadMaps['ANAP.GraphicsHandlerType'] = currentMap\n currentMap['tag'] = 'ANAP.GraphicsHandlerType'\n currentMap['type'] = 'simple'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-10-03-11:26:03_00001'\n currentMap['toStr'] = 'text'\n currentMap['cnvrt'] = 'text'\n\n # Class AnalysisProfile\n currentMap = {}\n abstractTypes['AnalysisProfile'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00004'] = currentMap\n loadMaps['ANAP.AnalysisProfile'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00004'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'analysisProfiles'\n currentMap['isTop'] = True\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccpnmr.api.AnalysisProfile.AnalysisProfile\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute AnalysisProfile.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute AnalysisProfile.bgColor\n currentMap = {}\n contentMap['bgColor'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00031'] = currentMap\n loadMaps['ANAP.AnalysisProfile.bgColor'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.bgColor'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00031'\n currentMap['name'] = 'bgColor'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['default'] = '#FFFFFF'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00007')\n\n # Attribute AnalysisProfile.createdBy\n contentMap['createdBy'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:00:59_00002__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute AnalysisProfile.fgColor\n currentMap = {}\n contentMap['fgColor'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00032'] = currentMap\n loadMaps['ANAP.AnalysisProfile.fgColor'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.fgColor'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00032'\n currentMap['name'] = 'fgColor'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n 
currentMap['default'] = '#000000'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00007')\n\n # Attribute AnalysisProfile.font\n currentMap = {}\n contentMap['font'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00030'] = currentMap\n loadMaps['ANAP.AnalysisProfile.font'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.font'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00030'\n currentMap['name'] = 'font'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute AnalysisProfile.graphicsHandler\n currentMap = {}\n contentMap['graphicsHandler'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00040'] = currentMap\n loadMaps['ANAP.AnalysisProfile.graphicsHandler'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.graphicsHandler'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00040'\n currentMap['name'] = 'graphicsHandler'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = 'Tk'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-10-03-11:26:03_00001')\n\n # Attribute AnalysisProfile.guid\n contentMap['guid'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:26_00002')\n\n # Attribute AnalysisProfile.isModifiable\n contentMap['isModifiable'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-17-14:16:26_00010__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute AnalysisProfile.lastUnlockedBy\n contentMap['lastUnlockedBy'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:00:59_00003__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute AnalysisProfile.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00029'] = currentMap\n loadMaps['ANAP.AnalysisProfile.name'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00029'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Attribute AnalysisProfile.panView\n currentMap = {}\n contentMap['panView'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00036'] = currentMap\n loadMaps['ANAP.AnalysisProfile.panView'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.panView'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00036'\n currentMap['name'] = 'panView'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = True\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00028')\n\n # Attribute AnalysisProfile.sendBugReports\n currentMap = {}\n contentMap['sendBugReports'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2010-11-17-16:21:37_00004'] = currentMap\n loadMaps['ANAP.AnalysisProfile.sendBugReports'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.sendBugReports'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2010-11-17-16:21:37_00004'\n currentMap['name'] = 'sendBugReports'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n 
currentMap['default'] = 'maybe'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2010-11-17-16:21:33_00001')\n\n # Attribute AnalysisProfile.transientDialogs\n currentMap = {}\n contentMap['transientDialogs'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00037'] = currentMap\n loadMaps['ANAP.AnalysisProfile.transientDialogs'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.transientDialogs'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00037'\n currentMap['name'] = 'transientDialogs'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = True\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00028')\n\n # Attribute AnalysisProfile.transientWindows\n currentMap = {}\n contentMap['transientWindows'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00038'] = currentMap\n loadMaps['ANAP.AnalysisProfile.transientWindows'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.transientWindows'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00038'\n currentMap['name'] = 'transientWindows'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = False\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00028')\n\n # Attribute AnalysisProfile.twoCharShortcuts\n currentMap = {}\n contentMap['twoCharShortcuts'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00039'] = currentMap\n loadMaps['ANAP.AnalysisProfile.twoCharShortcuts'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.twoCharShortcuts'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00039'\n currentMap['name'] = 'twoCharShortcuts'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = False\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00028')\n\n # Attribute AnalysisProfile.useCrosshair\n currentMap = {}\n contentMap['useCrosshair'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00034'] = currentMap\n loadMaps['ANAP.AnalysisProfile.useCrosshair'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.useCrosshair'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00034'\n currentMap['name'] = 'useCrosshair'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = True\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00028')\n\n # Attribute AnalysisProfile.useGlobalShortcuts\n currentMap = {}\n contentMap['useGlobalShortcuts'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00035'] = currentMap\n loadMaps['ANAP.AnalysisProfile.useGlobalShortcuts'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.useGlobalShortcuts'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00035'\n currentMap['name'] = 'useGlobalShortcuts'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['default'] = False\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00028')\n\n # Attribute AnalysisProfile.userEmail\n currentMap = {}\n contentMap['userEmail'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2010-11-17-16:21:37_00003'] = currentMap\n loadMaps['ANAP.AnalysisProfile.userEmail'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.userEmail'\n 
currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2010-11-17-16:21:37_00003'\n currentMap['name'] = 'userEmail'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2007-09-12-18:31:28_00003')\n\n # Attribute AnalysisProfile.userName\n currentMap = {}\n contentMap['userName'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2010-11-17-16:21:37_00001'] = currentMap\n loadMaps['ANAP.AnalysisProfile.userName'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.userName'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2010-11-17-16:21:37_00001'\n currentMap['name'] = 'userName'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute AnalysisProfile.userOrganisation\n currentMap = {}\n contentMap['userOrganisation'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2010-11-17-16:21:37_00002'] = currentMap\n loadMaps['ANAP.AnalysisProfile.userOrganisation'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.userOrganisation'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2010-11-17-16:21:37_00002'\n currentMap['name'] = 'userOrganisation'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute AnalysisProfile.webBrowser\n currentMap = {}\n contentMap['webBrowser'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00033'] = currentMap\n loadMaps['ANAP.AnalysisProfile.webBrowser'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.webBrowser'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00033'\n currentMap['name'] = 'webBrowser'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Role AnalysisProfile.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role AnalysisProfile.colorSchemes\n currentMap = {}\n contentMap['colorSchemes'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00026'] = currentMap\n loadMaps['ANAP.AnalysisProfile.colorSchemes'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.colorSchemes'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00026'\n currentMap['name'] = 'colorSchemes'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('ANAP').get('abstractTypes')\n\n # Role AnalysisProfile.macros\n currentMap = {}\n contentMap['macros'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00022'] = currentMap\n loadMaps['ANAP.AnalysisProfile.macros'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.macros'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00022'\n currentMap['name'] = 'macros'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('ANAP').get('abstractTypes')\n\n # Role AnalysisProfile.marksColor\n currentMap = {}\n contentMap['marksColor'] = currentMap\n 
mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00028'] = currentMap\n loadMaps['ANAP.AnalysisProfile.marksColor'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.marksColor'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00028'\n currentMap['name'] = 'marksColor'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['implSkip'] = True\n currentMap['copyOverride'] = True\n\n # Role AnalysisProfile.refExpProfiles\n currentMap = {}\n contentMap['refExpProfiles'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00024'] = currentMap\n loadMaps['ANAP.AnalysisProfile.refExpProfiles'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.refExpProfiles'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00024'\n currentMap['name'] = 'refExpProfiles'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('ANAP').get('abstractTypes')\n\n # Role AnalysisProfile.residueProfiles\n currentMap = {}\n contentMap['residueProfiles'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00020'] = currentMap\n loadMaps['ANAP.AnalysisProfile.residueProfiles'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.residueProfiles'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00020'\n currentMap['name'] = 'residueProfiles'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('ANAP').get('abstractTypes')\n\n # Role AnalysisProfile.rulersColor\n currentMap = {}\n contentMap['rulersColor'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00027'] = currentMap\n loadMaps['ANAP.AnalysisProfile.rulersColor'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.rulersColor'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00027'\n currentMap['name'] = 'rulersColor'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['implSkip'] = True\n currentMap['copyOverride'] = True\n # End of AnalysisProfile\n\n currentMap = abstractTypes.get('AnalysisProfile')\n aList = ['createdBy', 'graphicsHandler', 'guid', 'isModifiable', 'lastUnlockedBy', 'name', 'panView', 'sendBugReports', 'transientDialogs', 'transientWindows', 'twoCharShortcuts', 'useCrosshair', 'useGlobalShortcuts', 'userEmail', 'webBrowser']\n currentMap['headerAttrs'] = aList\n aList = ['bgColor', 'fgColor', 'font', 'userName', 'userOrganisation', 'marksColor', 'rulersColor']\n currentMap['simpleAttrs'] = aList\n aList = ['residueProfiles', 'refExpProfiles', 'macros', 'colorSchemes', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n aList = ['colorSchemes', 'macros', 'refExpProfiles', 'residueProfiles']\n currentMap['children'] = aList\n\n # Class ColorScheme\n currentMap = {}\n abstractTypes['ColorScheme'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-08-11:06:22_00002'] = currentMap\n loadMaps['ANAP.ColorScheme'] = currentMap\n currentMap['tag'] = 'ANAP.ColorScheme'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-08-11:06:22_00002'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'colorSchemes'\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccpnmr.api.AnalysisProfile.ColorScheme\n contentMap = {}\n 
currentMap['content'] = contentMap\n\n # Attribute ColorScheme.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute ColorScheme.colors\n currentMap = {}\n contentMap['colors'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00043'] = currentMap\n loadMaps['ANAP.ColorScheme.colors'] = currentMap\n currentMap['tag'] = 'ANAP.ColorScheme.colors'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00043'\n currentMap['name'] = 'colors'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00007')\n\n # Attribute ColorScheme.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00007'] = currentMap\n loadMaps['ANAP.ColorScheme.name'] = currentMap\n currentMap['tag'] = 'ANAP.ColorScheme.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00007'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Role ColorScheme.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n # End of ColorScheme\n\n currentMap = abstractTypes.get('ColorScheme')\n aList = ['colors', 'name']\n currentMap['simpleAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class Macro\n currentMap = {}\n abstractTypes['Macro'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-08-11:06:22_00001'] = currentMap\n loadMaps['ANAP.Macro'] = currentMap\n currentMap['tag'] = 'ANAP.Macro'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-08-11:06:22_00001'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'macros'\n currentMap['objkey'] = 'serial'\n currentMap['class'] = ccpnmr.api.AnalysisProfile.Macro\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute Macro.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute Macro.details\n currentMap = {}\n contentMap['details'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00005'] = currentMap\n loadMaps['ANAP.Macro.details'] = currentMap\n currentMap['tag'] = 'ANAP.Macro.details'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00005'\n currentMap['name'] = 'details'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00036')\n\n # Attribute Macro.function\n currentMap = {}\n contentMap['function'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00002'] = currentMap\n loadMaps['ANAP.Macro.function'] = currentMap\n currentMap['tag'] = 'ANAP.Macro.function'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00002'\n currentMap['name'] = 'function'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Attribute Macro.isInMenu\n currentMap = {}\n contentMap['isInMenu'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-29-13:48:16_00005'] = 
currentMap\n loadMaps['ANAP.Macro.isInMenu'] = currentMap\n currentMap['tag'] = 'ANAP.Macro.isInMenu'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-29-13:48:16_00005'\n currentMap['name'] = 'isInMenu'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = False\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00028')\n\n # Attribute Macro.isInMouseMenu\n currentMap = {}\n contentMap['isInMouseMenu'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-29-13:48:16_00006'] = currentMap\n loadMaps['ANAP.Macro.isInMouseMenu'] = currentMap\n currentMap['tag'] = 'ANAP.Macro.isInMouseMenu'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-29-13:48:16_00006'\n currentMap['name'] = 'isInMouseMenu'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = False\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00028')\n\n # Attribute Macro.module\n currentMap = {}\n contentMap['module'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00003'] = currentMap\n loadMaps['ANAP.Macro.module'] = currentMap\n currentMap['tag'] = 'ANAP.Macro.module'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00003'\n currentMap['name'] = 'module'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Attribute Macro.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-15:11:10_00001'] = currentMap\n loadMaps['ANAP.Macro.name'] = currentMap\n currentMap['tag'] = 'ANAP.Macro.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-15:11:10_00001'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute Macro.ordering\n currentMap = {}\n contentMap['ordering'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00004'] = currentMap\n loadMaps['ANAP.Macro.ordering'] = currentMap\n currentMap['tag'] = 'ANAP.Macro.ordering'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00004'\n currentMap['name'] = 'ordering'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['proc'] = 'direct'\n currentMap['default'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032')\n\n # Attribute Macro.path\n currentMap = {}\n contentMap['path'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00001'] = currentMap\n loadMaps['ANAP.Macro.path'] = currentMap\n currentMap['tag'] = 'ANAP.Macro.path'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00001'\n currentMap['name'] = 'path'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00003')\n\n # Attribute Macro.serial\n currentMap = {}\n contentMap['serial'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-15:11:09_00001'] = currentMap\n loadMaps['ANAP.Macro.serial'] = currentMap\n currentMap['tag'] = 'ANAP.Macro.serial'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-15:11:09_00001'\n currentMap['name'] = 
'serial'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032')\n\n # Attribute Macro.shortcut\n currentMap = {}\n contentMap['shortcut'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00006'] = currentMap\n loadMaps['ANAP.Macro.shortcut'] = currentMap\n currentMap['tag'] = 'ANAP.Macro.shortcut'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00006'\n currentMap['name'] = 'shortcut'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Role Macro.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n # End of Macro\n\n currentMap = abstractTypes.get('Macro')\n aList = ['function', 'isInMenu', 'isInMouseMenu', 'module', 'ordering', 'serial', 'shortcut']\n currentMap['headerAttrs'] = aList\n aList = ['details', 'name', 'path']\n currentMap['simpleAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class RefExpProfile\n currentMap = {}\n abstractTypes['RefExpProfile'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00006'] = currentMap\n loadMaps['ANAP.RefExpProfile'] = currentMap\n currentMap['tag'] = 'ANAP.RefExpProfile'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00006'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'refExpProfiles'\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccpnmr.api.AnalysisProfile.RefExpProfile\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute RefExpProfile.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute RefExpProfile.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00046'] = currentMap\n loadMaps['ANAP.RefExpProfile.name'] = currentMap\n currentMap['tag'] = 'ANAP.RefExpProfile.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00046'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Attribute RefExpProfile.peakSymbolColors\n currentMap = {}\n contentMap['peakSymbolColors'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00048'] = currentMap\n loadMaps['ANAP.RefExpProfile.peakSymbolColors'] = currentMap\n currentMap['tag'] = 'ANAP.RefExpProfile.peakSymbolColors'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00048'\n currentMap['name'] = 'peakSymbolColors'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00007')\n\n # Attribute RefExpProfile.peakTextColors\n currentMap = {}\n contentMap['peakTextColors'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00049'] = currentMap\n loadMaps['ANAP.RefExpProfile.peakTextColors'] = currentMap\n currentMap['tag'] = 'ANAP.RefExpProfile.peakTextColors'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00049'\n currentMap['name'] = 'peakTextColors'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n 
currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00007')\n\n # Attribute RefExpProfile.refExpNames\n currentMap = {}\n contentMap['refExpNames'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00047'] = currentMap\n loadMaps['ANAP.RefExpProfile.refExpNames'] = currentMap\n currentMap['tag'] = 'ANAP.RefExpProfile.refExpNames'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00047'\n currentMap['name'] = 'refExpNames'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Role RefExpProfile.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role RefExpProfile.negColorSchemes\n currentMap = {}\n contentMap['negColorSchemes'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00045'] = currentMap\n loadMaps['ANAP.RefExpProfile.negColorSchemes'] = currentMap\n currentMap['tag'] = 'ANAP.RefExpProfile.negColorSchemes'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00045'\n currentMap['name'] = 'negColorSchemes'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['copyOverride'] = True\n\n # Role RefExpProfile.posColorSchemes\n currentMap = {}\n contentMap['posColorSchemes'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00041'] = currentMap\n loadMaps['ANAP.RefExpProfile.posColorSchemes'] = currentMap\n currentMap['tag'] = 'ANAP.RefExpProfile.posColorSchemes'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00041'\n currentMap['name'] = 'posColorSchemes'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['copyOverride'] = True\n # End of RefExpProfile\n\n currentMap = abstractTypes.get('RefExpProfile')\n aList = ['name']\n currentMap['headerAttrs'] = aList\n aList = ['peakSymbolColors', 'peakTextColors', 'refExpNames', 'negColorSchemes', 'posColorSchemes']\n currentMap['simpleAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class ResidueProfile\n currentMap = {}\n abstractTypes['ResidueProfile'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00005'] = currentMap\n loadMaps['ANAP.ResidueProfile'] = currentMap\n currentMap['tag'] = 'ANAP.ResidueProfile'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00005'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'residueProfiles'\n currentMap['class'] = ccpnmr.api.AnalysisProfile.ResidueProfile\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute ResidueProfile.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute ResidueProfile.ccpCode\n currentMap = {}\n contentMap['ccpCode'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00051'] = currentMap\n loadMaps['ANAP.ResidueProfile.ccpCode'] = currentMap\n currentMap['tag'] = 'ANAP.ResidueProfile.ccpCode'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00051'\n currentMap['name'] = 'ccpCode'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2007-09-12-18:31:28_00003')\n\n # Attribute ResidueProfile.guiName\n currentMap = {}\n contentMap['guiName'] = currentMap\n 
mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00052'] = currentMap\n loadMaps['ANAP.ResidueProfile.guiName'] = currentMap\n currentMap['tag'] = 'ANAP.ResidueProfile.guiName'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00052'\n currentMap['name'] = 'guiName'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2007-09-12-18:31:28_00003')\n\n # Attribute ResidueProfile.molType\n currentMap = {}\n contentMap['molType'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00050'] = currentMap\n loadMaps['ANAP.ResidueProfile.molType'] = currentMap\n currentMap['tag'] = 'ANAP.ResidueProfile.molType'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00050'\n currentMap['name'] = 'molType'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:52_00024')\n\n # Role ResidueProfile.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n # End of ResidueProfile\n\n currentMap = abstractTypes.get('ResidueProfile')\n aList = ['ccpCode', 'guiName', 'molType']\n currentMap['headerAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Out-of-package link to AnalysisProfile\n currentMap = {}\n exolinks['AnalysisProfile'] = currentMap\n loadMaps['ANAP.exo-AnalysisProfile'] = currentMap\n currentMap['tag'] = 'ANAP.exo-AnalysisProfile'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00004'\n currentMap['name'] = 'AnalysisProfile'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccpnmr.api.AnalysisProfile.AnalysisProfile\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n\n # Out-of-package link to ColorScheme\n currentMap = {}\n exolinks['ColorScheme'] = currentMap\n loadMaps['ANAP.exo-ColorScheme'] = currentMap\n currentMap['tag'] = 'ANAP.exo-ColorScheme'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-08-11:06:22_00002'\n currentMap['name'] = 'ColorScheme'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccpnmr.api.AnalysisProfile.ColorScheme\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n\n # Out-of-package link to Macro\n currentMap = {}\n exolinks['Macro'] = currentMap\n loadMaps['ANAP.exo-Macro'] = currentMap\n currentMap['tag'] = 'ANAP.exo-Macro'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-08-11:06:22_00001'\n currentMap['name'] = 'Macro'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccpnmr.api.AnalysisProfile.Macro\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032'))\n\n # Out-of-package link to RefExpProfile\n currentMap = {}\n exolinks['RefExpProfile'] = currentMap\n loadMaps['ANAP.exo-RefExpProfile'] = currentMap\n currentMap['tag'] = 'ANAP.exo-RefExpProfile'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00006'\n currentMap['name'] = 'RefExpProfile'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = 
ccpnmr.api.AnalysisProfile.RefExpProfile\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037'))\n\n # Out-of-package link to ResidueProfile\n currentMap = {}\n exolinks['ResidueProfile'] = currentMap\n loadMaps['ANAP.exo-ResidueProfile'] = currentMap\n currentMap['tag'] = 'ANAP.exo-ResidueProfile'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00005'\n currentMap['name'] = 'ResidueProfile'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccpnmr.api.AnalysisProfile.ResidueProfile\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:52_00024'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2007-09-12-18:31:28_00003'))", "def makeMapping(globalMap):\n \n from memops.xml.Implementation import bool2str, str2bool\n\n # Set up top level dictionaries\n loadMaps = globalMap.get('loadMaps')\n mapsByGuid = globalMap.get('mapsByGuid')\n\n abstractTypes = globalMap.get('CCLB').get('abstractTypes')\n exolinks = globalMap.get('CCLB').get('exolinks')\n\n # Class AtomLabel\n currentMap = {}\n abstractTypes['AtomLabel'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:25:09_00018'] = currentMap\n loadMaps['CCLB.AtomLabel'] = currentMap\n currentMap['tag'] = 'CCLB.AtomLabel'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:25:09_00018'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'atomLabels'\n currentMap['class'] = ccp.api.molecule.ChemCompLabel.AtomLabel\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute AtomLabel.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute AtomLabel.isotopeCode\n currentMap = {}\n contentMap['isotopeCode'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:37_00092'] = currentMap\n loadMaps['CCLB.AtomLabel.isotopeCode'] = currentMap\n currentMap['tag'] = 'CCLB.AtomLabel.isotopeCode'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:37_00092'\n currentMap['name'] = 'isotopeCode'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Attribute AtomLabel.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-10-24-11:30:14_00003'] = currentMap\n loadMaps['CCLB.AtomLabel.name'] = currentMap\n currentMap['tag'] = 'CCLB.AtomLabel.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-10-24-11:30:14_00003'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Attribute AtomLabel.subType\n currentMap = {}\n contentMap['subType'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-10-24-11:30:14_00004'] = currentMap\n loadMaps['CCLB.AtomLabel.subType'] = currentMap\n currentMap['tag'] = 'CCLB.AtomLabel.subType'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-10-24-11:30:14_00004'\n currentMap['name'] = 'subType'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['proc'] = 'direct'\n 
currentMap['default'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032')\n\n # Attribute AtomLabel.weight\n currentMap = {}\n contentMap['weight'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:37_00093'] = currentMap\n loadMaps['CCLB.AtomLabel.weight'] = currentMap\n currentMap['tag'] = 'CCLB.AtomLabel.weight'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:37_00093'\n currentMap['name'] = 'weight'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = 1.0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00009')\n\n # Role AtomLabel.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n # End of AtomLabel\n\n currentMap = abstractTypes.get('AtomLabel')\n aList = ['isotopeCode', 'name', 'subType', 'weight']\n currentMap['headerAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class ChemCompLabel\n currentMap = {}\n abstractTypes['ChemCompLabel'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:25:09_00014'] = currentMap\n loadMaps['CCLB.ChemCompLabel'] = currentMap\n currentMap['tag'] = 'CCLB.ChemCompLabel'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:25:09_00014'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'chemCompLabels'\n currentMap['class'] = ccp.api.molecule.ChemCompLabel.ChemCompLabel\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute ChemCompLabel.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute ChemCompLabel.ccpCode\n currentMap = {}\n contentMap['ccpCode'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:37_00073'] = currentMap\n loadMaps['CCLB.ChemCompLabel.ccpCode'] = currentMap\n currentMap['tag'] = 'CCLB.ChemCompLabel.ccpCode'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:37_00073'\n currentMap['name'] = 'ccpCode'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2007-09-12-18:31:28_00003')\n\n # Attribute ChemCompLabel.molType\n currentMap = {}\n contentMap['molType'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:37_00072'] = currentMap\n loadMaps['CCLB.ChemCompLabel.molType'] = currentMap\n currentMap['tag'] = 'CCLB.ChemCompLabel.molType'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:37_00072'\n currentMap['name'] = 'molType'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:52_00024')\n\n # Role ChemCompLabel.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role ChemCompLabel.isotopomers\n currentMap = {}\n contentMap['isotopomers'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-10-24-11:30:13_00001'] = currentMap\n loadMaps['CCLB.ChemCompLabel.isotopomers'] = currentMap\n currentMap['tag'] = 'CCLB.ChemCompLabel.isotopomers'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-10-24-11:30:13_00001'\n currentMap['name'] = 'isotopomers'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['content'] = globalMap.get('CCLB').get('abstractTypes')\n # End of ChemCompLabel\n\n 
currentMap = abstractTypes.get('ChemCompLabel')\n aList = ['ccpCode', 'molType']\n currentMap['headerAttrs'] = aList\n aList = ['isotopomers', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n aList = ['isotopomers']\n currentMap['children'] = aList\n\n # Class Isotopomer\n currentMap = {}\n abstractTypes['Isotopomer'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-10-24-11:28:54_00001'] = currentMap\n loadMaps['CCLB.Isotopomer'] = currentMap\n currentMap['tag'] = 'CCLB.Isotopomer'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-10-24-11:28:54_00001'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'isotopomers'\n currentMap['objkey'] = 'serial'\n currentMap['class'] = ccp.api.molecule.ChemCompLabel.Isotopomer\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute Isotopomer.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute Isotopomer.serial\n currentMap = {}\n contentMap['serial'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-10-24-11:30:14_00005'] = currentMap\n loadMaps['CCLB.Isotopomer.serial'] = currentMap\n currentMap['tag'] = 'CCLB.Isotopomer.serial'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-10-24-11:30:14_00005'\n currentMap['name'] = 'serial'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032')\n\n # Attribute Isotopomer.weight\n currentMap = {}\n contentMap['weight'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-10-24-11:30:14_00006'] = currentMap\n loadMaps['CCLB.Isotopomer.weight'] = currentMap\n currentMap['tag'] = 'CCLB.Isotopomer.weight'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-10-24-11:30:14_00006'\n currentMap['name'] = 'weight'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = 1.0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00009')\n\n # Role Isotopomer.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role Isotopomer.atomLabels\n currentMap = {}\n contentMap['atomLabels'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-10-24-11:30:14_00001'] = currentMap\n loadMaps['CCLB.Isotopomer.atomLabels'] = currentMap\n currentMap['tag'] = 'CCLB.Isotopomer.atomLabels'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-10-24-11:30:14_00001'\n currentMap['name'] = 'atomLabels'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['content'] = globalMap.get('CCLB').get('abstractTypes')\n # End of Isotopomer\n\n currentMap = abstractTypes.get('Isotopomer')\n aList = ['serial', 'weight']\n currentMap['headerAttrs'] = aList\n aList = ['atomLabels', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n aList = ['atomLabels']\n currentMap['children'] = aList\n\n # Class LabelingScheme\n currentMap = {}\n abstractTypes['LabelingScheme'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-01-24-12:23:14_00001'] = currentMap\n loadMaps['CCLB.LabelingScheme'] = currentMap\n currentMap['tag'] = 'CCLB.LabelingScheme'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-01-24-12:23:14_00001'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'labelingSchemes'\n currentMap['isTop'] = True\n currentMap['objkey'] = 'name'\n 
currentMap['class'] = ccp.api.molecule.ChemCompLabel.LabelingScheme\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute LabelingScheme.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute LabelingScheme.createdBy\n contentMap['createdBy'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:00:59_00002__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute LabelingScheme.details\n currentMap = {}\n contentMap['details'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-01-24-12:23:55_00005'] = currentMap\n loadMaps['CCLB.LabelingScheme.details'] = currentMap\n currentMap['tag'] = 'CCLB.LabelingScheme.details'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-01-24-12:23:55_00005'\n currentMap['name'] = 'details'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00036')\n\n # Attribute LabelingScheme.guid\n contentMap['guid'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:26_00002')\n\n # Attribute LabelingScheme.isModifiable\n contentMap['isModifiable'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-17-14:16:26_00010__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute LabelingScheme.lastUnlockedBy\n contentMap['lastUnlockedBy'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:00:59_00003__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute LabelingScheme.longName\n currentMap = {}\n contentMap['longName'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-01-24-12:23:55_00004'] = currentMap\n loadMaps['CCLB.LabelingScheme.longName'] = currentMap\n currentMap['tag'] = 'CCLB.LabelingScheme.longName'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-01-24-12:23:55_00004'\n currentMap['name'] = 'longName'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute LabelingScheme.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-01-24-12:23:55_00003'] = currentMap\n loadMaps['CCLB.LabelingScheme.name'] = currentMap\n currentMap['tag'] = 'CCLB.LabelingScheme.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-01-24-12:23:55_00003'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Role LabelingScheme.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role LabelingScheme.chemCompLabels\n currentMap = {}\n contentMap['chemCompLabels'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-01-24-12:23:55_00002'] = currentMap\n loadMaps['CCLB.LabelingScheme.chemCompLabels'] = currentMap\n currentMap['tag'] = 'CCLB.LabelingScheme.chemCompLabels'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-01-24-12:23:55_00002'\n currentMap['name'] = 'chemCompLabels'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('CCLB').get('abstractTypes')\n # End of 
LabelingScheme\n\n currentMap = abstractTypes.get('LabelingScheme')\n aList = ['createdBy', 'guid', 'isModifiable', 'lastUnlockedBy', 'name']\n currentMap['headerAttrs'] = aList\n aList = ['details', 'longName']\n currentMap['simpleAttrs'] = aList\n aList = ['chemCompLabels', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n aList = ['chemCompLabels']\n currentMap['children'] = aList\n\n # Out-of-package link to AtomLabel\n currentMap = {}\n exolinks['AtomLabel'] = currentMap\n loadMaps['CCLB.exo-AtomLabel'] = currentMap\n currentMap['tag'] = 'CCLB.exo-AtomLabel'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:25:09_00018'\n currentMap['name'] = 'AtomLabel'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.molecule.ChemCompLabel.AtomLabel\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:52_00024'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2007-09-12-18:31:28_00003'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037'))\n\n # Out-of-package link to ChemCompLabel\n currentMap = {}\n exolinks['ChemCompLabel'] = currentMap\n loadMaps['CCLB.exo-ChemCompLabel'] = currentMap\n currentMap['tag'] = 'CCLB.exo-ChemCompLabel'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:25:09_00014'\n currentMap['name'] = 'ChemCompLabel'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.molecule.ChemCompLabel.ChemCompLabel\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:52_00024'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2007-09-12-18:31:28_00003'))\n\n # Out-of-package link to Isotopomer\n currentMap = {}\n exolinks['Isotopomer'] = currentMap\n loadMaps['CCLB.exo-Isotopomer'] = currentMap\n currentMap['tag'] = 'CCLB.exo-Isotopomer'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-10-24-11:28:54_00001'\n currentMap['name'] = 'Isotopomer'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.molecule.ChemCompLabel.Isotopomer\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:52_00024'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2007-09-12-18:31:28_00003'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032'))\n\n # Out-of-package link to LabelingScheme\n currentMap = {}\n exolinks['LabelingScheme'] = currentMap\n loadMaps['CCLB.exo-LabelingScheme'] = currentMap\n currentMap['tag'] = 'CCLB.exo-LabelingScheme'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-01-24-12:23:14_00001'\n currentMap['name'] = 'LabelingScheme'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.molecule.ChemCompLabel.LabelingScheme\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))", "def codonComposition(self):#works\n 
return {codon: self.countDicNuc.get(codon) for codon in self.rnaCodonTable.keys()}", "def get_info(self):\n return {'q_ref': self.q_ref, 'v_ref': self.v_ref, 'U': self.U, 'type': 'POD'}", "def info(out: Export = Export(\"cwb.encoded/data/.info\"),\n sentences: AnnotationCommonData = AnnotationCommonData(\"misc.<sentence>_count\"),\n firstdate: AnnotationCommonData = AnnotationCommonData(\"cwb.datefirst\"),\n lastdate: AnnotationCommonData = AnnotationCommonData(\"cwb.datelast\"),\n resolution: AnnotationCommonData = AnnotationCommonData(\"dateformat.resolution\"),\n protected: bool = Config(\"korp.protected\")):\n create_info_file(sentences, firstdate, lastdate, resolution, protected, out)", "def map_protein_to_go(filename):\n\n try:\n with open(filename) as go_association_file:\n go_association = go_association_file.read()\n split_go_association = re.split(r\"\\n\", go_association)\n\n # Ignore the general file information, which is the line starting\n # with \"!\"\".\n go_association_info = []\n for line in split_go_association:\n if line and not line.startswith(\"!\"):\n go_association_info.append(line)\n\n # Declare the tuple to parse the protein and go term as a pair and\n # store it in the set to avoid duplicate situation\n go_protein_dict = {}\n for column in go_association_info:\n column_info = re.split(r\"\\t\", column)\n protein_id = column_info[1]\n go_term = column_info[4]\n\n if protein_id in go_protein_dict:\n go_protein_dict[protein_id].add(go_term)\n else:\n go_protein_dict[protein_id] = {go_term}\n return go_protein_dict\n\n except FileNotFoundError:\n return {}", "def DictProteomeNameToSeq(X, n):\n DictProtToSeq_UP = {}\n for rec2 in SeqIO.parse(X, \"fasta\"):\n UP_seq = str(rec2.seq)\n if n == \"full\":\n UP_name = rec2.description.split(\"HUMAN \")[1].split(\" OS\")[0]\n DictProtToSeq_UP[UP_name] = str(UP_seq)\n if n == \"gene\":\n try:\n UP_name = rec2.description.split(\" GN=\")[1].split(\" \")[0]\n DictProtToSeq_UP[UP_name] = str(UP_seq)\n except BaseException:\n continue\n return DictProtToSeq_UP", "def getInfo():", "def map_file_normalization_info(file_normalization_event):\n event_info = {}\n if not file_normalization_event:\n return\n try:\n event_info.update(\n {\n \"premis:outcome\": file_normalization_event.event_outcome_detail,\n }\n )\n if file_normalization_event.event_detail:\n event_info.update(\n {\n \"prov:softwareAgent\": file_normalization_event.event_detail.split(\n \";\"\n )[0],\n \"premis:version\": file_normalization_event.event_detail.split(\";\")[\n 1\n ],\n }\n )\n except IndexError:\n logger.info(\n \"name and version of the file normalization tool %s could not be\"\n \"determined. 
Check if it is well formed\",\n file_normalization_event.event_detail,\n )\n return event_info", "def metadata(self):\n return {\n \"wildtype\" : self.wildtype,\n \"genotypes\" : self.genotypes,\n \"phenotypes\" : self.Raw.phenotypes,\n \"stdeviations\" : self.stdeviations,\n \"n_replicates\" : self.n_replicates,\n \"mutations\" : self.mutations,\n \"log_transform\" : self.log_transform,\n \"order\" : self.order,\n \"epistasis\" : {\n \"keys\" : self.epistasis.keys,\n \"values\" : self.epistasis.values,\n }\n }", "def mkMsg(self):\n # getting the version of project_coords\n project_coords_cmd = 'project_coords --version'\n outp = popen2.Popen4(project_coords_cmd)\n outpline = outp.fromchild.readlines()\n pcoorVer = outpline[0].split()[-1]\n \n self.meta = {}\n self.meta['module']= []\n self.meta['meta'] = []\n self.meta['input'] = []\n self.meta['output']= []\n self.meta['errorlist'] = []\n \n self.meta['module'].append(('module','name='+self.modName,'version='+__version__,'dataset='+self.obsName))\n self.meta['module'].append(('root',self.root))\n self.meta['meta'].append(('meta',))\n self.meta['meta'].append(('depend',))\n self.meta['meta'].append(('pkg',))\n self.meta['meta'].append(('name','python'))\n self.meta['meta'].append(('version',pyversion.split()[0]))\n self.meta['meta'].append(('pkg',))\n self.meta['meta'].append(('name','pyfits'))\n self.meta['meta'].append(('version',pyfits.__version__.split()[0]))\n self.meta['meta'].append(('pkg',))\n self.meta['meta'].append(('name','project_coords'))\n self.meta['meta'].append(('version',pcoorVer))\n self.meta['meta'].append(('pkg',))\n self.meta['meta'].append(('name','Guide Star Catalog'))\n self.meta['meta'].append(('version',_URL_.split(\"/\")[-1].split(\"q\")[0]))\n \n # SExtractor info\n sub = subprocess.Popen(['sex', '--version'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, close_fds=True)\n outp = sub.stdout.readlines()\n name = outp[0].split()[0]\n ver = outp[0].split()[2]\n self.meta['meta'].append(('pkg',))\n self.meta['meta'].append(('name',name))\n self.meta['meta'].append(('version',ver))\n cmdline1 = 'sex fitsfile -c self.InParFileName'\n self.meta['meta'].append(('commandline',cmdline1))\n del outp,sub,name,ver\n \n if self.errorList:\n self.meta['errorlist'].append(('errorlist',))\n for pkg,err in self.errorList:\n self.meta['errorlist'].append(('erroritem',err,'frompkg='+pkg))\n \n # input section\n self.meta['input'].append(('input',))\n for f in self.inputList:\n if string.find(f,\"_asn\") == -1:\n self.meta['input'].append(('file','type=image/x-fits'))\n self.meta['input'].append(('name',os.path.join(\"Images\",f)))\n else:\n self.meta['input'].append(('file','type=image/x-fits'))\n self.meta['input'].append(('name',os.path.join(\"Images\",f)))\n \n # output section\n if self.outputList:\n self.meta['output'].append(('output',))\n for f in self.outputList.keys():\n if string.find(f,\".xml\") == -1:\n self.meta['output'].append(('file','type=image/x-fits'))\n self.meta['output'].append(('name',os.path.join(\"Images\",f)))\n for pred in self.outputList[f]:\n self.meta['output'].append(('predecessor',os.path.join(\"Images\",pred)))\n else:\n self.meta['output'].append(('file','type=text/xml'))\n self.meta['output'].append(('name',os.path.join(\"Images\",f)))\n for pred in self.outputList[f]:\n self.meta['output'].append(('predecessor',os.path.join(\"Images\",pred)))\n \n # pass this dictionary to the class pMessage...\n msgFile = os.path.join(self.messagedir,self.modName+\"_module.xml\")\n mmsg = 
pMessage(self.meta)\n mmsg.writeMsg(msgFile)\n return", "def __init__(self, dictionary):\n self.d = {}\n for word in dictionary:\n abbr = self.getAbbr(word)\n if abbr in self.d:\n self.d[abbr] += word,\n else:\n self.d[abbr] = [word]", "def get_aa_mut_info(coding_pos, somatic_base, gene_seq):\n # if no mutations return empty result\n if not somatic_base:\n aa_info = {'Reference Codon': [],\n 'Somatic Codon': [],\n 'Codon Pos': [],\n 'Reference Nuc': [],\n 'Reference AA': [],\n 'Somatic AA': []}\n return aa_info\n\n # get codon information into three lists\n ref_codon, codon_pos, pos_in_codon, ref_nuc = zip(*[cutils.pos_to_codon(gene_seq, p)\n for p in coding_pos])\n ref_codon, codon_pos, pos_in_codon, ref_nuc = list(ref_codon), list(codon_pos), list(pos_in_codon), list(ref_nuc)\n\n # construct codons for mutations\n mut_codon = [(list(x) if x != 'Splice_Site' else []) for x in ref_codon]\n for i in range(len(mut_codon)):\n # splice site mutations are not in a codon, so skip such mutations to\n # prevent an error\n if pos_in_codon[i] is not None:\n pc = pos_in_codon[i]\n mut_codon[i][pc] = somatic_base[i]\n mut_codon = [(''.join(x) if x else 'Splice_Site') for x in mut_codon]\n\n # output resulting info\n aa_info = {'Reference Codon': ref_codon,\n 'Somatic Codon': mut_codon,\n 'Codon Pos': codon_pos,\n 'Reference Nuc': ref_nuc,\n 'Reference AA': [(utils.codon_table[r] if (r in utils.codon_table) else None)\n for r in ref_codon],\n 'Somatic AA': [(utils.codon_table[s] if (s in utils.codon_table) else None)\n for s in mut_codon]}\n\n return aa_info", "def main():\r\n\r\n # contents = ['ATGGCCATGGCCCCCAGAACTGAGATCAATAGTACCCGTATTAACGGGTGA', 'MA'] # sample input\r\n contents = []\r\n for line in sys.stdin:\r\n contents.append(line.strip())\r\n myPeptide = GenomeEncoding(contents[0], contents[1])\r\n myPeptide.getCodonSeqs()\r\n myPeptide.getRevCodonSeqs()\r\n myPeptide.printEncodePep()", "def merge_canton_dict():\n final = open('data/cantonese/final.dat', 'w')\n\n with open('data/dict.dat', 'r') as mandarin_dict:\n for line in mandarin_dict.readlines():\n char = line.split(' ')[0]\n if len(char) > 1:\n\n skip = False # In case it is already found\n\n # Do we need this one translated? 
Check if it already exists\n with open('data/cantonese/dict.dat', 'r') as cantonese_dict:\n for cantonese_line in cantonese_dict.readlines():\n split = cantonese_line.split('\\t')\n if char == split[0]:\n skip = True\n continue\n\n if skip:\n continue\n\n tmp = [] # Store the cantonese info for each character\n\n for c in char:\n if re.match('[\\u4E00-\\u9FCC]', c):\n with open('data/cantonese/dict.dat', 'r') as cantonese_dict:\n for cantonese_line in cantonese_dict.readlines():\n split = cantonese_line.split('\\t')\n if c == split[0]:\n # Found the character as an exact match, now we want to store the cantonese pinyin\n # But some single characters have multiple pronunciations\n regex_result = re.search('\\[(.+?)\\]', cantonese_line)\n pinyin = regex_result.group(1)\n tmp.append(pinyin.split()[0]) # As I don't speak Cantonese, assume the first one\n break # The first one found is of higher quality\n\n if tmp:\n translation = (re.search('\\](.+)', line).group(1)[2:-1])\n translation = translation.split('/CL:', 1)[0] # Don't bother with the CL (measurewords)\n\n # Add only if all characters were translatable\n if len(tmp) == len(char):\n final.write('{0}\\t[{1}]{2}\\n'.format(char, ' '.join(tmp), translation))\n\n final.close()", "def build_positional_table(profile):\n prop_dict = {'pos': [], 'ref_base': [], 'cov': [], 'mismatch_rate': [], 'a_mism': [], 'g_mism': [], 't_mism': [],\n 'c_mism': [], 'arrest_rate': []}\n\n ref = sys.argv[3]\n print(ref.replace('__tt__', '|'))\n for line in profile:\n line1 = line.strip().split()\n if line1[0] == ref.replace('__tt__', '|') and start <= int(line1[1]) <= end:\n prop_dict['pos'].append(int(line1[1]))\n prop_dict['ref_base'].append(line1[2])\n prop_dict['cov'].append(int(line1[3]))\n prop_dict['mismatch_rate'].append(float(line1[5]))\n prop_dict['a_mism'].append(int(line1[6]) + int(line1[11]))\n prop_dict['g_mism'].append(int(line1[7]) + int(line1[12]))\n prop_dict['t_mism'].append(int(line1[8]) + int(line1[13]))\n prop_dict['c_mism'].append(int(line1[9]) + int(line1[14]))\n prop_dict['arrest_rate'].append(float(line1[-1]))\n\n return prop_dict", "def get_info(program):\n\n return INFO[program].copy()", "def dict_initialise(metadata, analysistype):\n for sample in metadata:\n sample[analysistype].dnaseq = dict()\n sample[analysistype].protseq = dict()\n sample[analysistype].ntindex = dict()\n sample[analysistype].aaindex = dict()\n sample[analysistype].ntalign = dict()\n sample[analysistype].aaalign = dict()\n sample[analysistype].aaidentity = dict()\n return metadata", "def produce_protein_chain_dict (inPath, outPath):\n chainMap = pd.read_table(inPath, sep=\"\\t\")\n proteins = set(chainMap[\"Query\"])\n proteinChains = {}\n for protein in proteins:\n proteinChains[protein] = set(chainMap.loc[chainMap[\"Query\"]==protein, \"Subject\"])\n with open(outPath, 'wb') as fOut:\n pickle.dump(proteinChains, fOut)", "def init_meta(self):\n self.meta = {}\n # Required (core)\n self.meta['ra'] = dict(ext=0, card='RA')\n self.meta['dec'] = dict(ext=0, card='DEC')\n self.meta['target'] = dict(ext=0, card='object')\n self.meta['idname'] = dict(ext=0, card='obsmode')\n self.meta['decker'] = dict(ext=0, card='MASKNAME')\n self.meta['binning'] = dict(card=None, compound=True) # Uses CCDSUM\n self.meta['detector']=dict(ext=0,card='detector')\n self.meta['mjd'] = dict(ext=0, card='MJD-OBS')\n self.meta['exptime'] = dict(ext=0, card='EXPTIME')\n self.meta['airmass'] = dict(ext=0, card='AIRMASS')\n self.meta['dispname'] = dict(ext=0, card='GRISM')\n 
self.meta['datasec'] = dict(ext=1, card='DATASEC')\n self.meta['dichroic'] = dict(ext=0, card='FILTER1')\n self.meta['instrument'] = dict(ext=0, card='INSTRUME')", "def create(self):\n\n for sample_name, sample_info in self.samples.items():\n if not sample_info[\"use_it\"]:\n continue\n process_name = sample_info[\"process_name_specific\"]\n sample_category = sample_info[\"sample_category\"]\n is_mc = (sample_info[\"type\"] == \"mc\")\n\n logging.info(\"Building dictionaries for sample %s...\" % process_name)\n for charge_selection in self.charge_selections:\n central_or_shift_extensions = [\"\", \"hadd\", \"addBackgrounds\"]\n central_or_shifts_extended = central_or_shift_extensions + self.central_or_shifts\n for central_or_shift_or_dummy in central_or_shifts_extended:\n process_name_extended = [ process_name, \"hadd\" ]\n for process_name_or_dummy in process_name_extended:\n if central_or_shift_or_dummy in [ \"hadd\" ] and process_name_or_dummy in [ \"hadd\" ]:\n continue\n if central_or_shift_or_dummy != \"central\" and central_or_shift_or_dummy not in central_or_shift_extensions:\n if not is_mc:\n continue\n if not self.accept_central_or_shift(central_or_shift_or_dummy, sample_info):\n continue\n\n key_dir = getKey(process_name_or_dummy, charge_selection, central_or_shift_or_dummy)\n for dir_type in [ DKEY_CFGS, DKEY_HIST, DKEY_LOGS, DKEY_RLES ]:\n initDict(self.dirs, [ key_dir, dir_type ])\n if dir_type in [ DKEY_CFGS, DKEY_LOGS ]:\n self.dirs[key_dir][dir_type] = os.path.join(self.get_dir_type(dir_type), dir_type, self.channel,\n \"_\".join([ charge_selection ]), process_name_or_dummy, central_or_shift_or_dummy)\n else:\n self.dirs[key_dir][dir_type] = os.path.join(self.outputDir, dir_type, self.channel,\n \"_\".join([ charge_selection ]), process_name_or_dummy)\n for subdirectory in [ \"comp_jetToTauFakeRate\", \"makePlots\" ]:\n key_dir = getKey(subdirectory)\n for dir_type in [ DKEY_CFGS, DKEY_HIST, DKEY_LOGS, DKEY_DCRD, DKEY_PLOT ]:\n initDict(self.dirs, [ key_dir, dir_type ])\n if dir_type in [ DKEY_CFGS, DKEY_LOGS, DKEY_DCRD, DKEY_PLOT ]:\n self.dirs[key_dir][dir_type] = os.path.join(self.get_dir_type(dir_type), dir_type, self.channel, subdirectory)\n else:\n self.dirs[key_dir][dir_type] = os.path.join(self.outputDir, dir_type, self.channel, subdirectory)\n for dir_type in [ DKEY_CFGS, DKEY_SCRIPTS, DKEY_HIST, DKEY_LOGS, DKEY_DCRD, DKEY_PLOT, DKEY_HADD_RT ]:\n initDict(self.dirs, [ dir_type ])\n if dir_type in [ DKEY_CFGS, DKEY_SCRIPTS, DKEY_LOGS, DKEY_DCRD, DKEY_PLOT, DKEY_HADD_RT ]:\n self.dirs[dir_type] = os.path.join(self.get_dir_type(dir_type), dir_type, self.channel)\n else:\n self.dirs[dir_type] = os.path.join(self.outputDir, dir_type, self.channel)\n\n numDirectories = 0\n for key in self.dirs.keys():\n if type(self.dirs[key]) == dict:\n numDirectories += len(self.dirs[key])\n else:\n numDirectories += 1\n logging.info(\"Creating directory structure (numDirectories = %i)\" % numDirectories)\n numDirectories_created = 0;\n frac = 1\n for key in self.dirs.keys():\n if type(self.dirs[key]) == dict:\n for dir_type in self.dirs[key].keys():\n create_if_not_exists(self.dirs[key][dir_type])\n numDirectories_created += len(self.dirs[key])\n else:\n create_if_not_exists(self.dirs[key])\n numDirectories_created = numDirectories_created + 1\n while 100*numDirectories_created >= frac*numDirectories:\n logging.info(\" %i%% completed\" % frac)\n frac = frac + 1\n logging.info(\"Done.\")\n\n inputFileLists = {}\n for sample_name, sample_info in self.samples.items():\n if not 
sample_info[\"use_it\"]:\n continue\n logging.info(\"Checking input files for sample %s\" % sample_info[\"process_name_specific\"])\n inputFileLists[sample_name] = generateInputFileList(sample_info, self.max_files_per_job)\n\n self.inputFileIds = {}\n for sample_name, sample_info in self.samples.items():\n if not sample_info[\"use_it\"]:\n continue\n\n process_name = sample_info[\"process_name_specific\"]\n inputFileList = inputFileLists[sample_name]\n\n logging.info(\"Creating configuration files to run '%s' for sample %s\" % (self.executable_analyze, process_name))\n\n is_mc = (sample_info[\"type\"] == \"mc\")\n sample_category = sample_info[\"sample_category\"]\n\n for charge_selection in self.charge_selections:\n for central_or_shift in self.central_or_shifts:\n \n if central_or_shift != \"central\" and not is_mc:\n continue\n if not self.accept_central_or_shift(central_or_shift, sample_info):\n continue\n\n # build config files for executing analysis code\n key_analyze_dir = getKey(process_name, charge_selection, central_or_shift)\n\n for jobId in inputFileList.keys():\n\n analyze_job_tuple = (process_name, charge_selection, central_or_shift, jobId)\n key_analyze_job = getKey(*analyze_job_tuple)\n ntupleFiles = inputFileList[jobId]\n if len(ntupleFiles) == 0:\n logging.warning(\"No input ntuples for %s --> skipping job !!\" % (key_analyze_job))\n continue\n\n cfgFile_modified_path = os.path.join(self.dirs[key_analyze_dir][DKEY_CFGS], \"analyze_%s_%s_%s_%i_cfg.py\" % analyze_job_tuple)\n logFile_path = os.path.join(self.dirs[key_analyze_dir][DKEY_LOGS], \"analyze_%s_%s_%s_%i.log\" % analyze_job_tuple)\n histogramFile_path = os.path.join(self.dirs[key_analyze_dir][DKEY_HIST], \"analyze_%s_%s_%s_%i.root\" % analyze_job_tuple)\n rleOutputFile_path = os.path.join(self.dirs[key_analyze_dir][DKEY_RLES], \"rle_%s_%s_%s_%i.txt\" % analyze_job_tuple) \\\n if self.select_rle_output else \"\"\n\n self.jobOptions_analyze[key_analyze_job] = {\n 'ntupleFiles' : ntupleFiles,\n 'cfgFile_modified' : cfgFile_modified_path,\n 'histogramFile' : histogramFile_path,\n 'logFile' : logFile_path,\n 'chargeSelection' : charge_selection,\n 'jet_minPt' : self.jet_minPt,\n 'jet_maxPt' : self.jet_maxPt,\n 'jet_minAbsEta' : self.jet_minAbsEta,\n 'jet_maxAbsEta' : self.jet_maxAbsEta,\n 'hadTau_selection_tight' : self.hadTau_selection_tight,\n 'hadTauSelection_denominator' : self.hadTau_selection_denominator,\n 'hadTauSelections_numerator' : self.hadTau_selections_numerator,\n 'trigMatchingOptions' : self.trigMatchingOptions,\n 'selEventsFileName_output' : rleOutputFile_path,\n 'absEtaBins' : self.absEtaBins,\n 'decayModes' : self.decayModes,\n 'central_or_shift' : central_or_shift,\n 'central_or_shifts_local' : [],\n 'apply_hlt_filter' : self.hlt_filter,\n }\n self.createCfg_analyze(self.jobOptions_analyze[key_analyze_job], sample_info)\n\n # initialize input and output file names for hadd_stage1\n key_hadd_stage1_dir = getKey(process_name, charge_selection)\n hadd_stage1_job_tuple = (process_name, charge_selection)\n key_hadd_stage1_job = getKey(*hadd_stage1_job_tuple)\n if not key_hadd_stage1_job in self.inputFiles_hadd_stage1:\n self.inputFiles_hadd_stage1[key_hadd_stage1_job] = []\n self.inputFiles_hadd_stage1[key_hadd_stage1_job].append(self.jobOptions_analyze[key_analyze_job]['histogramFile'])\n self.outputFile_hadd_stage1[key_hadd_stage1_job] = os.path.join(self.dirs[key_hadd_stage1_dir][DKEY_HIST],\n \"hadd_stage1_%s_%s.root\" % hadd_stage1_job_tuple)\n\n # initialize input and output file names for 
hadd_stage2\n key_hadd_stage1_job = getKey(process_name, charge_selection)\n key_hadd_stage2_dir = getKey(\"hadd\", charge_selection)\n key_hadd_stage2_job = getKey(charge_selection)\n if not key_hadd_stage2_job in self.inputFiles_hadd_stage2:\n self.inputFiles_hadd_stage2[key_hadd_stage2_job] = []\n self.inputFiles_hadd_stage2[key_hadd_stage2_job].append(self.outputFile_hadd_stage1[key_hadd_stage1_job])\n self.outputFile_hadd_stage2[key_hadd_stage2_job] = os.path.join(self.dirs[key_hadd_stage2_dir][DKEY_HIST],\n \"hadd_stage2_%s.root\" % charge_selection)\n\n logging.info(\"Creating configuration files for executing 'comp_jetToTauFakeRate'\")\n for charge_selection in self.charge_selections:\n charge_key = \"comp_%s\" % charge_selection\n self.comp_input_files[charge_key] = []\n for trigMatchingOption in self.trigMatchingOptions:\n key_hadd_stage2_job = getKey(charge_selection)\n key_comp_jetToTauFakeRate_dir = getKey(\"comp_jetToTauFakeRate\")\n key_comp_jetToTauFakeRate_job = getKey(charge_selection, trigMatchingOption)\n self.jobOptions_comp_jetToTauFakeRate[key_comp_jetToTauFakeRate_job] = {\n 'inputFile' : self.outputFile_hadd_stage2[key_hadd_stage2_job],\n 'cfgFile_modified' : os.path.join(\n self.dirs[DKEY_CFGS], \"comp_jetToTauFakeRate_%s_%s_cfg.py\" % (charge_selection, trigMatchingOption)),\n 'outputFile' : os.path.join(\n self.dirs[DKEY_HIST], \"comp_jetToTauFakeRate_%s_%s.root\" % (charge_selection, trigMatchingOption)),\n 'logFile' : os.path.join(\n self.dirs[DKEY_LOGS], \"comp_jetToTauFakeRate_%s_%s.log\" % (charge_selection, trigMatchingOption)),\n 'looseRegion' : \"jetToTauFakeRate_%s_%s/denominator/\" % (charge_selection, trigMatchingOption),\n 'tightRegion' : \"jetToTauFakeRate_%s_%s/numerator/\" % (charge_selection, trigMatchingOption),\n 'absEtaBins' : self.absEtaBins,\n 'ptBins' : self.ptBins,\n 'decayModes' : self.decayModes,\n 'hadTauSelections' : self.hadTau_selections_numerator,\n 'trigMatchingOption' : trigMatchingOption,\n 'plots_outputFileName' : os.path.join(self.dirs[key_comp_jetToTauFakeRate_dir][DKEY_PLOT], \"comp_jetToTauFakeRate_%s.png\" % trigMatchingOption)\n }\n self.createCfg_comp_jetToTauFakeRate(self.jobOptions_comp_jetToTauFakeRate[key_comp_jetToTauFakeRate_job])\n comp_output = self.jobOptions_comp_jetToTauFakeRate[key_comp_jetToTauFakeRate_job]['outputFile']\n self.targets.append(comp_output)\n self.comp_input_files[charge_key].append(comp_output)\n self.comp_output_files[charge_key] = os.path.join(self.dirs[DKEY_HIST], \"comp_jetToTauFakeRate_%s.root\" % charge_selection)\n\n logging.info(\"Creating configuration files to run 'makePlots'\")\n for charge_selection in self.charge_selections:\n key_hadd_stage2_job = getKey(charge_selection)\n key_makePlots_dir = getKey(\"makePlots\")\n key_makePlots_job = getKey(charge_selection) \n self.jobOptions_make_plots[key_makePlots_job] = {\n 'executable' : self.executable_make_plots,\n 'inputFile' : self.outputFile_hadd_stage2[key_hadd_stage2_job],\n 'cfgFile_modified' : os.path.join(\n self.dirs[key_makePlots_dir][DKEY_CFGS], \"makePlots_%s_cfg.py\" % self.channel),\n 'outputFile' : os.path.join(\n self.dirs[key_makePlots_dir][DKEY_PLOT], \"makePlots_%s.png\" % self.channel),\n 'histogramDir' : \"jetToTauFakeRate_%s\" % charge_selection,\n 'label' : None,\n 'make_plots_backgrounds' : self.make_plots_backgrounds\n }\n self.createCfg_makePlots(self.jobOptions_make_plots[key_makePlots_job])\n for trigMatchingOption in self.trigMatchingOptions:\n self.cfgFile_make_plots = self.cfgFile_make_plots_denominator\n 
for absEtaBin in [ \"absEtaLt1_5\", \"absEta1_5to9_9\" ]:\n key_hadd_stage2_job = getKey(charge_selection)\n key_makePlots_job = getKey(charge_selection, trigMatchingOption, absEtaBin, \"denominator\") \n self.jobOptions_make_plots[key_makePlots_job] = {\n 'executable' : self.executable_make_plots,\n 'inputFile' : self.outputFile_hadd_stage2[key_hadd_stage2_job],\n 'cfgFile_modified' : os.path.join(\n self.dirs[key_makePlots_dir][DKEY_CFGS], \"makePlots_%s_%s_%s_denominator_%s_cfg.py\" % \\\n (self.channel, charge_selection, trigMatchingOption, absEtaBin)),\n 'outputFile' : os.path.join(\n self.dirs[key_makePlots_dir][DKEY_PLOT], \"makePlots_%s_%s_%s_denominator_%s.png\" % (self.channel, charge_selection, trigMatchingOption, absEtaBin)),\n 'histogramDir' : \"jetToTauFakeRate_%s_%s/denominator/%s\" % (charge_selection, trigMatchingOption, absEtaBin),\n 'label' : None,\n 'make_plots_backgrounds' : self.make_plots_backgrounds\n }\n self.createCfg_makePlots(self.jobOptions_make_plots[key_makePlots_job])\n for hadTau_selection_numerator in self.hadTau_selections_numerator:\n key_hadd_stage2_job = getKey(charge_selection)\n key_makePlots_job = getKey(charge_selection, trigMatchingOption, absEtaBin, \"numerator\", hadTau_selection_numerator)\n self.jobOptions_make_plots[key_makePlots_job] = {\n 'executable' : self.executable_make_plots,\n 'inputFile' : self.outputFile_hadd_stage2[key_hadd_stage2_job],\n 'cfgFile_modified' : os.path.join(\n self.dirs[key_makePlots_dir][DKEY_CFGS], \"makePlots_%s_%s_%s_numerator_%s_%s_cfg.py\" % \\\n (self.channel, charge_selection, trigMatchingOption, hadTau_selection_numerator, absEtaBin)),\n 'outputFile' : os.path.join(\n self.dirs[key_makePlots_dir][DKEY_PLOT], \"makePlots_%s_%s_%s_numerator_%s_%s.png\" % \\\n (self.channel, charge_selection, trigMatchingOption, hadTau_selection_numerator, absEtaBin)),\n 'histogramDir' : \"jetToTauFakeRate_%s_%s/numerator/%s/%s\" % (charge_selection, trigMatchingOption, hadTau_selection_numerator, absEtaBin),\n 'label' : None,\n 'make_plots_backgrounds' : self.make_plots_backgrounds\n }\n self.createCfg_makePlots(self.jobOptions_make_plots[key_makePlots_job])\n\n self.sbatchFile_analyze = os.path.join(self.dirs[DKEY_SCRIPTS], \"sbatch_analyze_%s.py\" % self.channel)\n self.sbatchFile_comp_jetToTauFakeRate = os.path.join(self.dirs[DKEY_SCRIPTS], \"sbatch_comp_jetToTauFakeRate.py\")\n if self.is_sbatch:\n logging.info(\"Creating script for submitting '%s' jobs to batch system\" % self.executable_analyze)\n self.createScript_sbatch_analyze(self.executable_analyze, self.sbatchFile_analyze, self.jobOptions_analyze)\n logging.info(\"Creating script for submitting '%s' jobs to batch system\" % self.executable_comp_jetToTauFakeRate)\n self.createScript_sbatch(self.executable_comp_jetToTauFakeRate, self.sbatchFile_comp_jetToTauFakeRate, self.jobOptions_comp_jetToTauFakeRate)\n\n lines_makefile = []\n self.addToMakefile_analyze(lines_makefile)\n self.addToMakefile_hadd_stage1(lines_makefile)\n self.addToMakefile_hadd_stage2(lines_makefile, make_dependency = \"phony_hadd_stage1\", max_mem = '4096M')\n self.addToMakefile_comp_jetToTauFakeRate(lines_makefile)\n self.addToMakefile_comp_hadd(lines_makefile)\n self.addToMakefile_make_plots(lines_makefile)\n self.createMakefile(lines_makefile)\n\n logging.info(\"Done.\")\n\n return self.num_jobs", "def make_protein_group(self):\r\n prot_names = [\r\n 'Ala', 'Arg', 'Asn', 'Asp', 'Cys', 'Gln', 'Glu',\r\n 'Gly', 'His', 'Ile', 'Leu', 'Lys', 'Met', 'Phe',\r\n 'Pro', 'Ser', 'Thr', 'Trp', 'Tyr', 
'Val'\r\n ]\r\n self.__make_group_by_res('Protein', prot_names)", "def pdb2pka_sugelm(self):\n import Protool\n P=Protool.structureIO()\n P.readpdb(self.pdbfile)\n P.RemoveALT()\n #import Protool.mutate\n #MUT=Protool.mutate.Mutate(P)\n #\n # Construct arrays\n #\n import pKD_dict\n self.data=pKD_dict.pKD_dict()\n self.atom_data=pKD_dict.pKD_dict()\n #\n # Create dir for mutant PDB files\n #\n import os\n mutdir=os.path.join(self.topdir,self.pdbfile+'.pdbs')\n if not os.path.isdir(mutdir):\n os.mkdir(mutdir)\n #\n # Loop over all residues\n #\n residues=P.residues.keys()\n residues.sort()\n for residue in residues:\n orgres=P.resname(residue)\n print 'Calculating for %s %s' %(residue,P.resname(residue))\n #\n # If neutral mutate to Asp, Glu, Lys, Arg, His\n #\n targets=[]\n for res in ['ARG','LYS','HIS','ASP','GLU']:\n if P.resname(residue)!=res:\n targets.append(res)\n #if orgres=='GLU':\n # targets.append('GLN')\n #elif orgres=='ASP':\n # targets.append('ASN')\n #elif orgres=='HIS':\n # targets.append('PHE')\n #elif orgres=='ARG' or P.resname(residue)=='LYS':\n # targets.append('MET')\n #\n # Target identified. Now model each\n #\n for target in targets:\n import pKD_tools\n resid=pKD_tools.get_resid_from_res(residue)\n orgres=P.resname(residue)\n filename=os.path.join(mutdir,'%s:%s:%s.pdb' %(residue,orgres,target))\n mutation='%s:%s:%s' %(residue,orgres,target)\n if not os.path.isfile(filename):\n import Design_pKa_help\n Design_pKa_help.make_mutation(self.pdbfile,mutation)\n NP=Protool.structureIO()\n NP.readpdb(filename)\n NP.writepdb(filename,TER=None)\n #\n # Calculate the interaction energies\n #\n protein,routines,forcefield,apbs_setup,lig_titgrps = pdb2pka.pre_init(pdbfilename=filename,\n ff='parse',\n ligand=None,\n verbose=1)\n mypkaRoutines = pdb2pka.pKaRoutines(protein, routines, forcefield,apbs_setup)\n #\n # Find our group\n #\n sp=residue.split(':')\n chainid=sp[0]\n resnum=int(sp[1])\n mypkaRoutines.findTitratableGroups()\n this_pKa=None\n for pKa in mypkaRoutines.pKas:\n print pKa.residue.resSeq,resnum\n print pKa.residue.chainID,chainid\n print pKa.residue.name,target\n print pKa.pKaGroup.name,target\n print '--------------'\n print 'ChainID',pKa.residue.chainID\n if pKa.residue.resSeq==resnum and pKa.residue.chainID==chainid and pKa.residue.name==target and pKa.pKaGroup.name==target:\n #print 'Found group',pKa.residue.resSeq,pKa.pKaGroup.name\n this_pKa=pKa\n break\n if not this_pKa:\n raise Exception,'Could not find inserted titratable group'\n mypkaRoutines.get_interaction_energies_setup(this_pKa,mode='pKD')\n matrix=mypkaRoutines.matrix\n #\n # Dig the interaction energies out of the pdb2pka array\n #\n for titration1 in matrix[this_pKa].keys():\n for state1 in matrix[this_pKa][titration1].keys():\n grp_sub=matrix[this_pKa][titration1][state1]\n if mypkaRoutines.is_charged(this_pKa,titration1,state1):\n for pKa2 in grp_sub.keys(): \n import string\n chainID2=pKa.residue.chainID\n resid2='%s:%s' %(chainID2,string.zfill(pKa2.residue.resSeq,4))\n for titration2 in grp_sub[pKa2].keys():\n for state2 in grp_sub[pKa2][titration2].keys():\n if mypkaRoutines.is_charged(pKa2,titration2,state2):\n #\n # Both states are charged, so now we can pull the\n # interaction energies out\n #\n if not self.data.has_key(mutation):\n self.data[mutation]={}\n self.data[mutation][resid2]=grp_sub[pKa2][titration2][state2]\n #\n # Get the potentials at all atoms too\n #\n all_pots=mypkaRoutines.all_potentials[this_pKa][titration1][state1]\n sub_all_pots=all_pots[pKa2][titration2][state2]\n 
for atom in sub_all_pots.keys():\n resid=mutation\n import pKD_tools\n resid2=pKD_tools.get_resid_from_res(atom)\n atomname=atom.split(':')[-1] #atom.name\n if atomname[0]=='H' or atomname in ['N','C','O']:\n continue # Skip all H atoms and all non-CA backbone atoms to save memory\n if not self.atom_data.has_key(resid):\n self.atom_data[resid]={}\n if not self.atom_data[resid].has_key(resid2):\n self.atom_data[resid][resid2]={}\n self.atom_data[resid][resid2][atomname]=abs(sub_all_pots[atom])\n return self.data,self.atom_data", "def _compute_plain_from_map(self):\n # get PDBID\n pdbid = get_id(self.pdb_path)\n\n # write hydrogen bonds to the plain file\n with open(conf.temp_dir + os.path.sep + pdbid + '.hb', 'w') as f:\n\n for pos in self.potential_map:\n a = str(pos[0])\n b = str(pos[1])\n c = str(self.potential_map[pos][0])\n d = str(self.potential_map[pos][1])\n e = [a, b, c, d]\n\n f.write(' '.join(e) + os.linesep)", "def createAllDictionaries(self):\r\n self.makeSentenceLengths()\r\n self.makeWords()\r\n self.makeStems()\r\n self.makeGerund()\r\n self.makeWordLengths()", "def produce_proteinSeq_dict (inPath, outPath):\n s = list(SeqIO.parse(str(inPath), 'fasta'))\n proteinSeq = {}\n for _, elm in enumerate(s):\n proteinSeq[elm.id] = str(elm.seq)\n with open(outPath, 'wb') as fOut:\n pickle.dump(proteinSeq, fOut)", "def _meta_dict(self, node):\n meta = {n: self._text(node, n) for n in ('source', 'date', 'key')}\n meta.update(self.infon_dict(node))\n return meta", "def make_phys():\n for rn in dcm_dict.keys():\n # PPG\n if not dcm_dict[rn]['ppg_file'] == 'File missing':\n # Files\n ppg_tsv = os.path.join(out_dir,subject+'_'+dcm_dict[rn]['out_name']+'_physio-cardiac.tsv.gz')\n ppg_json = os.path.join(out_dir,subject+'_'+dcm_dict[rn]['out_name']+'_physio-cardiac.json')\n # TSV\n gzip_file(dcm_dict[rn]['ppg_file'],ppg_tsv)\n # JSON\n data = OrderedDict()\n data['SamplingFrequency'] = 100.0\n data['StartTime'] = -30.0\n data['Columns'] = 'cardiac'\n with open(ppg_json, 'w') as ff:\n json.dump(data, ff,sort_keys=False, indent=4)\n # Respiration\n if not dcm_dict[rn]['resp_file'] == 'File missing':\n # Files\n resp_tsv = os.path.join(out_dir,subject+'_'+dcm_dict[rn]['out_name']+'_physio-respiratory.tsv.gz')\n resp_json = os.path.join(out_dir,subject+'_'+dcm_dict[rn]['out_name']+'_physio-respiratory.json')\n # TSV\n gzip_file(dcm_dict[rn]['resp_file'],resp_tsv)\n # JSON\n data = OrderedDict()\n data['SamplingFrequency'] = 25.0\n data['StartTime'] = -30.0\n data['Columns'] = 'respiratory'\n with open(resp_json, 'w') as ff:\n json.dump(data, ff,sort_keys=False, indent=4)\n # ECG\n # What to do if they have PPG and ECG?\n if not dcm_dict[rn]['ecg_file'] == 'File missing':\n # Files\n ecg_tsv = os.path.join(out_dir,subject+'_'+dcm_dict[rn]['out_name']+'_physio-cardiac.tsv.gz')\n ecg_json = os.path.join(out_dir,subject+'_'+dcm_dict[rn]['out_name']+'_physio-cardiac.json')\n # TSV\n gzip_file(dcm_dict[rn]['resp_file'],resp_tsv)\n # JSON\n data = OrderedDict()\n data['SamplingFrequency'] = 1000.0\n data['StartTime'] = -30.0\n data['Columns'] = 'cardiac'\n with open(resp_json, 'w') as ff:\n json.dump(data, ff,sort_keys=False, indent=4)", "def info(self) -> str:\n info = \"\"\n\n # Ensure we have a latitude and a longitude\n if self.latitude is None:\n raise GenerateError(\"Missing latitude\")\n elif self.longitude is None:\n raise GenerateError(\"Missing longitude\")\n\n # Ensure we have a symbol table and symbol ID\n if self.symbol_table is None:\n raise GenerateError(\"Missing symbol table\")\n elif 
self.symbol_id is None:\n raise GenerateError(\"Missing symbol ID\")\n\n # Set data type ID\n if self.timestamp is None:\n if self.messaging is False:\n self.data_type_id = \"!\"\n else:\n self.data_type_id = \"=\"\n else:\n if self.messaging is False:\n self.data_type_id = \"/\"\n else:\n self.data_type_id = \"@\"\n\n # Set the timestamp\n info += APRSUtils.encode_timestamp(self.timestamp, self.timestamp_type)\n\n if self.compressed:\n # Add the position in a compressed format\n info += self._generate_compressed_position(\n self.latitude, self.longitude, self.symbol_table, self.symbol_id, self.altitude,\n self.course, self.speed, self.radio_range, self.compression_fix,\n self.compression_source, self.compression_origin)\n\n # PHG, etc is not supported for compressed formats (see APRS 1.01 C9 P36)\n if self.comment:\n info += self.comment\n\n else:\n # Add the position in an uncompressed format\n # TODO: handle BRG/NRQ\n info += self._generate_uncompressed_position(\n self.latitude, self.longitude, self.symbol_table, self.symbol_id, self.ambiguity\n )\n\n # Handle PHG\n if self.power is not None and self.height is not None and self.gain is not None \\\n and self.directivity is not None:\n phg = APRSUtils.encode_phg(self.power, self.height, self.gain, self.directivity)\n info += \"PHG{}\".format(\n self._generate_data(phg=phg, altitude=self.altitude, comment=self.comment)\n )\n\n # Handle DFS\n elif self.strength is not None and self.height is not None and self.gain is not None \\\n and self.directivity is not None:\n dfs = APRSUtils.encode_dfs(self.strength, self.height, self.gain, self.directivity)\n info += \"DFS{}\".format(\n self._generate_data(dfs=dfs, altitude=self.altitude, comment=self.comment)\n )\n\n # Handle course/speed\n elif self.course is not None and self.speed is not None:\n info += \"{}/{}\".format(\n str(self.course).zfill(3),\n str(self.speed).zfill(3)\n )\n info += self._generate_data(altitude=self.altitude, comment=self.comment)\n\n # Handle RNG\n elif self.radio_range is not None:\n info += \"RNG{}\".format(\n str(self.radio_range).zfill(4)\n )\n info += self._generate_data(altitude=self.altitude, comment=self.comment)\n\n else:\n info += self._generate_data(altitude=self.altitude, comment=self.comment)\n\n return info", "def createIndivitual(self) -> Dict[str, Any]:\n ind = {\n \"genome\": {\n key: numpy.random.randint(0, len(value), size=self.ref_count[key]) for (\n key, value) in self.grammar.items()\n },\n \"fitness\": None,\n \"fenotype\": None,\n }\n return ind", "def _process_cdss(self, prot_fasta_path):\n if self.is_metagenome:\n prot_fasta = {} # type: dict\n untranslatable_prot = set()\n for cds_id in self.cdss:\n cds = self.feature_dict[cds_id]\n try:\n prot_seq = str(Seq(cds['dna_sequence']).translate(\n self.code_table, cds=True).strip(\"*\"))\n except TranslationError as e:\n cds['warnings'] = cds.get('warnings', []) + [str(e)]\n # NOTE: we may need a different way of handling this for metagenomes.\n prot_seq = \"\"\n if self.is_metagenome:\n untranslatable_prot.add(cds_id)\n\n if self.is_metagenome:\n if prot_seq != \"\":\n protein_id = \"\"\n if cds.get(\"aliases\"):\n aliases = cds['aliases']\n for key, val in aliases:\n if key == \"protein_id\":\n protein_id = val\n if not protein_id:\n protein_id = cds['id'] # assign to some default\n else:\n # log a warning here?\n pass\n # TODO: update header to reflect what we actually want people\n # to see.\n if protein_id in prot_fasta:\n prot_fasta[protein_id][0] += \"|\" + cds['id']\n else:\n fasta_seq_data 
= \">\" + protein_id + \" cds_ids:\" + cds['id']\n prot_fasta[protein_id] = [fasta_seq_data, prot_seq]\n else:\n pass\n\n else:\n cds.update({\n \"protein_translation\": prot_seq,\n \"protein_md5\": hashlib.md5(prot_seq.encode('utf8')).hexdigest(),\n \"protein_translation_length\": len(prot_seq),\n })\n\n if 'parent_gene' in cds:\n parent_gene = self.feature_dict[cds['parent_gene']]\n # no propigation for now\n propagate_cds_props_to_gene(cds, parent_gene, self.is_metagenome)\n elif self.generate_genes:\n spoof = copy.copy(cds)\n spoof['type'] = 'gene'\n spoof['id'] = cds['id']+\"_gene\"\n spoof['cdss'] = [cds['id']]\n spoof['warnings'] = [warnings['spoofed_gene'].format(cds['id'])]\n self.feature_dict[spoof['id']] = spoof\n cds['parent_gene'] = spoof['id']\n self.spoof_gene_count += 1\n else:\n raise ValueError(warnings['no_spoof'])\n\n self.feature_dict[cds['id']] = cds\n\n if self.is_metagenome:\n with open(prot_fasta_path, 'w') as fid:\n for key, line in prot_fasta.items():\n fid.write('\\n'.join(line))\n # do something with 'untranslatable_prot'", "def inizializzazione(fileInput, geneNames):\n\t\n\tdictTranscript \t= {}\n\tdictGenes \t\t= {}\n\tdictEsoni \t\t= {}\n\tdictIntroni \t= {}\n\tdictGeneChr \t= {}\n\n\t# - Filtraggio file di annotazione in input per 'exon' e per nome gene\n\t# - Calcolo delle coordinate dei geni nei cromosomi\n\t#\n\tlines, dictGeneChr = filtraFileDiAnn(fileInput, geneNames)\n\t\n\t\n\t# Indici all'interno del dizionario degli esoni\n\t#\n\tidx_starts \t= 0\n\tidx_ends \t= 1\n\tidx_strand \t= 2\n\t\n\t# Indici all'interno del dizionario dei Geni\n\t#\n\tidx_transcripts = 2\n\n\n\t# Creazione dei dizionari utili alla risoluzione del problema B\n\t#\n\tfor riga in lines:\n\t\tcromosoma \t\t= riga[0]\n\t\tstart_esone \t= riga[3]\n\t\tend_esone \t\t= riga[4]\n\t\tstrand \t\t\t= riga[6]\n\t\tgeneName \t\t= riga[11]\n\t\ttranscriptName \t= riga[12]\n\t\t\n\t\tTranscriptID \t= riga[9]\n\t\tGeneID \t\t\t= riga[8]\n\t\n\t\t# Creazione del dizionario dei transcritti\n\t\t#\n\t\tdictTranscript[TranscriptID] = [transcriptName, GeneID]\n\t\t\n\t\t# Creazione del dizionario dei geni\n\t\t#\n\t\tif not dictGenes.has_key(GeneID):\t\t\t\t\t\t\t\t\t\t# Se il GeneID non e' presente..\n\t\t\tdictGenes[GeneID] = [geneName, cromosoma, [TranscriptID]]\t\t\t# ..nel dizionario (come key)\n\t\telif TranscriptID not in dictGenes[GeneID][idx_transcripts]:\t\t\t# Se il GeneID e' presente ma non lo e'..\n\t\t\tdictGenes[GeneID][idx_transcripts].append(TranscriptID)\t\t\t\t# ..il TranscriptID questo si aggiunge alla lista\n\t\t\n\t\t# Creazione del dizionario degli esoni\n\t\t#\n\t\tif not dictEsoni.has_key(TranscriptID):\t\t\t\t\t\t \t# Se il TranscriptID non e' presente.. 
\n\t\t\tdictEsoni[TranscriptID] = [[start_esone],[end_esone],strand] \t# ..nel dizionario (come key)\n\t\telse:\n\t\t\tdictEsoni[TranscriptID][idx_starts].append(start_esone)\t\t\t \t# Il TranscriptID e' gia' presente quindi..\n\t\t\tdictEsoni[TranscriptID][idx_ends].append(end_esone)\t\t\t \t# ..si aggiunge l'esone alla lista degli esoni\n\t\t\t\n\t\t\t\n\t# Creazione del dizionario degli introni\n\t#\n\tfor TranscriptID in dictEsoni:\n\t\tesoniPerTranscript = len(dictEsoni[TranscriptID][idx_starts])\t \t# Si valuta il nr di esoni per TranscriptID corrente\n\t\t\n\t\tif int(esoniPerTranscript) > 1:\n\t\t\tstart_introni \t= []\t\t\t\t\t\t\t\t\t\t\t # Si preparano le variabili necessarie\n\t\t\tend_introni \t= []\n\t\t\t\n\t\t\tstart_esoni \t= []\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\tend_esoni \t\t= []\n\t\t\t\n\t\t\t# Si considera lo strand relativo al TranscriptID\n\t\t\t#\n\t\t\tif dictEsoni[TranscriptID][idx_strand] == '+':\t\t\t\t\t \t# Strand positivo -> esoni scritti in ordine crescente\n\t\t\t\tstrand = True\n\t\t\t\tstart_esoni = dictEsoni[TranscriptID][idx_starts]\n\t\t\t\tend_esoni \t= dictEsoni[TranscriptID][idx_ends]\n\t\t\t\t\n\t\t\telse:\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \t# Strand negativo -> esoni scritti in ordine inverso..\n\t\t\t\tstrand = False\t\t\t\t\t\t\t\t\t\t\t\t \t# ..e per comodita' sono invertiti in ordine crescente\n\t\t\t\tstart_esoni = dictEsoni[TranscriptID][idx_starts][::-1] \t \n\t\t\t\tend_esoni \t= dictEsoni[TranscriptID][idx_ends][::-1]\n\n\t\t\t# Calcolo delle regioni introniche\n\t\t\t#\n\t\t\ti = 0\n\t\t\twhile i < int(esoniPerTranscript) - 1:\t\t\t\t\t\t\t \t# Per ogni coppia di esoni\n\t\t\t\tif (int(start_esoni[i+1]) - int(end_esoni[i])) > 2:\t\t\t \t# Se la regione tra due esoni consecutivi e' > 2..\n\t\t\t\t\tstart_introni.append(int(end_esoni[i]) + 1)\t\t\t \t# ..(considerando che gli estremi dell'introne sono..\n\t\t\t\t\tend_introni.append(int(start_esoni[i+1]) - 1)\t\t \t \t#..interni a quelli dei due esoni consecutivi correnti)\n\t\t\t\ti += 1\n\t\t\t\n\t\t\tif not strand:\t\t\t\t\t\t\t\t\t\t\t\t \t# Si mantiene traccia del fatto che derivano da un..\n\t\t\t\tstart_introni.reverse()\t\t\t\t\t\t\t\t\t \t# ..TranscriptID con strand negativo..\n\t\t\t\tend_introni.reverse()\t\t\t\t\t\t\t\t\t\t\t# ..(si inverte l'ordine degli introni)\n\t\t\n\t\t\tdictIntroni[TranscriptID] = [start_introni, end_introni]\n\n\n\t# Si eliminano i geni che non presentano regioni introniche:\n\t# \t- dalla lista di tutti i geni si rimuovono quelli che hanno introni;\n\t#\t- dal dizionario si rimuovono quelli rimasti nella lista.\n\t#\n\ttuttiIGeni = geneNames.keys()\n\tfor TranscriptID in dictIntroni:\n\t\tgeneID = dictTranscript[TranscriptID][1]\n\t\tnomeGene = dictGenes[geneID][0]\n\t\t\n\t\tif nomeGene in tuttiIGeni:\n\t\t\ttuttiIGeni.remove(nomeGene)\n\n\n\tfor nomeGene in tuttiIGeni:\n\t\tdel geneNames[nomeGene]\n\t\tprint 'Il gene %s non presenta regioni introniche.' 
% nomeGene\n\n\n\treturn [dictTranscript, dictGenes, dictEsoni, dictIntroni, dictGeneChr]", "def get_sample_info(lines):\r\n mapping_data, header, comments = parse_mapping_file(lines)\r\n labels = [\"from\", \"to\", \"eweight\", \"consensus_lin\"]\r\n node_labels = [\"node_name\", \"node_disp_name\", \"ntype\", \"degree\",\r\n \"weighted_degree\", \"consensus_lin\"]\r\n cat_by_sample = {}\r\n sample_by_cat = defaultdict(list)\r\n meta_dict = {}\r\n category_labels = header[1:-1]\r\n labels.extend(category_labels)\r\n node_labels.extend(category_labels)\r\n label_list = [[] for c in category_labels]\r\n for r in mapping_data:\r\n categories = r[0:len(category_labels) + 1]\r\n sample = categories[0]\r\n meta_dict[sample] = ['\\t'.join(categories[1:]), 0]\r\n\r\n cat_by_sample[sample] = [(l.strip(), c.strip())\r\n for l, c in zip(category_labels, categories[1:])]\r\n\r\n cat_list = []\r\n for i, (l, c) in enumerate(zip(category_labels, categories[1:])):\r\n if c not in label_list[i]:\r\n label_list[i].append(c)\r\n l = l.strip()\r\n c = c.strip()\r\n cat_list.append((l, c))\r\n sample_by_cat[(l, c)].append(sample)\r\n\r\n cat_by_sample[sample] = cat_list\r\n\r\n return cat_by_sample, sample_by_cat, len(category_labels), meta_dict,\\\r\n labels, node_labels, label_list", "def data_from_result():\n return dict(\n gene=\"BRAF\",\n strand=\"-\",\n tx_pos_range=(1802, 1921),\n alt_pos_range=(140453074, 140453193),\n alt_aln_method=\"splign\",\n tx_exon_id=780494,\n alt_exon_id=1927263\n )", "def build_dictionary_gensim():\r\n\t# if load_dictionary_gensim():\r\n\t#\treturn\r\n\t\r\n\tglobal gensim_dictionary, common_corpus_list\r\n\t\r\n\tprint('\\nbuilding dictionary')\r\n\tgensim_dictionary = gensim.corpora.Dictionary()\r\n\t\r\n\tfor v in common_corpus_list:\r\n\t\tgensim_dictionary.add_documents([v[1].lower().split()])\r\n\t\t\r\n\tgensim_dictionary.save_as_text(paths.path_data_dictionary_txt)\r\n\tgensim_dictionary.save(paths.path_data_dictionary_dict)\r\n\r\n\t# print(gensim_dictionary.token2id)\r\n\tprint(gensim_dictionary)", "def exon_desc(gff3, fasta):\n seqs = {}\n for defline, seq in LocusPocus.fasta.parse(fasta):\n exonpos = defline[1:].split(' ')[1]\n seqs[exonpos] = seq\n\n rnaid_to_accession = dict()\n reported_exons = {}\n exons, cdss = [], {}\n start, stop = None, None\n moltypes = ['mRNA', 'tRNA', 'ncRNA', 'transcript', 'primary_transcript',\n 'V_gene_segment', 'D_gene_segment', 'J_gene_segment',\n 'C_gene_segment']\n for entry in gff3:\n for moltype in moltypes:\n if ('\\t%s\\t' % moltype) in entry:\n accession = re.search(r'accession=([^;\\n]+)', entry).group(1)\n tid = re.search(r'ID=([^;\\n]+)', entry).group(1)\n rnaid_to_accession[tid] = accession\n\n if '\\texon\\t' in entry:\n exons.append(entry)\n elif '\\tCDS\\t' in entry:\n fields = entry.split('\\t')\n pos = '%s_%s-%s%s' % (fields[0], fields[3], fields[4], fields[6])\n cdss[pos] = entry\n elif '\\tstart_codon\\t' in entry:\n start = entry\n elif '\\tstop_codon\\t' in entry:\n stop = entry\n elif entry.startswith('###'):\n if len(exons) == 0:\n continue\n xcept = False\n for exonpos in cdss:\n if ';exception=ribosomal slippage' in cdss[exonpos]:\n xcept = True\n if xcept:\n exons, cdss = [], {}\n start, stop = None, None\n continue\n assert start, 'No start codon for exon(s): %s' % exons[0]\n assert stop, 'No stop codon for exon(s): %s' % exons[0]\n for exon in exons:\n fields = exon.split('\\t')\n assert len(\n fields) == 9, 'entry does not have 9 fields: %s' % exon\n mrnaid = re.search(r'Parent=([^;\\n]+)', 
fields[8]).group(1)\n exonpos = '%s_%s-%s%s' % (fields[0],\n fields[3], fields[4], fields[6])\n if exonpos in reported_exons:\n continue\n exonlength = int(fields[4]) - int(fields[3]) + 1\n exonseq = seqs[exonpos]\n assert len(exonseq) == exonlength, \\\n 'exon \"%s\": length mismatch; gff=%d, fa=%d' % (\n exonpos, exonlength, len(exonseq))\n gccontent = gc_content(exonseq)\n gcskew = gc_skew(exonseq)\n ncontent = n_content(exonseq)\n context = exon_context(exon, start, stop)\n phase = None\n remainder = None\n if context == 'cds':\n cexon = cdss[exonpos]\n phase = int(cexon.split('\\t')[7])\n remainder = (exonlength - phase) % 3\n values = '%s %s %d %.3f %.3f %.3f %s %r %r' % (\n exonpos, rnaid_to_accession[mrnaid], exonlength, gccontent,\n gcskew, ncontent, context, phase, remainder)\n reported_exons[exonpos] = 1\n yield values.split(' ')\n exons, cdss = [], {}\n start, stop = None, None", "def __repr__(self):\r\n return {'name':self.name, 'weight':self.organ_weight_grams, 'vital organ': self.vital_organ, 'organ system': self.organ_system}", "def __repr__(self):\r\n return {'name':self.name, 'weight':self.organ_weight_grams, 'vital organ': self.vital_organ, 'organ system': self.organ_system, 'heart thickness': self.heart_thickness_cm, 'heart breadth': self.heart_breadth_cm, \"heart length\": self.heart_length_cm}", "def _make_information_storable( self, data ):\n\t\tpass", "def makeMapping(globalMap):\n \n from memops.xml.Implementation import bool2str, str2bool\n\n # Set up top level dictionaries\n loadMaps = globalMap.get('loadMaps')\n mapsByGuid = globalMap.get('mapsByGuid')\n\n abstractTypes = globalMap.get('CHEL').get('abstractTypes')\n exolinks = globalMap.get('CHEL').get('exolinks')\n\n # DataType HalfLifeType\n currentMap = {}\n abstractTypes['HalfLifeType'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-07-18:18:10_00002'] = currentMap\n loadMaps['CHEL.HalfLifeType'] = currentMap\n currentMap['tag'] = 'CHEL.HalfLifeType'\n currentMap['type'] = 'simple'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-07-18:18:10_00002'\n currentMap['toStr'] = 'text'\n currentMap['cnvrt'] = 'text'\n\n # Class ChemElement\n currentMap = {}\n abstractTypes['ChemElement'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:19:49_00004'] = currentMap\n loadMaps['CHEL.ChemElement'] = currentMap\n currentMap['tag'] = 'CHEL.ChemElement'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:19:49_00004'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'chemElements'\n currentMap['objkey'] = 'symbol'\n currentMap['class'] = ccp.api.molecule.ChemElement.ChemElement\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute ChemElement.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute ChemElement.atomNumber\n currentMap = {}\n contentMap['atomNumber'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00017'] = currentMap\n loadMaps['CHEL.ChemElement.atomNumber'] = currentMap\n currentMap['tag'] = 'CHEL.ChemElement.atomNumber'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00017'\n currentMap['name'] = 'atomNumber'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['proc'] = 'direct'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032')\n\n # Attribute ChemElement.atomicRadius\n currentMap = {}\n contentMap['atomicRadius'] = currentMap\n 
mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00018'] = currentMap\n loadMaps['CHEL.ChemElement.atomicRadius'] = currentMap\n currentMap['tag'] = 'CHEL.ChemElement.atomicRadius'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00018'\n currentMap['name'] = 'atomicRadius'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute ChemElement.covalentRadius\n currentMap = {}\n contentMap['covalentRadius'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00019'] = currentMap\n loadMaps['CHEL.ChemElement.covalentRadius'] = currentMap\n currentMap['tag'] = 'CHEL.ChemElement.covalentRadius'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00019'\n currentMap['name'] = 'covalentRadius'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute ChemElement.mass\n currentMap = {}\n contentMap['mass'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00020'] = currentMap\n loadMaps['CHEL.ChemElement.mass'] = currentMap\n currentMap['tag'] = 'CHEL.ChemElement.mass'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00020'\n currentMap['name'] = 'mass'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute ChemElement.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00021'] = currentMap\n loadMaps['CHEL.ChemElement.name'] = currentMap\n currentMap['tag'] = 'CHEL.ChemElement.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00021'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00055')\n\n # Attribute ChemElement.symbol\n currentMap = {}\n contentMap['symbol'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00015'] = currentMap\n loadMaps['CHEL.ChemElement.symbol'] = currentMap\n currentMap['tag'] = 'CHEL.ChemElement.symbol'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00015'\n currentMap['name'] = 'symbol'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00055')\n\n # Role ChemElement.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role ChemElement.isotopes\n currentMap = {}\n contentMap['isotopes'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00004'] = currentMap\n loadMaps['CHEL.ChemElement.isotopes'] = currentMap\n currentMap['tag'] = 'CHEL.ChemElement.isotopes'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00004'\n currentMap['name'] = 'isotopes'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['content'] = globalMap.get('CHEL').get('abstractTypes')\n # End of ChemElement\n\n currentMap = abstractTypes.get('ChemElement')\n aList = ['atomNumber', 'atomicRadius', 'covalentRadius', 'mass', 'name', 'symbol']\n currentMap['headerAttrs'] = aList\n aList = ['isotopes', 'access', 
'applicationData']\n currentMap['cplxAttrs'] = aList\n aList = ['isotopes']\n currentMap['children'] = aList\n\n # Class ChemElementStore\n currentMap = {}\n abstractTypes['ChemElementStore'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:19:49_00005'] = currentMap\n loadMaps['CHEL.ChemElementStore'] = currentMap\n currentMap['tag'] = 'CHEL.ChemElementStore'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:19:49_00005'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'chemElementStores'\n currentMap['isTop'] = True\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccp.api.molecule.ChemElement.ChemElementStore\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute ChemElementStore.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute ChemElementStore.createdBy\n contentMap['createdBy'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:00:59_00002__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute ChemElementStore.guid\n contentMap['guid'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:26_00002')\n\n # Attribute ChemElementStore.isModifiable\n contentMap['isModifiable'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-17-14:16:26_00010__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute ChemElementStore.lastUnlockedBy\n contentMap['lastUnlockedBy'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:00:59_00003__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute ChemElementStore.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00024'] = currentMap\n loadMaps['CHEL.ChemElementStore.name'] = currentMap\n currentMap['tag'] = 'CHEL.ChemElementStore.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00024'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Role ChemElementStore.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role ChemElementStore.chemElements\n currentMap = {}\n contentMap['chemElements'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00023'] = currentMap\n loadMaps['CHEL.ChemElementStore.chemElements'] = currentMap\n currentMap['tag'] = 'CHEL.ChemElementStore.chemElements'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00023'\n currentMap['name'] = 'chemElements'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('CHEL').get('abstractTypes')\n # End of ChemElementStore\n\n currentMap = abstractTypes.get('ChemElementStore')\n aList = ['createdBy', 'guid', 'isModifiable', 'lastUnlockedBy']\n currentMap['headerAttrs'] = aList\n aList = ['name']\n currentMap['simpleAttrs'] = aList\n aList = ['chemElements', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n aList = ['chemElements']\n currentMap['children'] = aList\n\n # Class Isotope\n currentMap = {}\n abstractTypes['Isotope'] = currentMap\n 
mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:19:49_00003'] = currentMap\n loadMaps['CHEL.Isotope'] = currentMap\n currentMap['tag'] = 'CHEL.Isotope'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:19:49_00003'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'isotopes'\n currentMap['objkey'] = 'massNumber'\n currentMap['class'] = ccp.api.molecule.ChemElement.Isotope\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute Isotope.abundance\n currentMap = {}\n contentMap['abundance'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00011'] = currentMap\n loadMaps['CHEL.Isotope.abundance'] = currentMap\n currentMap['tag'] = 'CHEL.Isotope.abundance'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00011'\n currentMap['name'] = 'abundance'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00058')\n\n # Attribute Isotope.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute Isotope.gyroMagneticRatio\n currentMap = {}\n contentMap['gyroMagneticRatio'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00008'] = currentMap\n loadMaps['CHEL.Isotope.gyroMagneticRatio'] = currentMap\n currentMap['tag'] = 'CHEL.Isotope.gyroMagneticRatio'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00008'\n currentMap['name'] = 'gyroMagneticRatio'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute Isotope.halfLife\n currentMap = {}\n contentMap['halfLife'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-07-18:18:13_00001'] = currentMap\n loadMaps['CHEL.Isotope.halfLife'] = currentMap\n currentMap['tag'] = 'CHEL.Isotope.halfLife'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-07-18:18:13_00001'\n currentMap['name'] = 'halfLife'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00007')\n\n # Attribute Isotope.halfLifeError\n currentMap = {}\n contentMap['halfLifeError'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-07-18:18:13_00002'] = currentMap\n loadMaps['CHEL.Isotope.halfLifeError'] = currentMap\n currentMap['tag'] = 'CHEL.Isotope.halfLifeError'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-07-18:18:13_00002'\n currentMap['name'] = 'halfLifeError'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00007')\n\n # Attribute Isotope.halfLifeType\n currentMap = {}\n contentMap['halfLifeType'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-07-18:18:13_00003'] = currentMap\n loadMaps['CHEL.Isotope.halfLifeType'] = currentMap\n currentMap['tag'] = 'CHEL.Isotope.halfLifeType'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-07-18:18:13_00003'\n currentMap['name'] = 'halfLifeType'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = 'unknown'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2007-06-07-18:18:10_00002')\n\n # Attribute Isotope.magneticMoment\n currentMap = {}\n contentMap['magneticMoment'] = currentMap\n 
mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00013'] = currentMap\n loadMaps['CHEL.Isotope.magneticMoment'] = currentMap\n currentMap['tag'] = 'CHEL.Isotope.magneticMoment'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00013'\n currentMap['name'] = 'magneticMoment'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute Isotope.mass\n currentMap = {}\n contentMap['mass'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00010'] = currentMap\n loadMaps['CHEL.Isotope.mass'] = currentMap\n currentMap['tag'] = 'CHEL.Isotope.mass'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00010'\n currentMap['name'] = 'mass'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute Isotope.massNumber\n currentMap = {}\n contentMap['massNumber'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00007'] = currentMap\n loadMaps['CHEL.Isotope.massNumber'] = currentMap\n currentMap['tag'] = 'CHEL.Isotope.massNumber'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00007'\n currentMap['name'] = 'massNumber'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['proc'] = 'direct'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032')\n\n # Attribute Isotope.quadrupoleMoment\n currentMap = {}\n contentMap['quadrupoleMoment'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00014'] = currentMap\n loadMaps['CHEL.Isotope.quadrupoleMoment'] = currentMap\n currentMap['tag'] = 'CHEL.Isotope.quadrupoleMoment'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00014'\n currentMap['name'] = 'quadrupoleMoment'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute Isotope.receptivity\n currentMap = {}\n contentMap['receptivity'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00012'] = currentMap\n loadMaps['CHEL.Isotope.receptivity'] = currentMap\n currentMap['tag'] = 'CHEL.Isotope.receptivity'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00012'\n currentMap['name'] = 'receptivity'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute Isotope.spin\n currentMap = {}\n contentMap['spin'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00009'] = currentMap\n loadMaps['CHEL.Isotope.spin'] = currentMap\n currentMap['tag'] = 'CHEL.Isotope.spin'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00009'\n currentMap['name'] = 'spin'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Role Isotope.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n # End of Isotope\n\n currentMap = abstractTypes.get('Isotope')\n aList = ['abundance', 'gyroMagneticRatio', 'halfLife', 'halfLifeError', 'halfLifeType', 'magneticMoment', 'mass', 'massNumber', 'quadrupoleMoment', 'receptivity', 'spin']\n 
currentMap['headerAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Out-of-package link to ChemElement\n currentMap = {}\n exolinks['ChemElement'] = currentMap\n loadMaps['CHEL.exo-ChemElement'] = currentMap\n currentMap['tag'] = 'CHEL.exo-ChemElement'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:19:49_00004'\n currentMap['name'] = 'ChemElement'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.molecule.ChemElement.ChemElement\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00055'))\n\n # Out-of-package link to ChemElementStore\n currentMap = {}\n exolinks['ChemElementStore'] = currentMap\n loadMaps['CHEL.exo-ChemElementStore'] = currentMap\n currentMap['tag'] = 'CHEL.exo-ChemElementStore'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:19:49_00005'\n currentMap['name'] = 'ChemElementStore'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.molecule.ChemElement.ChemElementStore\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n\n # Out-of-package link to Isotope\n currentMap = {}\n exolinks['Isotope'] = currentMap\n loadMaps['CHEL.exo-Isotope'] = currentMap\n currentMap['tag'] = 'CHEL.exo-Isotope'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:19:49_00003'\n currentMap['name'] = 'Isotope'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.molecule.ChemElement.Isotope\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00055'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032'))", "def from_info(self, info: Dict[str, Any]) -> None:", "def __init__(self):\n\t\t\n\t\tself.ps = '%'\n\t\t\n\t\tself.codonTable = { \"UUU\":\"F\", \"UUC\":\"F\", \"UUA\":\"L\", \"UUG\":\"L\",\n\t\t\t\t\t\t\t\"UCU\":\"S\", \"UCC\":\"s\", \"UCA\":\"S\", \"UCG\":\"S\",\n\t\t\t\t\t\t\t\"UAU\":\"Y\", \"UAC\":\"Y\", \"UAA\":\"-\", \"UAG\":\"-\",\n\t\t\t\t\t\t\t\"UGU\":\"C\", \"UGC\":\"C\", \"UGA\":\"-\", \"UGG\":\"W\",\n\t\t\t\t\t\t\t\"CUU\":\"L\", \"CUC\":\"L\", \"CUA\":\"L\", \"CUG\":\"L\",\n\t\t\t\t\t\t\t\"CCU\":\"P\", \"CCC\":\"P\", \"CCA\":\"P\", \"CCG\":\"P\",\n\t\t\t\t\t\t\t\"CAU\":\"H\", \"CAC\":\"H\", \"CAA\":\"Q\", \"CAG\":\"Q\",\n\t\t\t\t\t\t\t\"CGU\":\"R\", \"CGC\":\"R\", \"CGA\":\"R\", \"CGG\":\"R\",\n\t\t\t\t\t\t\t\"AUU\":\"I\", \"AUC\":\"I\", \"AUA\":\"I\", \"AUG\":\"M\",\n\t\t\t\t\t\t\t\"ACU\":\"T\", \"ACC\":\"T\", \"ACA\":\"T\", \"ACG\":\"T\",\n\t\t\t\t\t\t\t\"AAU\":\"N\", \"AAC\":\"N\", \"AAA\":\"K\", \"AAG\":\"K\",\n\t\t\t\t\t\t\t\"AGU\":\"S\", \"AGC\":\"S\", \"AGA\":\"R\", \"AGG\":\"R\",\n\t\t\t\t\t\t\t\"GUU\":\"V\", \"GUC\":\"V\", \"GUA\":\"V\", \"GUG\":\"V\",\n\t\t\t\t\t\t\t\"GCU\":\"A\", \"GCC\":\"A\", \"GCA\":\"A\", \"GCG\":\"A\",\n\t\t\t\t\t\t\t\"GAU\":\"D\", \"GAC\":\"D\", \"GAA\":\"E\", \"GAG\":\"E\",\n\t\t\t\t\t\t\t\"GGU\":\"G\", \"GGC\":\"G\", \"GGA\":\"G\", \"GGG\":\"G\",\n\t\t\t\t\t\t\t}\n\t\t\n\t\t#initialize our codon and amino acid dictionaries\n\t\tself.codon = {}\n\t\tself.aa = {}\n\t\t\n\t\tself.nuc = { \"A\":0, \"T\":0, \"G\":0, \"C\":0, \"U\":0, \"N\":0 }\n\t\t\n\t\t#add keys to 
codon and aa, and init to zero\n\t\tfor key in self.codonTable:\n\t\t\tself.codon[key] = 0\n\t\t\tself.aa[self.codonTable[key]] = 0\n\t\n\t\t#a list to hold our headers\n\t\tself.header = []", "def mfunc_(d):\r\n _p= {c: k.lower() if c not in ('code', 'label', 'name') else k \r\n for c, k in zip(AGSO_PROPERTIES['props_codes'], d) }\r\n id_= d[ix_].replace('/', '_').replace(\r\n ' ', '_').replace('\"', '').replace(\"'\", '').lower()\r\n return id_, _p", "def npdict(self):\n\n d = {}\n\n # per profile\n d['cruise'] = self.cruise()\n d['day'] = self.day()\n d['latitude'] = self.latitude()\n d['latitude_unc'] = self.latitude_unc()\n d['longitude'] = self.longitude()\n d['longitude_unc'] = self.longitude_unc()\n d['month'] = self.month()\n d['n_levels'] = self.n_levels()\n d['primary_header_keys'] = self.primary_header_keys()\n d['probe_type'] = self.probe_type()\n d['time'] = self.time()\n d['uid'] = self.uid()\n d['year'] = self.year()\n d['PIs'] = self.PIs()\n d['originator_station'] = self.originator_station()\n d['originator_cruise'] = self.originator_cruise()\n d['originator_flag_type'] = self.originator_flag_type()\n d['t_metadata'] = self.t_metadata()\n d['s_metadata'] = self.s_metadata()\n # per level\n d['s'] = self.s()\n d['s_unc'] = self.s_unc()\n d['s_level_qc'] = self.s_level_qc()\n d['s_profile_qc'] = self.s_profile_qc()\n d['s_qc_mask'] = self.s_qc_mask()\n d['t'] = self.t()\n d['t_unc'] = self.t_unc()\n d['t_level_qc'] = self.t_level_qc()\n d['t_profile_qc'] = self.t_profile_qc()\n d['t_qc_mask'] = self.t_qc_mask()\n d['z'] = self.z()\n d['z_unc'] = self.z_unc()\n d['z_level_qc'] = self.z_level_qc()\n d['oxygen'] = self.oxygen()\n d['phosphate'] = self.phosphate()\n d['silicate'] = self.silicate()\n d['pH'] = self.pH()\n d['p'] = self.p()\n\n return d", "def gen_info(middle_db_input):\n\n query = \"\"\n query_add = \"\"\n # Queries will be build from several part: \"select\" statement, followed by\n # what should be selected and what tables to select from. 
The last part is\n # extended using the information from the webpage request.\n\n # Two types of query body:\n # first to be used for selection by any accession number;\n query_t0 = \"from locus l, cds c, accession a where a.locus_id=l.id and c.locus_id=l.id and \"\n # second to be used in all other queries, as only primary (latest) accession\n # number will be displayed on the webpage.\n query_t1 = query_t0 + \"a.latest_version='T' and \"\n\n # Columns to be selected from the respective tables.\n locus = \"l.whole_seq as locus_sequence, l.chr_location, l.locus_name, l.chr_name\"\n cds = \"c.gene_name, c.product_name, c.product_id, c.seq_location, c.whole_seq, c.translation, c.complement\"\n accession = \"a.accession_num\"\n # Columns to be selected, when the user selects a cytogenic location.\n cyt_loc_cds = \"c.gene_name, c.product_name, c.product_id\"\n cyt_loc = \"l.chr_location\"\n \n # Query construction \n search = middle_db_input[\"name\"]\n # Type 0 (gene identifier) and 1 (product name) contain information on a\n # single element, hence no information repeats would be present in the\n # output; therefore just one query is generated\n if middle_db_input[\"type\"]==0:\n query = \"select \" + accession + \", \" + locus + \", \" + cds + \" \" + query_t1 + \"c.gene_name\" + \"=\" + \"'\"+search+\"'\"\n elif middle_db_input[\"type\"]==1:\n query = \"select \" + accession + \", \" + locus + \", \" + cds + \" \" + query_t1 + \"c.product_name\" + \"=\" + \"'\"+search+\"'\"\n # Type 2 (locus accession number) and 3 (cytogenic location) could have\n # multiple elements - multiple CDS or multiple loci and CDS, respetively).\n # Using one query would lead to information repeats. Using two queries \n # avoids unnecesary repetitions.\n elif middle_db_input[\"type\"]==2:\n query = \"select \" + locus + \" \" + query_t0 + \"a.accession_num\" + \"=\" + \"'\"+search+\"'\"\n query_add = \"select \" + cds + \" \" + query_t0 + \"a.accession_num\" + \"=\" + \"'\"+search+\"'\"\n elif middle_db_input[\"type\"]==3:\n query = \"select \" + cyt_loc_cds + \" \" + query_t1+ \"l.chr_location\" + \" like \" + \"'\"+search+\"%\"+\"'\"\n query_add = \"select \" + accession + \", \" + cyt_loc + \" \" + query_t1+ \"l.chr_location\" + \" like \" + \"'\"+search+\"%\"+\"'\"\n elif middle_db_input[\"type\"]==4:\n search2 = middle_db_input[\"product_id\"]\n query = \"select \" + accession + \", \" + locus + \", \" + cds + \" \" + query_t1 + \"c.product_id\" + \"=\" + \"'\"+search2+\"'\"\n \n\n db = pymysql.connect(db='0a002', user='0a002', passwd='0a002', host='hope', port=3306, cursorclass = pymysql.cursors.DictCursor)\n \n # Creating output from cursors depending on the query type.\n db_middle_output = [middle_db_input]\n if middle_db_input[\"type\"]==0 or middle_db_input[\"type\"]==1:\n cursor = db.cursor()\n q = cursor.execute(query)\n data = cursor.fetchall()\n db_middle_output += data\n elif middle_db_input[\"type\"]==2 or middle_db_input[\"type\"]==3:\n cursor1 = db.cursor()\n cursor2 = db.cursor()\n q1 = cursor1.execute(query)\n q2 = cursor2.execute(query_add)\n unit1 = cursor1.fetchall()\n unit2 = cursor2.fetchall()\n db_middle_output =db_middle_output + list(unit1) + list(unit2)\n # output includes the input dictionary for convenience of the front end.\n elif middle_db_input[\"type\"]==4:\n cursor = db.cursor()\n q = cursor.execute(query)\n data = cursor.fetchall()\n db_middle_output += data\n \n\n return(db_middle_output)" ]
[ "0.65818685", "0.61850566", "0.6020124", "0.58721864", "0.5824307", "0.5692058", "0.56518245", "0.5619209", "0.55818576", "0.5551026", "0.553083", "0.5518843", "0.5459129", "0.54217863", "0.5413675", "0.54074", "0.5400768", "0.5389365", "0.53707016", "0.53672904", "0.53651774", "0.53439254", "0.5332423", "0.5322992", "0.5322961", "0.53070146", "0.53069395", "0.5300498", "0.5294076", "0.5282439", "0.5266366", "0.52605873", "0.52524537", "0.5251155", "0.5227321", "0.5226553", "0.52203006", "0.52173567", "0.5195536", "0.51941264", "0.5194115", "0.5183776", "0.5175653", "0.51744837", "0.5164211", "0.5163406", "0.5162218", "0.51590157", "0.5149242", "0.51476574", "0.5147193", "0.5142598", "0.5140896", "0.5139556", "0.5136873", "0.51334506", "0.51179636", "0.5114886", "0.5102926", "0.51009196", "0.5096022", "0.50908875", "0.5080932", "0.50775135", "0.507454", "0.5069861", "0.5062969", "0.50607765", "0.50600153", "0.50595593", "0.505731", "0.50527805", "0.5051647", "0.5049814", "0.50478387", "0.5044266", "0.5044144", "0.5041002", "0.5039788", "0.50384736", "0.50292337", "0.5024908", "0.50245315", "0.5024498", "0.50218946", "0.5020646", "0.5020318", "0.5020041", "0.5018217", "0.50060636", "0.5004678", "0.5000239", "0.49942395", "0.4988845", "0.4987164", "0.49866766", "0.49864793", "0.498565", "0.4984999", "0.49846536", "0.49829942" ]
0.0
-1
A little shared protein creator from aligned sequences
def shared(self, aligned_a, aligned_b): return "".join([self.delta(aligned_a, aligned_b, i) for i in range(len(aligned_a))])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def proteins_align(self, protein_a, protein_b):\n # Set variables\n first = Seq(self.proteins_dict[protein_a][\"protein\"])\n second = Seq(self.proteins_dict[protein_b][\"protein\"])\n \n # Align proteins\n align = pairwise2.align.globalxx(first, second, one_alignment_only=True)\n aligned_a = align[0].seqA\n aligned_b = align[0].seqB\n \n # Calculate shared string\n shared = self.shared(aligned_a, aligned_b)\n\n # Returns dictionary of shared terms\n return {protein_a: aligned_a, \n protein_b: aligned_b,\n \"shared\": shared,\n \"shared_count\": Counter([x for x in shared.split(\"-\") if x != \"\"]),\n \"percent_simalarity\": align[0].score / len(align[0].seqA),\n \"score\": align[0].score, \n \"levenshtein_distance\": l_dist(str(first), str(second))}", "def generateAlignment(seqs):\n \"\"\"Create temporary file for MUSCLE\"\"\"\n inFn = tempfile.mktemp(prefix='tmp_align', suffix='.fasta', dir=None)\n outFn = tempfile.mktemp(prefix='tmp_align', suffix='.fasta', dir=None)\n \n \n \"\"\"Creates an align object or pd.Series() with indexing to preserve order but does not appyl padding\"\"\"\n align = padAlignment(seqs, applyPadding=False)\n \"\"\"Put alignments in the tempfiles\"\"\"\n align2fasta(seqs, inFn, applyPadding=False)\n\n muscleCommand = ['muscle', '-in', inFn, '-out', outFn]\n result = subprocess.call(muscleCommand)\n\n \"\"\"If MUSCLE was successful\"\"\"\n if not result:\n outAlign = fasta2align(outFn)\n else:\n print(\"Error in MUSCLE!\")\n raise Exception(\"MUSCLEError\")\n \"\"\"Remove the temporary files\"\"\"\n os.remove(inFn)\n os.remove(outFn)\n \n \"\"\"MUSCLE seqs need to be reorderd using the original index\"\"\"\n outAlign = outAlign.loc[[str(i) for i in align.index]]\n \"\"\"Index was str() through FASTA files so reset index with original index\"\"\"\n outAlign.index = align.index\n \n \"\"\"Check that all seqs are being returned in the correct order\"\"\"\n badSeqs = 0\n if not len(seqs) == len(outAlign):\n print('Different number of output seqs!')\n badSeqs+=1\n\n for i, s1, s2 in zip(np.arange(len(seqs)), seqs, outAlign):\n if not s1.replace('-', '') == s2.replace('-', ''):\n print('%d: %s != %s' % (i, s1, s2))\n badSeqs+=1\n if badSeqs>0:\n raise Exception('Output seqs are different than input seqs! 
(%d)' % badSeqs)\n\n return outAlign", "def proteinTranslation(seq, geneticCode = STANDARD_GENETIC_CODE):\n\n seq = seq.replace('T','U') # Make sure we have RNA sequence\n proteinSeq = []\n \n i = 0\n while i+2 < len(seq):\n \n codon = seq[i:i+3]\n aminoAcid = geneticCode[codon]\n \n if aminoAcid is None: # Found stop codon\n break\n\n proteinSeq.append(aminoAcid)\n i += 3\n\n return proteinSeq", "def mergeChainedAlignedSegments(chainedAlignedSegments, refSequence, readSequence):\n cAR = pysam.AlignedSegment()\n aR = chainedAlignedSegments[0]\n cAR.query_name = aR.query_name\n \n #Parameters we don't and therefore set properly\n #cAR.flag = aR.flag\n #cAR.mapq = aR.mapq\n #cAR.mrnm = 0\n #cAR.mpos=0\n #cAR.isize=0\n #cAR.qual = \"<\" * len(readSequence)\n #cAR.tags = aR.tags \n cAR.next_reference_id = -1\n cAR.reference_start = aR.reference_start #Reference start\n cAR.is_reverse = aR.is_reverse\n cAR.query_sequence = reverseComplement(readSequence) if cAR.is_reverse else readSequence\n cAR.reference_id = aR.reference_id\n cigarList = []\n pPos = aR.reference_start\n #Iterate from the other end of the sequence if reversed\n pQPos = -(len(readSequence)-1) if cAR.is_reverse else 0 \n \n for aR in chainedAlignedSegments:\n assert cAR.is_reverse == aR.is_reverse\n #Add a deletion representing the preceding unaligned reference positions\n assert aR.reference_start >= pPos\n if aR.reference_start > pPos:\n cigarList.append((2, aR.reference_start - pPos))\n pPos = aR.reference_start \n \n #Add an insertion representing the preceding unaligned read positions\n #make it a soft clip if it is the first chained alignment\n qPos = getFirstNonClippedPositionInRead(aR, readSequence)\n assert qPos >= pQPos\n if qPos > pQPos:\n cigarList.append((4 if aR == chainedAlignedSegments[0] else 1, qPos - pQPos)) \n pQPos = qPos\n \n #Add the operations of the cigar, filtering hard and soft clipping\n for op, length in aR.cigar:\n assert op in (0, 1, 2, 4, 5)\n if op in (0, 1, 2):\n cigarList.append((op, length))\n if op in (0, 2): #Is match or deletion\n pPos += length\n if op in (0, 1): #Is match or insertion\n pQPos += length\n \n assert pPos <= len(refSequence)\n \n #Set reference end coordinate (which is exclusive)\n #cAR.reference_end = pPos #We don't do this because it is set by cigar string\n \n #Now add any trailing, necessary soft clipping\n if cAR.is_reverse:\n assert pQPos <= 1\n if pQPos < 1:\n cigarList.append((4, -pQPos + 1))\n else:\n assert pQPos <= len(readSequence)\n if pQPos < len(readSequence):\n cigarList.append((4, len(readSequence) - pQPos))\n \n cAR.cigar = tuple(cigarList)\n \n #Check ops\n for op, length in cAR.cigar: #We should have no hard clipped ops\n assert op in (0, 1, 2, 4)\n \n #Reference sequence check coordinates\n assert sum([ length for op, length in cigarList if op in (0, 2)]) == cAR.reference_end - cAR.reference_start\n assert cAR.reference_start >= 0 and cAR.reference_start < len(refSequence)\n assert cAR.reference_end >= 0 and cAR.reference_end <= len(refSequence)\n \n #Read sequence check coordinates\n assert cAR.query_alignment_start >= 0 and cAR.query_alignment_start < len(readSequence)\n assert cAR.query_alignment_end >= 0 and cAR.query_alignment_end <= len(readSequence)\n assert cAR.query_alignment_start + sum([ length for op, length in cigarList if op in (0, 1)]) == cAR.query_alignment_end\n \n return cAR", "def thread_dna(\n PROT, NUCL, STOP\n ):\n\n # read in aligned protein fasta file\n format = \"fasta\"\n handle = open(PROT)\n PROTa = list(SeqIO.parse(handle, 
format))\n\n # read in aligned protein fasta file\n format = \"fasta\"\n handle = open(NUCL)\n NUCLf = list(SeqIO.parse(handle, format))\n\n # initialize dictionary for sequences\n pal2nal = {}\n\n # loop through genes and thread DNA over\n # protein alignment\n for entry in range(0, len(PROTa)):\n # save the gene ID to ID\n ID = ''\n ID = PROTa[entry].id\n # save protein sequence to Pseq\n Pseq = ''\n Pseq = PROTa[entry].seq\n # save nucleotide sequence to Nseq\n Nseq = ''\n Nseq = NUCLf[entry].seq\n\n # # check that Nseq is divisible by three and therefore not a pseudogene\n # if len(Nseq) % 3 != 0:\n # print(\"\")\n # print(ID, \"nucleotide sequence has a length of\", len(Nseq))\n # print(\"and cannot be threaded over the protein alignment.\")\n # print(\"Exiting now...\\n\")\n # sys.exit()\n\n pal2nal[ID] = ''\n GAPcnt = 0\n # loop through the sequence\n for AA in range(0, (int(len(Pseq))+1) - 1, 1):\n # if stops should be included\n if STOP == 'T':\n # if AA is a gap insert a codon of gaps\n if Pseq[AA] == \"-\":\n pal2nal[ID] += ('---')\n GAPcnt+=1\n # if AA is not a gap, insert the corresponding codon\n elif Pseq[AA] != \"-\":\n NTwin=(AA-GAPcnt)*3\n pal2nal[ID] += (Nseq[NTwin:NTwin+3])\n # if stops should not be included\n elif STOP == 'F':\n # if AA is a gap insert a codon of gaps\n if Pseq[AA] == \"-\":\n pal2nal[ID] += ('---')\n GAPcnt+=1\n # if AA is not a gap, insert the corresponding codon\n elif Pseq[AA] != \"-\":\n # if AA is a stop or ambiguous insert a codon of gaps\n if Pseq[AA] == 'X' or Pseq[AA] == '*':\n pal2nal[ID] += ('---')\n else:\n NTwin=(AA-GAPcnt)*3\n pal2nal[ID] += (Nseq[NTwin:NTwin+3])\n\n\n\n ## this commented code will check if the nucleotide window \n ## translates to the corresponding codon\n # if Pseq[AA] != Nseq[NTwin:NTwin+3].translate():\n # print(\"\\nAmino acid position\", AA, \"(\",Pseq[AA],\")\", \"does not correspond to codon\")\n # print(Nseq[NTwin:NTwin+3], \"in nucleotide window\", NTwin,\"-\",NTwin+3)\n # print(Nseq[NTwin:NTwin+3], \"translates to\", Nseq[NTwin:NTwin+3].translate())\n # print(\"Nucleotides cannot be threaded ontop of the protein sequence.\")\n # print(\"Exiting now...\\n\")\n # sys.exit()\n\n # print out threaded DNA alignment\n for entry in range(0, len(PROTa)):\n print(\">{}\\n{}\".format(PROTa[entry].id, pal2nal[PROTa[entry].id]))", "def Translate(self):\n dna_to_protein = {\n 'ATA':'I', 'ATC':'I', 'ATT':'I', 'ATG':'M',\n 'ACA':'T', 'ACC':'T', 'ACG':'T', 'ACT':'T',\n 'AAC':'N', 'AAT':'N', 'AAA':'K', 'AAG':'K',\n 'AGC':'S', 'AGT':'S', 'AGA':'R', 'AGG':'R',\n 'CTA':'L', 'CTC':'L', 'CTG':'L', 'CTT':'L',\n 'CCA':'P', 'CCC':'P', 'CCG':'P', 'CCT':'P',\n 'CAC':'H', 'CAT':'H', 'CAA':'Q', 'CAG':'Q',\n 'CGA':'R', 'CGC':'R', 'CGG':'R', 'CGT':'R',\n 'GTA':'V', 'GTC':'V', 'GTG':'V', 'GTT':'V',\n 'GCA':'A', 'GCC':'A', 'GCG':'A', 'GCT':'A',\n 'GAC':'D', 'GAT':'D', 'GAA':'E', 'GAG':'E',\n 'GGA':'G', 'GGC':'G', 'GGG':'G', 'GGT':'G',\n 'TCA':'S', 'TCC':'S', 'TCG':'S', 'TCT':'S',\n 'TTC':'F', 'TTT':'F', 'TTA':'L', 'TTG':'L',\n 'TAC':'Y', 'TAT':'Y', 'TAA':'*', 'TAG':'*',\n 'TGC':'C', 'TGT':'C', 'TGA':'*', 'TGG':'W',\n }\n \n length = self.length\n reading = {}\n for i in range(3):\n reading['frame_'+str(i+1)] = tuple([dna_to_protein[self.sequence[index:index+3]] for index in range(i,length-2,3)])\n reverse_strand = Analyze_DNA_Sequence.Complementary(self,'5-3')\n for i in range(3):\n reading['frame_'+str(i+4)] = tuple([dna_to_protein[reverse_strand[index:index+3]] for index in range(i,length-2,3)])\n\n return reading", "def align(self):\n 
number_of_Xs = 0\n xFront = \"\"\n xEnd = \"\"\n dashFront = \"\"\n dashEnd = \"\"\n\n # Determining if variable amino acids (\"X\") need to be added to the\n\t # beginning of the sequence:\n z = self.hmmStart-self.seqStart\n number_of_Xs = (self.hmmStart-1)-z\n if z > 0:\n dashFront = \"-\"*z\n xFront = \"X\"*number_of_Xs\n elif self.hmmStart-1<=self.seqStart-1:\n xFront = \"X\"*(self.hmmStart-1) \n\n # Determining if variable amino acids (\"X\") need to be added to the \n # end of the sequence:\n number_of_Xs_end = self.hmmLength - self.hmmEnd\n\n # The original sequence length; SPA format includes this\n delimeter = \"|\" #Need to fix can be \"_\" or \"|\" or something else...\n \n distToSeqEnd = self.origSeqLength - seqTo\n if distToSeqEnd >= number_of_Xs_end and number_of_Xs_end != self.hmmLength:\n xEnd = 'X'*number_of_Xs_end\n else:\n if distToSeqEnd < number_of_Xs_end:\n xEnd = 'X'*distToSeqEnd\n \tdashEnd += \"-\"*(number_of_Xs_end-distToSeqEnd)\n \t\n begin = \"{}{}\".format(dashFront, xFront)\n end = \"{}{}\".format(xEnd, dashEnd)\n self.addToFront(begin)\n self.data.extend(end)\n self.original = str(self)", "def dna_to_protein(seq):\n\n # Verify a convertible sequence\n if len(seq) % 3 != 0:\n raise RuntimeError('Total number of bases must be a multiple of 3')\n\n # Iterate through adding the proteins\n protein = ''\n for i in range(0, len(seq), 3):\n protein += bioinfo_dicts.codons[seq[i:i+3]]\n return protein", "def alignprotein(self, sample, analysistype, target, program, index, hit):\n # Initialise lists to store the outputs\n if target not in sample[analysistype].dnaseq:\n sample[analysistype].dnaseq[target] = list()\n sample[analysistype].protseq[target] = list()\n sample[analysistype].ntalign[target] = list()\n sample[analysistype].ntindex[target] = list()\n sample[analysistype].aaidentity[target] = list()\n sample[analysistype].aaalign[target] = list()\n sample[analysistype].aaindex[target] = list()\n # Only BLASTn analyses require additional effort to find the protein sequence\n if program == 'blastn':\n # Convert the extracted, properly-oriented DNA sequence to a Seq object\n sample[analysistype].dnaseq[target].append(Seq(hit['query_sequence']))\n # Create the BLAST-like interleaved outputs with the query and subject sequences\n sample[analysistype].ntalign[target].append(self.interleaveblastresults(query=hit['query_sequence'],\n subject=hit['subject_sequence']))\n # Determine the number and position of SNPs\n count = 0\n ntindex = str()\n # Iterate through every position in the query sequence, and determine if the subject sequence at that\n # position is a match\n for i, bp in enumerate(hit['query_sequence']):\n # If the sequence at the query and subject sequences do not match, store the location\n if bp != hit['subject_sequence'][i]:\n # Append the current location (+1 due to zero indexing)\n ntindex += '{i};'.format(i=i + 1)\n # Increment the count by the length of the current position - should make the output more\n # uniform due to the fact that the numbers are not padded\n count += len(str(i))\n # If there are many SNPs, then insert line breaks for every 15+ characters\n if count >= 15:\n ntindex += '\\n'\n # Reset the character count to 0\n count = 0\n # Remove trailing ';' (or ';' followed by a newline)\n ntindex = ntindex.rstrip(';').replace(';\\n', '\\n') if ntindex else '-'\n # Add the cleaned string to the list\n sample[analysistype].ntindex[target].append(ntindex)\n # Convert the target name to a string without illegal characters - necessary for creating 
the\n # temporary databases below\n clean_target = ''.join(filter(str.isalnum, target))\n # Set the absolute path, and create the tmp working directory\n tmp_dir = os.path.join(sample[analysistype].reportdir, 'tmp')\n make_path(tmp_dir)\n # Set the absolute path of the FASTA file that will store the subject sequence. Will be used as the\n # database in the tblastx analysis used to translate the query and subject sequence to amino acid\n tmp_subject = os.path.join(tmp_dir, '{sn}_{target}_{at}_db_{index}.fa'\n .format(sn=sample.name,\n target=clean_target,\n at=analysistype,\n index=index))\n # Write the appropriately-converted subject sequence to the database file\n with open(tmp_subject, 'w') as tmp_db:\n SeqIO.write(SeqRecord(Seq(hit['subject_sequence'].replace('-', '')),\n id='{}_{}'.format(sample.name, target),\n description=''), tmp_db, 'fasta')\n # Create a BLAST database from this file\n self.makeblastdb(fasta=tmp_subject)\n # Create the tblastx (translated nt query: translated nt subject) call. Remove any masking. Do not\n # include the 'query' parameter, as it will be supplied below\n tblastx = NcbitblastxCommandline(db=os.path.splitext(tmp_subject)[0],\n evalue=0.1,\n outfmt=15,\n soft_masking=False,\n seg='no')\n # Run the tblastx analysis. Supply the query as stdin. Capture stdout, and stderr\n stdout, stderr = tblastx(stdin=sample[analysistype].targetsequence[target][index].replace('-', ''))\n # Convert the string stdout to JSON format\n json_output = json.loads(stdout)\n # Extract the necessary list of HSPs from the JSON-formatted outputs\n data = json_output['BlastOutput2'][0]['report']['results']['search']['hits'][0]['hsps']\n # Initialise a string to store the extracted amino acid subject sequence\n ref_prot = str()\n for results in data:\n # Attempt to use hit_frame 1 - the .targetsequence attribute was populated with the nt sequence in\n # (hopefully) the correct orientation, so attempt to use that\n if results['hit_frame'] == 1:\n # Populate the .protseq attribute with the Seq-converted amino acid sequence extracted from the\n # report\n sample[analysistype].protseq[target].append(Seq(results['qseq'].upper()))\n # Grab the subject sequence\n ref_prot = results['hseq']\n # Only the first result is required\n break\n # If there were no results with the hit_frame equal to 1, get the best result from the analysis\n if not ref_prot:\n for results in data:\n sample[analysistype].protseq[target].append(Seq(results['qseq'].upper()))\n ref_prot = results['hseq']\n break\n # Clear out the tmp directory\n try:\n shutil.rmtree(tmp_dir)\n except FileNotFoundError:\n pass\n else:\n # Non-blastn analyses will already have the outputs as amino acid sequences. 
Populate variables as required\n ref_prot = hit['subject_sequence']\n sample[analysistype].protseq[target].append(Seq(hit['query_sequence']))\n # Create the BLAST-like alignment of the amino acid query and subject sequences\n sample[analysistype].aaalign[target]\\\n .append(self.interleaveblastresults(query=sample[analysistype].protseq[target][index],\n subject=ref_prot))\n # Determine the number of matches, as well as the number and location of mismatches\n count = 0\n matches = 0\n aaindex = str()\n # Iterate through the query sequence to determine matching positions\n for i, bp in enumerate(sample[analysistype].protseq[target][index]):\n if bp != ref_prot[i]:\n aaindex += '{i};'.format(i=i + 1)\n count += len(str(i))\n # If there are many SNPs, then insert line breaks for every 10 SNPs\n if count >= 15:\n aaindex += '\\n'\n count = 0\n # Increment the total number of matches\n if bp == ref_prot[i]:\n matches += 1\n # Clean the index string\n aaindex = aaindex.rstrip(';').replace(';\\n', '\\n') if aaindex else '-'\n # Append the cleaned string to the list\n sample[analysistype].aaindex[target].append(aaindex)\n # Determine percent identity between the query and subject amino acid sequence by dividing the number of\n # matches by the total length of the query sequence and multiplying this result by 100. Convert to two\n # decimal places\n pid = float('{:.2f}'.format(matches / len(sample[analysistype].protseq[target][index]) * 100))\n # Append the calculated percent identity to the list\n sample[analysistype].aaidentity[target].append(pid)\n return sample", "def translate_sequence(sequence, genetic_code = {'GUC': 'V', 'ACC': 'T', 'GUA': 'V', 'GUG': 'V', 'ACU': 'T', 'AAC': 'N', 'CCU': 'P', 'UGG': 'W', 'AGC': 'S', 'AUC': 'I', 'CAU': 'H', 'AAU': 'N', 'AGU': 'S', 'GUU': 'V', 'CAC': 'H', 'ACG': 'T', 'CCG': 'P', 'CCA': 'P', 'ACA': 'T', 'CCC': 'P', 'UGU': 'C', 'GGU': 'G', 'UCU': 'S', 'GCG': 'A', 'UGC': 'C', 'CAG': 'Q', 'GAU': 'D', 'UAU': 'Y', 'CGG': 'R', 'UCG': 'S', 'AGG': 'R', 'GGG': 'G', 'UCC': 'S', 'UCA': 'S', 'UAA': '*', 'GGA': 'G', 'UAC': 'Y', 'GAC': 'D', 'UAG': '*', 'AUA': 'I', 'GCA': 'A', 'CUU': 'L', 'GGC': 'G', 'AUG': 'M', 'CUG': 'L', 'GAG': 'E', 'CUC': 'L', 'AGA': 'R', 'CUA': 'L', 'GCC': 'A', 'AAA': 'K', 'AAG': 'K', 'CAA': 'Q', 'UUU': 'F', 'CGU': 'R', 'CGC': 'R', 'CGA': 'R', 'GCU': 'A', 'GAA': 'E', 'AUU': 'I', 'UUG': 'L', 'UUA': 'L', 'UGA': '*', 'UUC': 'F'}, start_pos = 0):\n #find first orf\n #first_orf_seq = find_first_orf(sequence)\n\n # ensure sequence is uppercase\n seq = sequence.upper()\n\n #translate the sequence\n protein = \"\"\n for i in range(0, len(seq) - (len(seq) % 3), 3):\n codon = seq[i:i + 3]\n if genetic_code[codon] == \"*\":\n break\n protein += genetic_code[codon]\n return protein", "def clustal_align_protein(rec_1, rec_2, work_dir):\n fasta_file = op.join(work_dir, \"prot-start.fasta\")\n align_file = op.join(work_dir, \"prot.aln\")\n SeqIO.write((rec_1, rec_2), file(fasta_file, \"w\"), \"fasta\")\n\n clustal_cl = ClustalwCommandline(CLUSTALW_BIN(\"clustalw2\"),\n infile=fasta_file, outfile=align_file, outorder=\"INPUT\",\n type=\"PROTEIN\")\n stdout, stderr = clustal_cl()\n\n aln_file = file(clustal_cl.outfile)\n alignment = AlignIO.read(aln_file, \"clustal\")\n print >>sys.stderr, \"\\tDoing clustalw alignment: %s\" % clustal_cl\n return alignment.format(\"fasta\")", "def prob_t_a_given_s(self, alignment_info):\n ...", "def __init__(self, seq, peptide):\r\n self.seq = seq # original DNA sequence\r\n self.peptide = peptide # original peptide sequence\r\n 
self.allPepSeqs = [] # list to hold all possible nuc sequences based on the peptide sequence\r\n self.codonTable = { # holds all amino acids and their associated codons\r\n 'F': ['TTT', 'TTC'], 'S': ['TCT', 'TCC', 'TCA', 'TCG', 'AGT', 'AGC'],\r\n 'Y': ['TAT', 'TAC'], 'C': ['TGT', 'TGC'], 'L': ['TTA', 'TTG', 'CTT', 'CTC', 'CTA', 'CTG'],\r\n '-': ['TAA', 'TGA', 'TAG'], 'W': ['TGG'], 'P': ['CCT', 'CCC', 'CCA', 'CCG'],\r\n 'H': ['CAT', 'CAC'], 'R': ['CGT', 'CGC', 'CGA', 'CGG', 'AGA', 'AGG'], 'Q': ['CAA', 'CAG'],\r\n 'I': ['ATT', 'ATC', 'ATA'], 'T': ['ACT', 'ACC', 'ACA', 'ACG'], 'N': ['AAT', 'AAC'],\r\n 'K': ['AAA', 'AAG'], 'M': ['ATG'], 'V': ['GTT', 'GTC', 'GTA', 'GTG'],\r\n 'A': ['GCT', 'GCC', 'GCA', 'GCG'], 'D': ['GAT', 'GAC'], 'G': ['GGT', 'GGC', 'GGA', 'GGG'],\r\n 'E': ['GAA', 'GAG']\r\n }", "def test_align_unaligned_seqs(self):\n res = align_unaligned_seqs(self.seqs1_fp, RNA)\n self.assertEqual(res.toFasta(), self.seqs1_aln)", "def primary(self):\n return Seq(''.join([r.aa for r in self.residues]), protein_alphabet)", "def process_align(self):\n\t\tstm_t_dict = self._process_recog()\n\t\ttrans_t_dict = self._process_trans()\n\t\talign_obj = viterbi_align(stm_t_dict, trans_t_dict, self.label, self.pair_file_path)\n\t\tself.trans_t_dict = align_obj.viterbi(0, len(stm_t_dict)-1, 0, len(trans_t_dict)-1)", "def try_protein(self):\n location = [0, 0]\n fold = 0\n \n # loop over aminoacids of the data and add info to aminoacids object\n for i, char in enumerate(self.data):\n aminoacid_number = i\n aminoacid_type = char \n \n # make aminoacid object and add to aminoacids list\n aminoacid = Aminoacid(aminoacid_type, aminoacid_number, location, fold)\n self.occupied.append(aminoacid.location)\n self.aminoacids.append(aminoacid)\n\n # make a line orientation as default\n location = [0, len(self.data) + i]\n return", "def main():\r\n\timport sys\r\n\r\n\tlistofSequences = FastAreader(sys.stdin).readFasta() \r\n\tPAMSequences = PAMfinder(listofSequences).classController() # Calls on controller class to return desired models.\r\n\tf = open('Guide Sequences.txt','w') \r\n\tfor i in range(len(PAMSequences[0])):\r\n\t\tf.write(PAMSequences[0][i]) # Prints the header sequence into the file.\r\n\t\tf.write('\\n') \r\n\t\tprint(PAMSequences[0][i]) \r\n\t\tfor j in range(len(PAMSequences[1][i])): \r\n\t\t\tif j == 0: \r\n\t\t\t\tf.write(\"Forward Strand PAM Sites:\") \r\n\t\t\t\tf.write('\\n')\r\n\t\t\t\tprint(\"Forward Strand PAM Sites:\") \r\n\t\t\tprint(PAMSequences[1][i][j]) # Prints the forward sequences\r\n\t\t\ty = str(PAMSequences[1][i][j]) # Changes from int to string characters.\r\n\t\t\tx = ''.join(y) # Joining all the string values so we can print to file.\r\n\t\t\tf.write(x) # Write the joined forward sequences to the file.\r\n\t\t\tf.write('\\n')\r\n\t\tfor k in range(len(PAMSequences[2][i])): # For reverse sequences, and follows same logic as forward. \r\n\t\t\tif k == 0:\r\n\t\t\t\tf.write(\"Reverse Strand PAM Sites (in reference to the Top Strand Position):\")\r\n\t\t\t\tf.write('\\n')\r\n\t\t\t\tprint(\"Reverse Strand PAM Sites (in reference to the Top Strand Position):\")\r\n\t\t\tprint(PAMSequences[2][i][k]) # Prints the reverse sequences with the corresponding positions. \r\n\t\t\ta = str(PAMSequences[2][i][k]) # Changes the integer to string characters, allowing for the values to join.\r\n\t\t\tb = ''.join(a)\r\n\t\t\tf.write(b) # Write all of the reverse sequences onto the text file with their positions. 
\r\n\t\t\tf.write('\\n')\r\n\tf.close() # Close the file.\r", "def makeMotif(UP_seq, MS_seq, motif_size, ps_protein_idx, center_motif_idx, DoS_idx):\n UP_seq_copy = list(UP_seq[max(0, ps_protein_idx - motif_size): ps_protein_idx + motif_size + 1])\n assert len(UP_seq_copy) > motif_size, \"Size seems too small. \" + UP_seq\n\n # If we ran off the end of the sequence at the beginning or at the end, append a gap\n if ps_protein_idx - motif_size < 0:\n for _ in range(motif_size - ps_protein_idx):\n UP_seq_copy.insert(0, \"-\")\n\n elif ps_protein_idx + motif_size + 1 > len(UP_seq):\n for _ in range(ps_protein_idx + motif_size - len(UP_seq) + 1):\n UP_seq_copy.extend(\"-\")\n\n UP_seq_copy[motif_size] = UP_seq_copy[motif_size].lower()\n\n pidx = [str(UP_seq_copy[motif_size]).upper() + str(ps_protein_idx + 1) + \"-p\"]\n\n # Now go through and copy over phosphorylation\n if DoS_idx:\n for ppIDX in DoS_idx:\n position = ppIDX.start() - center_motif_idx\n # If the phosphosite is within the motif\n if abs(position) < motif_size:\n editPos = position + motif_size\n UP_seq_copy[editPos] = UP_seq_copy[editPos].lower()\n assert UP_seq_copy[editPos] == MS_seq[ppIDX.start()], UP_seq_copy[editPos] + \" \" + MS_seq[ppIDX.start()]\n if position != 0:\n pidx.append(str(UP_seq_copy[editPos]).upper() + str(ps_protein_idx + position + 1) + \"-p\")\n\n return \"\".join(UP_seq_copy), pidx", "def prot_sequence_finder(protL):\n \n idDict = prot_id_converter(protL, \"9606\", inpDB = \"genesymbol\",outDB=\"refseqproteingi\")\n seqD = prot_entrez_fetch(idDict, retM=\"gb\", retT=\"fasta\")\n \n protD = {}\n \n for keyS, valueS in idDict.items():\n protD[keyS] = seqD[valueS]\n \n return protD", "def align(aligner, reads):\n counter = 0\n for read in SeqIO.parse(reads, \"fasta\"): \n try:\n alignInfo = next(aligner.map(str(read.seq)))\n print(alignInfo) \n except StopIteration:\n print(read.format(\"fasta\"), end='')", "def Alignsequence(structure1, structure2):\n\n ppb = PPBuilder()\n for pp in ppb.build_peptides(structure1):\n sequence1 = pp.get_sequence()\n for pp in ppb.build_peptides(structure2):\n sequence2 = pp.get_sequence()\n\n alignment = pairwise2.align.globalxx(sequence1, sequence2)\n return alignment", "def align_seqs(self, chain_selection) -> Tuple[str, str]:\n chain = self.chains[chain_selection]\n alignments = pairwise2.align.globalxs(chain[f'{self.wanted_label}_sequence'],\n chain[f'{self.owned_label}_sequence'],\n -1, # open\n -0.1 # extend\n )\n al = alignments[0]\n chain[f'{self.wanted_label}_aln_sequence'] = al[0]\n chain[f'{self.owned_label}_aln_sequence'] = al[1]\n return al[0], al[1]", "def build_alignment(self,score,pieces):\n\t \t# build text\n\t\tself.open_seqs()\n\t\ttext1 = text2 = \"\"\n\t\tend1 = end2 = None\n\t\tfor (start1,start2,length,pctId) in pieces:\n\t\t\tif (end1 != None):\n\t\t\t\tif (start1 == end1): # insertion in sequence 2\n\t\t\t\t\ttext1 += self.seq1_gap * (start2-end2)\n\t\t\t\t\ttext2 += self.seq2_file.get(end2,start2-end2)\n\t\t\t\telse: # insertion in sequence 1\n\t\t\t\t\ttext1 += self.seq1_file.get(end1,start1-end1)\n\t\t\t\t\ttext2 += self.seq2_gap * (start1-end1)\n\n\t\t\ttext1 += self.seq1_file.get(start1,length)\n\t\t\ttext2 += self.seq2_file.get(start2,length)\n\t\t\tend1 = start1 + length\n\t\t\tend2 = start2 + length\n\t\t# create alignment\n\t\tstart1 = pieces[0][0]\n\t\tstart2 = pieces[0][1]\n\t\tend1 = pieces[-1][0] + pieces[-1][2]\n\t\tend2 = pieces[-1][1] + pieces[-1][2]\n\t\tsize1 = end1 - start1\n\t\tsize2 = end2 - start2\n\t\ta = 
Alignment(score=score,species_to_lengths=self.species_to_lengths)\n\t\t#if (self.seq1_strand == \"-\"): start1 = self.seq1_file.length - end1\n\t\ta.add_component(Component(self.seq1_src,start1,size1,self.seq1_strand,text=text1))\n\t\t#if (self.seq2_strand == \"-\"): start2 = self.seq2_file.length - end2\n\t\ta.add_component(Component(self.seq2_src,start2,size2,self.seq2_strand,text=text2))\n\t\treturn a", "def getMaskedSeq(align, mode='mask', barcode=False, delimiter=default_delimiter):\n seq = align.seq\n\n # Build output sequence\n if mode == 'tag' or not align.align_primer:\n # Do not modify sequence\n out_seq = seq\n elif mode == 'trim':\n # Remove region before primer\n if not align.rev_primer:\n out_seq = seq[align.start:]\n else: \n out_seq = seq[:align.end]\n elif mode == 'cut':\n # Remove primer and preceding region\n if not align.rev_primer:\n out_seq = seq[align.end:]\n else: \n out_seq = seq[:align.start]\n elif mode == 'mask':\n # Mask primer with Ns and remove preceding region\n if not align.rev_primer:\n mask_len = align.end - align.start + align.gaps\n out_seq = 'N' * mask_len + seq[align.end:]\n if hasattr(seq, 'letter_annotations') and \\\n 'phred_quality' in seq.letter_annotations:\n out_seq.letter_annotations['phred_quality'] = \\\n [0] * mask_len + \\\n seq.letter_annotations['phred_quality'][align.end:]\n else:\n mask_len = min(align.end, len(seq)) - align.start + align.gaps\n out_seq = seq[:align.start] + 'N' * mask_len\n if hasattr(seq, 'letter_annotations') and \\\n 'phred_quality' in seq.letter_annotations:\n out_seq.letter_annotations['phred_quality'] = \\\n seq.letter_annotations['phred_quality'][:align.start] + \\\n [0] * mask_len\n \n # Add alignment annotations to output SeqRecord\n out_seq.annotations = seq.annotations \n out_seq.annotations['primer'] = align.primer\n out_seq.annotations['prstart'] = align.start\n out_seq.annotations['error'] = align.error\n\n # Parse seq annotation and create output annotation\n seq_ann = parseAnnotation(seq.description, delimiter=delimiter)\n out_ann = OrderedDict([('SEQORIENT', seq.annotations['seqorient']),\n ('PRIMER', align.primer)])\n \n # Add ID sequence to description\n if barcode:\n seq_code = seq[:align.start].seq if not align.rev_primer \\\n else seq[align.end:].seq\n out_seq.annotations['barcode'] = seq_code\n out_ann['BARCODE'] = seq_code\n \n out_ann = mergeAnnotation(seq_ann, out_ann, delimiter=delimiter)\n out_seq.id = flattenAnnotation(out_ann, delimiter=delimiter)\n out_seq.description = ''\n\n return out_seq", "def pfm(alignment_sbjct, pro_seq):\n\n protein_column = ['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N',\n 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'Y']\n\n pfm_matrix = pd.DataFrame(np.zeros((len(pro_seq), len(protein_column))), columns=protein_column)\n\n seq_len = len(pro_seq)\n\n for amino in range(0, seq_len):\n\n for alignm in alignment_sbjct:\n\n if alignm[amino] in protein_column:\n pfm_matrix[alignm[amino]][amino] = pfm_matrix[alignm[amino]][amino] + 1\n\n return pfm_matrix", "def premrna_desc(gff3, fasta):\n seqs = {}\n for defline, seq in LocusPocus.fasta.parse(fasta):\n seqid = defline[1:].split(' ')[0]\n if seqid not in seqs:\n seqs[seqid] = seq\n\n mrnaacc = ''\n mrnalen = 0\n gccontent = 0.0\n gcskew = 0.0\n ncontent = 0.0\n exoncount = 0\n introncount = 0\n utr5plen = 0\n utr3plen = 0\n for entry in gff3:\n if '\\tmRNA\\t' in entry:\n fields = entry.rstrip().split('\\t')\n assert len(fields) == 9\n mrnaacc = re.search(r'accession=([^;\\n]+)', fields[8]).group(1)\n mrnalen = 
int(fields[4]) - int(fields[3]) + 1\n mrnaseq = seqs[mrnaacc]\n if len(mrnaseq) != mrnalen:\n message = 'pre-mRNA \"%s\": length mismatch' % mrnaacc\n message += ' (gff3=%d, fa=%d)' % (mrnalen, len(mrnaseq))\n message += '; most likely a duplicated accession, discarding'\n print(message, file=sys.stderr)\n mrnaacc = ''\n gccontent = gc_content(mrnaseq)\n gcskew = gc_skew(mrnaseq)\n ncontent = n_content(mrnaseq)\n elif '\\texon\\t' in entry:\n exoncount += 1\n elif '\\tintron\\t' in entry:\n introncount += 1\n elif '\\tfive_prime_UTR\\t' in entry:\n fields = entry.rstrip().split('\\t')\n assert len(fields) == 9\n utr5plen += int(fields[4]) - int(fields[3]) + 1\n elif '\\tthree_prime_UTR\\t' in entry:\n fields = entry.rstrip().split('\\t')\n assert len(fields) == 9\n utr3plen += int(fields[4]) - int(fields[3]) + 1\n elif entry.startswith('###'):\n if mrnaacc != '':\n values = '%s %d %.3f %.3f %.3f %d %d %d %d' % (\n mrnaacc, mrnalen, gccontent, gcskew, ncontent,\n exoncount, introncount, utr5plen, utr3plen)\n yield values.split(' ')\n mrnaacc = ''\n mrnalen = 0\n gccontent = 0.0\n gcskew = 0.0\n ncontent = 0.0\n exoncount = 0\n exonlen = 0\n introncount = 0\n utr5plen = 0\n utr3plen = 0", "def find_sequence(filename_pdb, filename_txt):\n with open(filename_pdb, \"r\") as pdb_file, open(filename_txt, \"a\") as seq_file:\n ca_lines = []\n sequence = \"\"\n lines = pdb_file.readlines()\n\n amino_acids = {\"ALA\": \"A\", \"GLY\": \"G\", \"GLU\": \"E\", \"ARG\": \"R\",\n \"TRP\": \"W\", \"TYR\": \"Y\", \"SER\": \"S\", \"ASN\": \"N\",\n \"ASP\": \"D\", \"CYS\": \"C\", \"GLN\": \"Q\", \"HIS\": \"H\",\n \"ILE\": \"I\", \"LEU\": \"L\", \"LYS\": \"K\", \"MET\": \"M\",\n \"PHE\": \"F\", \"PRO\": \"P\", \"THR\": \"T\", \"VAL\": \"V\"}\n\n for line in lines:\n if line[12:16].strip() == \"CA\":\n ca_lines.append(line)\n sequence = sequence + amino_acids[line[17:20]]\n\n new_sequence = \"\"\n for aa in sequence:\n new_sequence += aa\n if len(new_sequence.replace(\"\\n\", \"\")) % 70 == 0:\n new_sequence += \"\\n\"\n\n\n seq_file.write(f\">{filename_pdb[11:18]}\\n\")\n seq_file.write(new_sequence)\n seq_file.write(\"\\n\")", "def find_matching_seqs_from_alignment(sequences, ref_sequence):\n\n # if the first sequence (gaps removed) in MSA matches with reference,\n # return this sequence.\n first_seq_in_alignment = sequences[0] \n #first_seq_in_alignment_gaps_removed = first_seq_in_alignment.replace('-','')\n first_seq_in_alignment_gaps_removed = find_and_replace(first_seq_in_alignment, '-','')\n if first_seq_in_alignment_gaps_removed == ref_sequence:\n print('\\n\\tFirst sequence in alignment (gaps removed) matches reference,'\n '\\n\\tSkipping regorous search for matching sequence'\n )\n first_seq = list()\n first_seq.append(first_seq_in_alignment)\n return first_seq\n pairwise_scores = []\n for seq_indx, seq in enumerate(sequences):\n #seq_gaps_removed = seq.replace('-','')\n seq_gaps_removed = find_and_replace(seq, '-', '')\n print(seqs_gaps_removed)\n\n score = align_pairs_local(\n ref_sequence,\n seq_gaps_removed,\n score_only = True,\n )\n score_at_indx = (seq_indx, score)\n pairwise_scores.append(score_at_indx)\n\n seq_indx, max_score = max(pairwise_scores, key=lambda x: x[1])\n matching_seqs_indx = [\n indx for indx, score in pairwise_scores if score == max_score\n ]\n\n best_matching_seqs = [\n sequences[indx] for indx in matching_seqs_indx\n ]\n num_matching_seqs = len(best_matching_seqs)\n if num_matching_seqs > 1 :\n print('\\n\\tFound %d sequences in MSA that match the reference'\n '\\n\\tThe 
first sequence is taken as matching'% num_matching_seqs\n )\n return best_matching_seqs", "def _process_cdss(self, prot_fasta_path):\n if self.is_metagenome:\n prot_fasta = {} # type: dict\n untranslatable_prot = set()\n for cds_id in self.cdss:\n cds = self.feature_dict[cds_id]\n try:\n prot_seq = str(Seq(cds['dna_sequence']).translate(\n self.code_table, cds=True).strip(\"*\"))\n except TranslationError as e:\n cds['warnings'] = cds.get('warnings', []) + [str(e)]\n # NOTE: we may need a different way of handling this for metagenomes.\n prot_seq = \"\"\n if self.is_metagenome:\n untranslatable_prot.add(cds_id)\n\n if self.is_metagenome:\n if prot_seq != \"\":\n protein_id = \"\"\n if cds.get(\"aliases\"):\n aliases = cds['aliases']\n for key, val in aliases:\n if key == \"protein_id\":\n protein_id = val\n if not protein_id:\n protein_id = cds['id'] # assign to some default\n else:\n # log a warning here?\n pass\n # TODO: update header to reflect what we actually want people\n # to see.\n if protein_id in prot_fasta:\n prot_fasta[protein_id][0] += \"|\" + cds['id']\n else:\n fasta_seq_data = \">\" + protein_id + \" cds_ids:\" + cds['id']\n prot_fasta[protein_id] = [fasta_seq_data, prot_seq]\n else:\n pass\n\n else:\n cds.update({\n \"protein_translation\": prot_seq,\n \"protein_md5\": hashlib.md5(prot_seq.encode('utf8')).hexdigest(),\n \"protein_translation_length\": len(prot_seq),\n })\n\n if 'parent_gene' in cds:\n parent_gene = self.feature_dict[cds['parent_gene']]\n # no propigation for now\n propagate_cds_props_to_gene(cds, parent_gene, self.is_metagenome)\n elif self.generate_genes:\n spoof = copy.copy(cds)\n spoof['type'] = 'gene'\n spoof['id'] = cds['id']+\"_gene\"\n spoof['cdss'] = [cds['id']]\n spoof['warnings'] = [warnings['spoofed_gene'].format(cds['id'])]\n self.feature_dict[spoof['id']] = spoof\n cds['parent_gene'] = spoof['id']\n self.spoof_gene_count += 1\n else:\n raise ValueError(warnings['no_spoof'])\n\n self.feature_dict[cds['id']] = cds\n\n if self.is_metagenome:\n with open(prot_fasta_path, 'w') as fid:\n for key, line in prot_fasta.items():\n fid.write('\\n'.join(line))\n # do something with 'untranslatable_prot'", "def pair_hmm_align_unaligned_seqs(seqs,moltype,params={}):\n \n seqs = LoadSeqs(data=seqs,moltype=moltype,aligned=False)\n try:\n s1, s2 = seqs.values()\n except ValueError:\n raise ValueError,\\\n \"Pairwise aligning of seqs requires exactly two seqs.\"\n \n try:\n gap_open = params['gap_open']\n except KeyError:\n gap_open = 5\n try:\n gap_extend = params['gap_extend']\n except KeyError:\n gap_extend = 2\n try:\n score_matrix = params['score_matrix']\n except KeyError:\n score_matrix = make_dna_scoring_dict(\\\n match=1,transition=-1,transversion=-1)\n \n return global_pairwise(s1,s2,score_matrix,gap_open,gap_extend)", "def createAlignment(sequences, alphabet):\n align = Alignment(alphabet)\n counter = 0\n for sequence in sequences:\n name = \"sequence\" + str(counter)\n align.add_sequence(name, sequence)\n counter+=1\n return align", "def create_protein_sequences_table(self,fn_proteins_fasta_file):\n log.info(\"Creating table of protein sequences ...\")\n self.create_table(self.SequenceTable,self.SequenceFields,\n self.SequenceTypes)\n parser = SeqIO.parse(fn_proteins_fasta_file, \"fasta\")\n data = []\n n_stored = 0\n chunk_size = 1000\n for seq_record in parser:\n description = seq_record.description\n m = re.match(self.protein_record_pattern,description)\n gene_id = m.group(1)\n locus_tag = m.group(2)\n protein_description = m.group(3)\n 
table_record = [gene_id, locus_tag, protein_description, seq_record.seq.tostring()]\n data.append(table_record)\n # store chunks of data\n if len(data) > chunk_size:\n self.store_data(self.SequenceTable,data)\n n_stored += chunk_size\n log.info(\"Stored %20d sequences\\r\",n_stored)\n data = [] # empty data to avoid using a lot of memory\n # store last chunk\n if len(data) > 0:\n n_stored += len(data)\n self.store_data(self.SequenceTable,data)\n log.info(\"Stored %20d sequences\\r\",n_stored)", "def catAlignments(alignA, alignB):\n\n \"\"\"Create temporary files for MUSCLE to work on the two alignments\"\"\"\n aFn = tempfile.mktemp(prefix='tmp_align', suffix='.fasta', dir=None)\n bFn = tempfile.mktemp(prefix='tmp_align', suffix='.fasta', dir=None)\n outFn = tempfile.mktemp(prefix='tmp_align', suffix='.fasta', dir=None)\n\n \n \"\"\"Make sure alignments have the same length and are Series objects\"\"\"\n alignA = padAlignment(alignA)\n alignB = padAlignment(alignB)\n\n \"\"\"Put alignments in the tempfiles\"\"\"\n align2fasta(alignA, aFn)\n align2fasta(alignB, bFn)\n\n muscleCommand = ['muscle', '-profile', '-in1', aFn, '-in2', bFn, '-out', outFn]\n result = subprocess.call(muscleCommand)\n\n \"\"\"If MUSCLE was successful\"\"\"\n if not result:\n outAlign = fasta2align(outFn)\n else:\n print(\"Error in MUSCLE!\")\n raise Exception(\"MUSCLEError\")\n \n \"\"\"\n except:\n pass\n os.remove(aFn)\n os.remove(bFn)\n os.remove(outFn)\n raise\n \"\"\"\n \"\"\"Remove the temporary files\"\"\"\n os.remove(aFn)\n os.remove(bFn)\n os.remove(outFn)\n\n return outAlign", "def genPrimerPairs_5Ext(primer_length=20, anneal_length=10, GC_low=40, GC_high=60):\n\n print('Primers for 5\\' extension half-asstemers')\n\n forwTemplate5_3 = GenOligoGC(primer_length,GC_low, GC_high)\n \"\"\"re.match checks if the first 2 Nuc are GC in the forward and backwards direction\"\"\"\n while not (re.match(\"[GC]{2}\",str(forwTemplate5_3)) and\n re.match(\"[GC]{2}\", str(forwTemplate5_3[::-1])) and\n re.match(\"[GC]{2}\", str(forwTemplate5_3[10:12]))):\n\n forwTemplate5_3 = GenOligoGC(primer_length,GC_low, GC_high)\n\n forwTemp3_5 = forwTemplate5_3[::-1]\n forwPrimer5_3 = forwTemp3_5.complement()\n print(f\"Template Seq 3\\' - > 5\\': {forwTemp3_5}\")\n print(f\"ForwPrimer Seq 5\\' - > 3\\': {forwPrimer5_3}\")\n\n forwPrimer_f10 = forwPrimer5_3[:10]\n print(f\"First 10 Nucleotides of forward primer: {forwPrimer_f10}\")\n\n revPrimer_f10 = GenOligoGC(10,GC_low, GC_high)\n while not re.match(\"[GC]{2}\",str(revPrimer_f10)):\n revPrimer_f10 = GenOligoGC(10,GC_low, GC_high)\n\n revPrimer5_3 = revPrimer_f10 + forwPrimer_f10\n\n print(f\"RevPrimer Seq 5\\' - > 3\\': {revPrimer5_3}\")\n\n return forwPrimer5_3, revPrimer5_3", "def write_protein_fasta(args, clusters=None, fasta_dir=None):\n row, concat_fasta_path, frags = args\n dotpath = row[\"path\"]\n phylogeny_dict = {\"prot.idx\": row.name, \"path\": dotpath}\n for phy_prop in [name for name in row.index if name.startswith(\"phy.\")]:\n phylogeny_dict[phy_prop] = row[phy_prop]\n inpath = dotpath_to_path(dotpath)\n prot_info = read_tsv_or_parquet(inpath / PROTEINS_FILE)\n prot_info[\"frag.idx\"] = prot_info[\"frag.id\"].map(\n lambda oid: frags.loc[oid][\"frag.idx\"]\n )\n prot_info[\"frag.is_plas\"] = prot_info[\"frag.id\"].map(\n lambda oid: frags.loc[oid][\"frag.is_plas\"]\n )\n prot_info[\"frag.is_scaf\"] = prot_info[\"frag.id\"].map(\n lambda oid: frags.loc[oid][\"frag.is_scaf\"]\n )\n prot_info[\"frag.is_chr\"] = prot_info[\"frag.id\"].map(\n lambda oid: 
frags.loc[oid][\"frag.is_chr\"]\n )\n prot_info[\"frag.id\"] = prot_info[\"frag.id\"].map(\n lambda oid: frags.loc[oid][\"frag.id\"]\n )\n # Write out updated protein info\n write_tsv_or_parquet(prot_info, inpath / HOMOLOGY_FILE)\n # include phylogeny info in per-sequence info\n for prop in phylogeny_dict:\n prot_info[prop] = phylogeny_dict[prop]\n # write concatenated sequence info\n if clusters is None:\n fasta_path = concat_fasta_path\n info_to_fasta(None, fasta_path, append=True, infoobj=prot_info)\n else:\n for cluster_id, subframe in clusters.groupby(by=[\"cluster_id\"]):\n cluster_info = prot_info[prot_info.index.isin(subframe[\"members\"])]\n fasta_path = fasta_dir / f\"{cluster_id}.fa\"\n info_to_fasta(None, fasta_path, append=True, infoobj=cluster_info)", "def GetPseudoAAC2(ProteinSequence,lamda=30,weight=0.05,AAP=[_Hydrophobicity,_hydrophilicity]):\n\trightpart=[]\n\tfor i in range(lamda):\n\t\trightpart.append(GetSequenceOrderCorrelationFactor(ProteinSequence,i+1,AAP))\n\t\n\tresult={}\n\ttemp=1+weight*sum(rightpart)\n\tfor index in range(20,20+lamda):\n\t\tresult['PAAC'+str(index+1)]=round(weight*rightpart[index-20]/temp*100,3)\n\t\n\treturn result", "def mafft_multiple_alignment(path, id_protein, output_name):\n\n path_to_templates = path + 'Modeling/cleaned_template_fastas/'\n path_to_target = path + id_protein + '.fasta'\n with open('fastas_for_mafft', 'w') as fastas:\n\n # write target fasta in joint file\n\n target = open(path_to_target)\n for line in target:\n fastas.write(line)\n fastas.write(line)\n target.close()\n\n # write templates fastas in joint file\n\n number_of_fastas = 1 # 1 is for target\n templates = next(os.walk(path_to_templates))[2]\n print(templates)\n for i in templates:\n number_of_fastas += 1\n with open(path_to_templates + i) as template:\n for line in template:\n fastas.write(line)\n path_to_alignment = path + 'Modeling/fasta_alns_and_identities/'\n os.system('mafft --localpair --maxiterate 1000 fastas_for_mafft > ' + path_to_alignment + output_name)\n # os.remove('fastas_for_mafft')\n return number_of_fastas", "def genPrimerPairs_3Ext(primer_length=20, anneal_length=10, GC_low=40, GC_high=60):\n\n print('Primers for 3\\' extension half-asstemers')\n\n\n forwTemplate5_3 = GenOligoGC(primer_length,GC_low, GC_high)\n \"\"\"re.match checks if the first 2 Nuc are GC in the forward and backwards direction\"\"\"\n while not (re.match(\"[GC]{2}\",str(forwTemplate5_3)) and\n re.match(\"[GC]{2}\", str(forwTemplate5_3[::-1])) and\n re.match(\"[GC]{2}\", str(forwTemplate5_3[8:10]))):\n\n forwTemplate5_3 = GenOligoGC(primer_length,GC_low, GC_high)\n\n forwTemp3_5 = forwTemplate5_3[::-1]\n forwPrimer5_3 = forwTemp3_5.complement()\n print(f\"Template Seq 3\\' - > 5\\': {forwTemp3_5}\")\n print(f\"ForwPrimer Seq 5\\' - > 3\\': {forwPrimer5_3}\")\n\n forwPrimer_L10 = forwPrimer5_3[10:]\n print(f\"Last 10 Nucleotides of forward primer: {forwPrimer_L10}\")\n\n revPrimer_L10 = GenOligoGC(10,GC_low, GC_high)\n while not re.match(\"[GC]{2}\",str(revPrimer_L10[::-1])):\n revPrimer_L10 = GenOligoGC(10,GC_low, GC_high)\n\n \"\"\"First 10 Nuc of rev primer must be identical to last 10 Nuc of forward Primer\"\"\"\n revPrimer5_3 = forwPrimer_L10 + revPrimer_L10\n\n print(f\"RevPrimer Seq 5\\' - > 3\\': {revPrimer5_3}\")\n\n return forwPrimer5_3, revPrimer5_3", "def setUp(self):\n self.temp_dir = tempfile.mkdtemp()\n\n self.seqs1 = ('>1\\n'\n 'ACUGCUAGCUAGUAGCGUACGUA\\n'\n '>2\\n'\n 'GCUACGUAGCUAC\\n'\n '>3\\n'\n 'GCGGCUAUUAGAUCGUA\\n')\n self.seqs1_fp = join(self.temp_dir, 
'seq1.fa')\n with open(self.seqs1_fp, 'w') as f:\n f.write(self.seqs1)\n self.seqs1_aln = ('>1\\n---acugcuagcuaguagcguacgua\\n'\n '>2\\n------gcuacguagcuac-------\\n'\n '>3\\ngcggcuauuagaucgua---------\\n')\n self.seqs1_aln_fp = join(self.temp_dir, 'seq1_aln.fa')\n with open(self.seqs1_aln_fp, 'w') as f:\n f.write(self.seqs1_aln)\n\n self.seqs2 = ('>a\\nUAGGCUCUGAUAUAAUAGCUCUC\\n'\n '>b\\nUAUCGCUUCGACGAUUCUCUGAUAGAGA\\n'\n '>c\\nUGACUACGCAU\\n')\n self.seqs2_fp = join(self.temp_dir, 'seq2.fa')\n with open(self.seqs2_fp, 'w') as f:\n f.write(self.seqs2)\n\n self.add_seqs_aligned = (\">_seed_1\\n\"\n \"----------acugcuagcuaguagcguacgua\\n\"\n \">_seed_2\\n\"\n \"-------------gcuacguagcuac-------\\n\"\n \">_seed_3\\n\"\n \"-------gcggcuauuagaucgua---------\\n\"\n \">a\\n\"\n \"-------uaggcucugauauaauagcucuc---\\n\"\n \">b\\n\"\n \"uaucgcuucgacgauucucugauagaga-----\\n\"\n \">c\\n\"\n \"-------------------ugacuacgcau---\\n\")\n\n self.align1 = (\">seq_0\\nACUGCUAGCUAGUAGCGUACGUA\\n\"\n \">seq_1\\nGCUACGUAGCUAC----------\\n\"\n \">seq_2\\nGCGGCUAUUAGAU------CGUA\\n\")\n self.align1_fp = join(self.temp_dir, 'align1.fa')\n with open(self.align1_fp, 'w') as f:\n f.write(self.align1)\n self.align2 = (\">a\\nUAGGCUCUGAUAUAAUAGCUCUC---------\\n\"\n \">b\\nUA----UCGCUUCGACGAUUCUCUGAUAGAGA\\n\"\n \">c\\nUG------------ACUACGCAU---------\\n\")\n self.align2_fp = join(self.temp_dir, 'align2.fa')\n with open(self.align2_fp, 'w') as f:\n f.write(self.align2)\n self.align_two_align = (\">seq_0\\n\"\n \"--------------acugcuagcuaguagcguacgua\\n\"\n \">seq_1\\n\"\n \"--------------gcuacguagcuac----------\\n\"\n \">seq_2\\n\"\n \"--------------gcggcuauuagau------cgua\\n\"\n \">a\\n\"\n \"uaggcucugauauaauagcucuc--------------\\n\"\n \">b\\n\"\n \"ua----ucgcuucgacgauucucugauagaga-----\\n\"\n \">c\\n\"\n \"ug------------acuacgcau--------------\\n\")", "def aa(seq):\n global codontable\n seq = seq.upper()\n if codontable is None:\n # TODO: figure out the right place for the pre-computed information here\n bases = ['T', 'C', 'A', 'G']\n codons = [a+b+c for a in bases for b in bases for c in bases]\n codons = codons + list(map(lambda x: x.lower(), codons))\n amino_acids = 'FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG'\n amino_acids = amino_acids + amino_acids.lower()\n codontable = dict(zip(codons, amino_acids))\n res = ''\n for i in range(0, len(seq) - 2, 3):\n res += codontable[seq[i:(i+3)]]\n return res", "def test_horizontal_sequence_match(self):\n dna = self._create_dna()\n\n # Existing codon pair\n correct_codon_pair = dna.data[2]\n\n # Another codon pair\n other_pair = self._create_codon_pair()\n\n self.assertFalse(dna.has_sequence(other_pair))\n self.assertTrue(dna.has_sequence(correct_codon_pair))", "def maskPrimers(seq_file, primer_file, mode, align_func, align_args={}, \n max_error=default_max_error, barcode=False,\n out_args=default_out_args, nproc=None, queue_size=None):\n # Define subcommand label dictionary\n cmd_dict = {alignPrimers:'align', scorePrimers:'score'}\n \n # Print parameter info\n log = OrderedDict()\n log['START'] = 'MaskPrimers'\n log['COMMAND'] = cmd_dict.get(align_func, align_func.__name__)\n log['SEQ_FILE'] = os.path.basename(seq_file)\n log['PRIMER_FILE'] = os.path.basename(primer_file)\n log['MODE'] = mode\n log['BARCODE'] = barcode\n log['MAX_ERROR'] = max_error\n if 'start' in align_args: log['START_POS'] = align_args['start']\n if 'max_len' in align_args: log['MAX_LEN'] = align_args['max_len']\n if 'rev_primer' in align_args: log['REV_PRIMER'] = 
align_args['rev_primer']\n if 'skip_rc' in align_args: log['SKIP_RC'] = align_args['skip_rc']\n if 'gap_penalty' in align_args:\n log['GAP_PENALTY'] = ', '.join([str(x) for x in align_args['gap_penalty']])\n log['NPROC'] = nproc\n printLog(log)\n\n # Create dictionary of primer sequences to pass to maskPrimers\n primers = readPrimerFile(primer_file)\n if 'rev_primer' in align_args and align_args['rev_primer']:\n primers = {k: reverseComplement(v) for k, v in primers.items()}\n\n # Define alignment arguments and compile primers for align mode\n align_args['primers'] = primers \n align_args['score_dict'] = getDNAScoreDict(mask_score=(0, 1), gap_score=(0, 0))\n if align_func is alignPrimers:\n align_args['max_error'] = max_error\n align_args['primers_regex'] = compilePrimers(primers)\n \n # Define sequence masking arguments\n mask_args = {'mode': mode, \n 'barcode': barcode, \n 'delimiter': out_args['delimiter']}\n\n # Define feeder function and arguments\n feed_func = feedSeqQueue\n feed_args = {'seq_file': seq_file}\n # Define worker function and arguments\n work_func = processMPQueue\n work_args = {'align_func': align_func, \n 'align_args': align_args,\n 'mask_args': mask_args,\n 'max_error': max_error}\n \n # Define collector function and arguments\n collect_func = collectSeqQueue\n collect_args = {'seq_file': seq_file,\n 'task_label': 'primers',\n 'out_args': out_args}\n \n # Call process manager\n result = manageProcesses(feed_func, work_func, collect_func, \n feed_args, work_args, collect_args, \n nproc, queue_size)\n\n # Print log\n result['log']['END'] = 'MaskPrimers'\n printLog(result['log'])\n \n return result['out_files']", "def aligned_from_cigar(cigar_text, seq, moltype=DNA):\n if isinstance(seq, str):\n seq = moltype.makeSequence(seq)\n map = cigar_to_map(cigar_text)\n aligned_seq = seq.gappedByMap(map)\n return aligned_seq", "def question2():\n \n # load sequences and scoring matrix\n score_matrix = read_scoring_matrix(PAM50_URL)\n human_seq = \"HSGVNQLGGVFVNGRPLPDSTRQKIVELAHSGARPCDISRILQVSNGCVSKILGRYYETGSIRPRAIGGSKPRVATPEVVSKIAQYKRECPSIFAWEIRDRLLSEGVCTNDNIPSVSSINRVLRNLASEKQQ\"\n frfly_seq = \"HSGVNQLGGVFVGGRPLPDSTRQKIVELAHSGARPCDISRILQVSNGCVSKILGRYYETGSIRPRAIGGSKPRVATAEVVSKISQYKRECPSIFAWEIRDRLLQENVCTNDNIPSVSSINRVLRNLAAQKEQQ\"\n consensus_pax = read_protein(CONSENSUS_PAX_URL)\n \n # compute human and fruitfly global alignment matrix with consensus pax\n human_align_matrix = student.compute_alignment_matrix(human_seq, consensus_pax, score_matrix, True)\n frfly_align_matrix = student.compute_alignment_matrix(frfly_seq, consensus_pax, score_matrix, True)\n \n # compute human and fruitfly global alignment sequences\n score_human, human_align, consensus_align = student.compute_global_alignment(human_seq, consensus_pax, \n score_matrix, human_align_matrix)\n score_fly, frfly_align, consensus_align_2 = student.compute_global_alignment(frfly_seq, consensus_pax,\n score_matrix, frfly_align_matrix)\n \n # compute percentages match for human and fruitfly\n human_count = 0.0\n for index in range(len(human_align)):\n if human_align[index] == consensus_align[index]:\n human_count += 1\n \n frfly_count = 0.0\n for index in range(len(frfly_align)):\n if frfly_align[index] == consensus_align_2[index]:\n frfly_count += 1\n \n print \"% Human: \" + str(human_count / len(human_align) * 100)\n print \"Hmn: \" + human_align\n print \"PAX: \" + consensus_align\n \n print \"\"\n \n print \"% FrFly: \" + str(frfly_count / len(frfly_align) * 100)\n print \"Fly: \" + frfly_align\n print \"PAX: \" + 
consensus_align_2", "def map_reads(SRA):\n\n #1. bowtie to rRNA\n print(\"Bowtie alignement on contaminant RNA...\")\n cmd_bowtie = 'bowtie'+ ' ' + '-a' + ' ' + '-p6' + ' ' + '-S' + ' ' + '--un' + ' ' + TMP_DIR+SRA+'_rrnaUnmapped.fastq' + ' ' + BOWTIE_DIR+'/rRNA' + ' ' + TMP_DIR+SRA+'_trimmed.fastq' + ' ' + '|' + ' ' + 'samtools view -@ 6 -bS' + ' ' + '>' + TMP_DIR+SRA+'_trimmed_rrna.bam'\n output = subprocess.run(cmd_bowtie, shell=True)\n\n # 2. STAR to ref genome\n print(\"STAR alignement to yeast genome...\")\n cmd_STAR = 'STAR --outSAMtype BAM Unsorted --runThreadN 6 --winAnchorMultimapNmax 200 --seedSearchStartLmax 15 --genomeDir' + ' ' + STAR_DIR + ' ' + '--readFilesIn' + ' ' + TMP_DIR+SRA+'_rrnaUnmapped.fastq' + ' ' + '--outFileNamePrefix' + ' ' + TMP_DIR+SRA+'_STAR_'\n output = subprocess.run(cmd_STAR, shell=True)\n\n # 3. Samtools keep uniquely mapped reads and sort\n print(\"Samtools to keep uniquely mapped reads and sort...\")\n cmd_samtools1 = 'samtools view -@ 6 -b -q 255 -o' + ' ' + TMP_DIR+SRA+'_yeast_uniqueMapped_reads.bam' + ' ' + TMP_DIR+SRA+'_STAR_Aligned.out.bam'\n output = subprocess.run(cmd_samtools1, shell=True)\n\n cmd_samtools2 = 'samtools sort -@ 6 -o' + ' ' + TMP_DIR+SRA+'_yeast_uniqueMapped_reads_sorted.bam' + ' ' + TMP_DIR+SRA+'_yeast_uniqueMapped_reads.bam'\n output = subprocess.run(cmd_samtools2, shell=True)\n\n cmd_samtools3 = 'samtools index' + ' ' + TMP_DIR+SRA+'_yeast_uniqueMapped_reads_sorted.bam'\n output = subprocess.run(cmd_samtools3, shell=True)", "def read_in_file():\n\t# Declare variables\n\treads = []\n\n\t# Get command line arguments\n\targuments = sys.argv\n\targuments_length = len(arguments)\n\n\t# Read file is the first argument\n\tread_file_name = arguments[1]\n\n\t# Process read file \n\tread_file = open(read_file_name, 'r')\n\tfor line in read_file:\n\t\tread_info = line.split()\n\t\tread_string = read_info[2].replace('\\'', '')\n\t\tnew_read = GenerativeRead(read_string, [], read_info[5], read_info[3], None, [], read_info[0], read_info[1], read_info[4]) \n\t\treads.append(new_read)\n\tread_file.close()\n\n\t# Repeat regions file in the second argument\n\trepeat_file_name = arguments[2]\n\n\t# Process repeat file\n\trepeat_file = open(repeat_file_name, 'r')\n\talignments = [[]]\n\talignment_index = -1\n\tprevious_line = ''\n\n\n\tfor line in repeat_file:\n\t\talignment_info = line.split()\n\n\t\t# This consists of a tuple of alignment string, alignment start position and alignment chromosome\n\t\t#new_align = alignment_info[2], alignment_info[4], alignment_info[3]\n\n\t\tnew_align = Alignment(alignment_info[2], None, alignment_info[4], alignment_info[3])\n\n\t\tif previous_line != alignment_info[0]:\n\t\t\t# It is not a repeat\n\t\t\talignment_index = alignment_index + 1\n\t\t\talignments.append([])\n\t\t\tprevious_line = alignment_info[0]\n\n\t\talignments[alignment_index].append(new_align)\n\n\trepeat_file.close()\n\n\t# Associate each read with the other alignments\n\tfor read in reads:\n\t\t# Find the other alignments\n\t\tpos = read.get_position()\n\t\tfound = False\n\t\tfound_index = -1\n\n\t\tfor a_index, alignment_lists in enumerate(alignments):\n\t\t\t# find matching alignments\n\t\t\t# TODO: Don't add alignment already have\n\t\t\t# TODO: Make functional with filter\n\t\t\tfor align in alignment_lists:\n\t\t\t\tif align.get_position() == pos:\n\t\t\t\t\tfound = True\n\t\t\t\t\tfound_index = a_index\n\t\t\t\t\tbreak\n\n\t\t\tif found is True:\n\t\t\t\tbreak\n\n\t\tif found is True:\n\t\t\tfor new_align in 
alignments[found_index]:\n\t\t\t\tread.add_alignment(new_align)\n\t\t\t\n\n\n\t# SNP files are the remaining ones\n\tsnp_file_names = [arguments[file_id] for file_id in range(3, arguments_length) ]\n\n\t# Process SNP files\n\tfor file_name in snp_file_names:\n\t\tsnp_file = open(file_name, 'r')\n\n\t\tfor line in snp_file:\n\t\t\tsnp_info = line.split()\n\t\t\tsnps = snp_info[3].split('/')\n\t\t\tsnp_pos = int(float(snp_info[2]))\n\n\t\t\t# Ignore alleles that are longer than one base\n\n\t\t\t\n\t\t\tif all(len(x) < 2 for x in snps):\n\n\t\t\t\t# Iterate through reads and determine whether or not it contains this SNP\n\t\t\t\tpos_low = snp_pos - 49\n\t\t\t\n\n\t\t\t\tfor read in reads:\n\t\t\t\t\tpositions = read.get_alignment_positions()\n\n\t\t\t\t\tfor p_index, p in enumerate(positions):\n\t\t\t\t\t\tp = int(float(p))\n\t\t\t\t\t\tif p >= pos_low and p <= snp_pos:\n\t\t\t\t\t\t\t# Get index of snp\n\t\t\t\t\t\t\toffset = snp_pos - p\n\t\t\t\t\t\t\tcalls = [0, 0, 0, 0]\n\t\t\t\t\t\t\tfor snp in snps:\n\t\t\t\t\t\t\t\tcall_index = get_base_num(snp)\n\t\t\t\t\t\t\t\tcalls[call_index] = 1\n\n\t\t\t\t\t\t\t# Add the SNP to the read\n\t\t\t\t\t\t\tread.add_snp(p_index, offset, calls)\n\t\t\t\t\t\t\t\n\t\tsnp_file.close()\n\treturn reads", "def cyclopeptide_sequence(self, spectrum):\n\t# Make dict for faster access\n\tspectrum_dict = dict(zip(spectrum, spectrum))\n # Compute the max weight in spectrum. That has to be the weight of the acid\n max_spectrum_weight = max(spectrum)\n sequences = [\"\"]\n final_sequences = []\n while len(sequences) > 0:\n # Copy sequences to new_seq\n new_seq = copy.deepcopy(sequences)\n sequences = list()\n # Add new amino acid and check for consistency\n for s in new_seq:\n sequence_weight = ProteinTransformer(s).int_weight()\n for a in protein_util.amino_acids_by_weight:\n new_protein = s+a\n new_weight = sequence_weight + protein_util.protein_mass_int[a]\n # Case 1: Check if the new acid is consistent with spectrum\n flag = True\n for i in range(len(s)):\n w = ProteinTransformer(s[i:]+a).int_weight()\n if w not in spectrum_dict:\n flag = False\n break\n if not flag:\n continue\n # Check if new weight attained maximum. If not, just append to pending sequences\n elif new_weight < max_spectrum_weight:\n sequences.append(new_protein)\n # Finally, compare entire new spectrum with given spectrum\n else:\n new_spectrum = ProteinTransformer(new_protein).cyclospectrum()\n if new_spectrum == spectrum:\n final_sequences.append(new_protein)\n \n \n\t# Done. 
Now return final sequences\n return final_sequences", "def seq_align(string1,string2,mismatch_penalty,gap_penalty):\n\n # define 2x2 matrix\n matrix = []\n for i in range(len(string1)+1):\n if i == 0:\n matrix.append(list([gap_penalty * x for x in range(len(string2)+1)]))\n else:\n matrix.append(list([gap_penalty * i if x == 0 else None for x in range(len(string2)+1)]))\n\n # populate matrix by looping through the strings and finding optimal value for each spot\n for i in range(len(string1)):\n for j in range(len(string2)):\n if string1[i] == string2[j]:\n val1 = 0 + matrix[i][j]\n else:\n val1 = mismatch_penalty + matrix[i][j]\n val2 = gap_penalty + matrix[i][j+1]\n val3 = gap_penalty + matrix[i+1][j]\n min_val = min(val1,val2,val3)\n matrix[i+1][j+1] = min_val\n\n\n # define values to use while retracing\n result_str1 = ''\n result_str2 = ''\n i = len(matrix)-1\n j = len(matrix[0])-1\n\n # trace through matrix to find the optimal character alignment\n while i > 0 and j > 0:\n val1 = matrix[i-1][j-1]\n val2 = matrix[i-1][j]\n val3 = matrix[i][j-1]\n min_val = min(val1,val2,val3)\n if val1 == min_val:\n result_str1 += string1[i-1]\n result_str2 += string2[j-1]\n i -= 1\n j -= 1\n elif val2 == min_val:\n result_str1 += \"-\"\n result_str2 += string2[j-1]\n i -= 1\n else:\n result_str1 += string1[i-1]\n result_str2 += \"-\"\n j -= 1\n\n # for any leftover j values\n if i == 0:\n while j > 0:\n result_str1 += '-'\n result_str2 += string2[j]\n j -=1\n\n # for any leftover i values\n if j == 0:\n while i > 0:\n result_str1 += string1[i]\n result_str2 += \"-\"\n i -= 1\n\n return matrix[len(matrix)-1][len(matrix[0])-1], result_str1[::-1], result_str2[::-1]", "def test_add_seqs_to_alignment(self):\n res = add_seqs_to_alignment(self.seqs2_fp, self.seqs1_aln_fp, RNA)\n self.assertEqual(res.toFasta(), self.add_seqs_aligned)", "def main(argv):\n \n ### gets data from csv, sets variables\n seq1, seq2 = get_seqs('../data/seq.csv')\n \n \n # Assign the longer sequence to s1, and the shorter to s2\n l1, l2 = len(seq1), len(seq2)\n if l1 >= l2:\n s1, s2 = ((l2 - 1) * \".\" + seq1 + (l2 - 1) * \".\"), seq2\n #puts l2-1 \".\"s both sides of l1, allows alignment of all overlap combos\n else:\n s1, s2 = ((l1 - 1) * \".\" + seq2 + (l1 - 1) * \".\"), seq1\n l1, l2 = l2, l1 \n\n # writes alignment(s) with highest score into output file\n my_best_score = -1 #so 0 beats best score\n for i in range(l1 + l2 -1):\n score, matched, shift, end_shift = calculate_score(s1, s2, l1, l2, i)\n #assigns returns from calc_score function to these variables\n if score > my_best_score:\n my_best_score = score\n statement = \"This alignment occurs when the smaller strand (\" + \\\n str(l2) + \"nt in length) attaches from base \" + str(i - l2 + 2) + \\\n \" of the larger strand, with the highest score of \" + str(score) + \\\n \":\\n\"\n #statement explaining the alignment in detail\n best_comparison_highSP = (shift + matched + (l2 - 1) * \".\" + \"\\n\")\n best_comparison_lowSP = (shift + matched + end_shift + \"\\n\")\n best_s2, best_s1 = (shift + s2 + end_shift + \"\\n\"), (s1 + \"\\n\\n\\n\")\n #formats the matching, s1 and s2 lines to line-up neatly\n if i < l1 - 1:\n best_alignment = (str(statement) + str(best_comparison_lowSP) \\\n + str(best_s2) + str(best_s1))\n else:\n best_alignment = (str(statement) + str(best_comparison_highSP) \\\n + str(best_s2) + str(best_s1))\n # uses returned variables to write a statement about the alignment \n # giving its score and startpoint, and assigns 3 lines of alignment \n # (s1, s2 and 
matching bases) to a variable each for later printing\n f = open('../results/seqs_align.txt', 'w')\n f.write(best_alignment)\n f.close()\n print(\"Done!\")\n return None", "def findmotif(MS_seq, MS_name, ProteomeDict, motif_size):\n MS_seqU = MS_seq.upper()\n try:\n UP_seq = ProteomeDict[MS_name]\n assert MS_seqU in UP_seq, \"check \" + MS_name + \" with seq \" + MS_seq + \". Protein sequence found: \" + UP_seq\n regexPattern = re.compile(MS_seqU)\n MatchObs = list(regexPattern.finditer(UP_seq))\n if \"y\" in MS_seq:\n pY_idx = list(re.compile(\"y\").finditer(MS_seq))\n assert len(pY_idx) != 0\n center_idx = pY_idx[0].start()\n y_idx = center_idx + MatchObs[0].start()\n DoS_idx = None\n if len(pY_idx) > 1:\n DoS_idx = pY_idx[1:]\n assert len(DoS_idx) != 0\n elif \"t\" in MS_seq or \"s\" in MS_seq:\n DoS_idx = list(re.compile(\"y|t|s\").finditer(MS_seq))\n assert len(DoS_idx) != 0\n mappedMotif, pidx = makeMotif(UP_seq, MS_seq, motif_size, y_idx, center_idx, DoS_idx)\n if len(pidx) == 1:\n pos = pidx[0]\n if len(pidx) > 1:\n pos = \";\".join(pidx)\n\n if \"y\" not in MS_seq:\n pTS_idx = list(re.compile(\"t|s\").finditer(MS_seq))\n assert len(pTS_idx) != 0\n center_idx = pTS_idx[0].start()\n ts_idx = center_idx + MatchObs[0].start()\n DoS_idx = None\n if len(pTS_idx) > 1:\n DoS_idx = pTS_idx[1:]\n mappedMotif, pidx = makeMotif(UP_seq, MS_seq, motif_size, ts_idx, center_idx, DoS_idx)\n if len(pidx) == 1:\n pos = pidx[0]\n if len(pidx) > 1:\n pos = \";\".join(pidx)\n\n except BaseException:\n print(MS_name + \" not in ProteomeDict.\")\n raise\n\n return pos, mappedMotif", "def _GetPseudoAAC2(ProteinSequence,lamda=10,weight=0.05):\n\trightpart=[]\n\tfor i in range(lamda):\n\t\trightpart.append(_GetSequenceOrderCorrelationFactor(ProteinSequence,k=i+1))\n\t\n\tresult={}\n\ttemp=1+weight*sum(rightpart)\n\tfor index in range(20,20+lamda):\n\t\tresult['PAAC'+str(index+1)]=round(weight*rightpart[index-20]/temp*100,3)\n\t\n\treturn result", "def assign(dihedrals, pb_ref=REFERENCES):\n pb_seq = \"\"\n # iterate over all residues\n for res in sorted(dihedrals):\n angles = []\n # try to get all eight angles required for PB assignement\n try:\n angles.append(dihedrals[res-2][\"psi\"])\n angles.append(dihedrals[res-1][\"phi\"])\n angles.append(dihedrals[res-1][\"psi\"])\n angles.append(dihedrals[res ][\"phi\"])\n angles.append(dihedrals[res ][\"psi\"])\n angles.append(dihedrals[res+1][\"phi\"])\n angles.append(dihedrals[res+1][\"psi\"])\n angles.append(dihedrals[res+2][\"phi\"])\n # check for bad angles\n # (error while calculating torsion: missing atoms)\n if None in angles:\n pb_seq += \"Z\"\n continue\n\n # cannot get required angles (Nter, Cter or missign residues)\n # -> cannot assign PB\n # jump to next residue\n except KeyError:\n pb_seq += \"Z\"\n continue\n\n # convert to array\n angles = numpy.array(angles)\n\n # compare to reference PB angles\n rmsda_lst = {}\n for block in pb_ref:\n diff = pb_ref[block] - angles\n diff2 = angle_modulo_360_vect(diff)\n rmsda = numpy.sum(diff2**2)\n rmsda_lst[rmsda] = block\n pb_seq += rmsda_lst[min(rmsda_lst)]\n return pb_seq", "def build_seq_data(seq,\n sequence_length,\n initial_primers,\n search_range):\n \n aligned_seq=DNA.make_seq(seq)\n # remove gap characters\n unaligned_seq=str(DNA.make_seq(seq).degap())\n gaps=aligned_seq.gap_maps()\n \n if search_range:\n primer_start = get_corrected_index(seq,int(search_range.split(\":\")[0]))\n primer_end = get_corrected_index(seq,int(search_range.split(\":\")[1]))\n # Correct in case end index is close to the end of 
the sequence\n if primer_end + sequence_length > len(unaligned_seq):\n primer_end = len(unaligned_seq)-sequence_length+1\n\n else:\n primer_start = 0\n primer_end = len(unaligned_seq)-sequence_length+1\n \n for n in range(primer_start, primer_end):\n seq_slice=unaligned_seq[n:n+sequence_length]\n aligned_index=gaps[0][n]\n unaligned_index=n\n init_key=(seq_slice,aligned_index)\n initial_primers[init_key]=unaligned_index\n \n return initial_primers", "def question1():\n \n # load sequences and scoring matrix\n score_matrix = read_scoring_matrix(PAM50_URL)\n human_eyeless = read_protein(HUMAN_EYELESS_URL)\n fruitfly_eyeless = read_protein(FRUITFLY_EYELESS_URL)\n \n # compute local alignment matrix\n align_matrix = student.compute_alignment_matrix(human_eyeless, fruitfly_eyeless, \n score_matrix, False)\n \n # compute local alignment score and sequences\n score, human_align, fruitfly_align = student.compute_local_alignment(human_eyeless, fruitfly_eyeless,\n score_matrix, align_matrix)\n \n print \"Score: \" + str(score)\n print \"Human: \" + human_align\n print \"FrFly: \" + fruitfly_align\n \n return", "def test_sequences(self):\n self.assertEqual(self.t.get_mrna(self.chrom_seq), self.transcript_seq)\n self.assertEqual(self.t.get_cds(self.chrom_seq), self.cds_seq)\n self.assertEqual(self.t.get_protein_sequence(self.chrom_seq), self.amino_acid)\n self.assertEqual(self.t.get_intron_sequences(self.chrom_seq), self.introns)", "def test_sequences(self):\n self.assertEqual(self.t.get_mrna(self.chrom_seq), self.transcript_seq)\n self.assertEqual(self.t.get_cds(self.chrom_seq), self.cds_seq)\n self.assertEqual(self.t.get_protein_sequence(self.chrom_seq), self.amino_acid)\n self.assertEqual(self.t.get_intron_sequences(self.chrom_seq), self.introns)", "def test_sequences(self):\n self.assertEqual(self.t.get_mrna(self.chrom_seq), self.transcript_seq)\n self.assertEqual(self.t.get_cds(self.chrom_seq), self.cds_seq)\n self.assertEqual(self.t.get_protein_sequence(self.chrom_seq), self.amino_acid)\n self.assertEqual(self.t.get_intron_sequences(self.chrom_seq), self.introns)", "def test_sequences(self):\n self.assertEqual(self.t.get_mrna(self.chrom_seq), self.transcript_seq)\n self.assertEqual(self.t.get_cds(self.chrom_seq), self.cds_seq)\n self.assertEqual(self.t.get_protein_sequence(self.chrom_seq), self.amino_acid)\n self.assertEqual(self.t.get_intron_sequences(self.chrom_seq), self.introns)", "def test_sequences(self):\n self.assertEqual(self.t.get_mrna(self.chrom_seq), self.transcript_seq)\n self.assertEqual(self.t.get_cds(self.chrom_seq), self.cds_seq)\n self.assertEqual(self.t.get_protein_sequence(self.chrom_seq), self.amino_acid)\n self.assertEqual(self.t.get_intron_sequences(self.chrom_seq), self.introns)", "def test_sequences(self):\n self.assertEqual(self.t.get_mrna(self.chrom_seq), self.transcript_seq)\n self.assertEqual(self.t.get_cds(self.chrom_seq), self.cds_seq)\n self.assertEqual(self.t.get_protein_sequence(self.chrom_seq), self.amino_acid)\n self.assertEqual(self.t.get_intron_sequences(self.chrom_seq), self.introns)", "def test_sequences(self):\n self.assertEqual(self.t.get_mrna(self.chrom_seq), self.transcript_seq)\n self.assertEqual(self.t.get_cds(self.chrom_seq), self.cds_seq)\n self.assertEqual(self.t.get_protein_sequence(self.chrom_seq), self.amino_acid)\n self.assertEqual(self.t.get_intron_sequences(self.chrom_seq), self.introns)", "def test_sequences(self):\n self.assertEqual(self.t.get_mrna(self.chrom_seq), self.transcript_seq)\n self.assertEqual(self.t.get_cds(self.chrom_seq), 
self.cds_seq)\n self.assertEqual(self.t.get_protein_sequence(self.chrom_seq), self.amino_acid)\n self.assertEqual(self.t.get_intron_sequences(self.chrom_seq), self.introns)", "def test_sequences(self):\n self.assertEqual(self.t.get_mrna(self.chrom_seq), self.transcript_seq)\n self.assertEqual(self.t.get_cds(self.chrom_seq), self.cds_seq)\n self.assertEqual(self.t.get_protein_sequence(self.chrom_seq), self.amino_acid)\n self.assertEqual(self.t.get_intron_sequences(self.chrom_seq), self.introns)", "def alignment_org(angle=0.1):\n proposal_id('2023_2', '311564_test')\n yield from alignement_gisaxs_multisample(angle=angle)\n RE.md['ai_0'] = piezo.th.user_setpoint.get()\n proposal_id('2023_2', '311564_Pettersson')", "def translate(self,frame=1):\n if self.seq_type.upper() != \"DNA\":\n return \"Not a DNA sequence (wrong seq_type)\"\n\n aa_output=\"\"\n dna_input=self.seq\n if frame == 0 or frame not in range(-3,4):\n return \"Please specify a correct reading frame number. (+/- 1,2,3)\"\n\n elif frame in range(-3,0):\n dna_input=reverse_complement(dna_input)\n\n for pos in range (abs(frame)-1,len(dna_input)-2,3):\n codon=dna_input[pos:pos+3]\n aa_output += translation_codon(codon)\n\n return myseq(aa_output,\"Protein\")", "def main():\n\n args = get_args()\n seq = args.seq.upper()\n codon_to_aa = {\n 'AAA': 'K',\n 'AAC': 'N',\n 'AAG': 'K',\n 'AAU': 'N',\n 'ACA': 'T',\n 'ACC': 'T',\n 'ACG': 'T',\n 'ACU': 'T',\n 'AGA': 'R',\n 'AGC': 'S',\n 'AGG': 'R',\n 'AGU': 'S',\n 'AUA': 'I',\n 'AUC': 'I',\n 'AUG': 'M',\n 'AUU': 'I',\n 'CAA': 'Q',\n 'CAC': 'H',\n 'CAG': 'Q',\n 'CAU': 'H',\n 'CCA': 'P',\n 'CCC': 'P',\n 'CCG': 'P',\n 'CCU': 'P',\n 'CGA': 'R',\n 'CGC': 'R',\n 'CGG': 'R',\n 'CGU': 'R',\n 'CUA': 'L',\n 'CUC': 'L',\n 'CUG': 'L',\n 'CUU': 'L',\n 'GAA': 'E',\n 'GAC': 'D',\n 'GAG': 'E',\n 'GAU': 'D',\n 'GCA': 'A',\n 'GCC': 'A',\n 'GCG': 'A',\n 'GCU': 'A',\n 'GGA': 'G',\n 'GGC': 'G',\n 'GGG': 'G',\n 'GGU': 'G',\n 'GUA': 'V',\n 'GUC': 'V',\n 'GUG': 'V',\n 'GUU': 'V',\n 'UAA': 'Stop',\n 'UAC': 'Y',\n 'UAG': 'Stop',\n 'UAU': 'Y',\n 'UCA': 'S',\n 'UCC': 'S',\n 'UCG': 'S',\n 'UCU': 'S',\n 'UGA': 'Stop',\n 'UGC': 'C',\n 'UGG': 'W',\n 'UGU': 'C',\n 'UUA': 'L',\n 'UUC': 'F',\n 'UUG': 'L',\n 'UUU': 'F',\n }\n\n k = 3\n\n # 1: for loop\n # protein = ''\n # for codon in [seq[i:i + k] for i in range(0, len(seq), k)]:\n # aa = codon_to_aa.get(codon, '-')\n # if aa == 'Stop':\n # break\n # protein += aa\n\n # 2: list comprehension, slice to remove Stop\n # codons = [seq[i:i + k] for i in range(0, len(seq), k)]\n # aa = [codon_to_aa.get(codon, '-') for codon in codons]\n # if 'Stop' in aa:\n # aa = aa[:aa.index('Stop')]\n # print(''.join(aa))\n\n # 3: L.C. 
-> map(), slice -> takewhile\n # codons = map(lambda i: seq[i:i + k], range(0, len(seq), k))\n # aa = map(lambda codon: codon_to_aa.get(codon, '-'), codons)\n # print(''.join(takewhile(lambda c: c != 'Stop', aa)))\n\n # 4: combine map()\n # aa = map(lambda c: codon_to_aa.get(c, '-'),\n # map(lambda i: seq[i:i + k], range(0, len(seq), k)))\n # print(''.join(takewhile(lambda c: c != 'Stop', aa)))\n\n # 5: combine all\n # print(''.join(\n # takewhile(\n # lambda c: c != 'Stop',\n # map(lambda c: codon_to_aa.get(c, '-'),\n # map(lambda i: seq[i:i + k], range(0, len(seq), k))))))\n\n # 6: Seq\n print(str(Seq(args.seq).translate()).replace('*', ''))", "def _GetPseudoAAC(ProteinSequence,lamda=10,weight=0.05):\n\tres={}\n\tres.update(_GetPseudoAAC1(ProteinSequence,lamda=lamda,weight=weight))\n\tres.update(_GetPseudoAAC2(ProteinSequence,lamda=lamda,weight=weight))\n\treturn res", "def test_sequences(self):\n self.assertEqual(self.t.get_mrna(self.chrom_seq), self.transcript_seq)\n self.assertEqual(self.t.get_cds(self.chrom_seq, in_frame=False), self.cds_seq)\n self.assertEqual(self.t.get_protein_sequence(self.chrom_seq), self.amino_acid)\n self.assertEqual(self.t.get_intron_sequences(self.chrom_seq), self.introns)", "def introduce_terminal_gaps(template,aligned_template,aligned_candidate):\n \n # count the 5' gaps in the original aligned template\n original_five_prime_gaps = 0\n for c in template:\n if c == '-':\n original_five_prime_gaps +=1\n else:\n break\n \n # count the 5' gaps already existing in the pairwise aligned template\n # (because we don't need to add these)\n aligned_template_five_prime_gaps = 0\n for c in aligned_template:\n if c == '-':\n aligned_template_five_prime_gaps += 1\n else:\n break\n \n # compute the number of 5' gaps that need to be added to get to the\n # original alignment length\n five_prime_gaps_to_add = \\\n original_five_prime_gaps - aligned_template_five_prime_gaps\n \n # count the 3' gaps in the original aligned template\n original_three_prime_gaps = 0\n for c in reversed(template):\n if c == '-':\n original_three_prime_gaps +=1\n else:\n break\n \n # count the 3' gaps already existing in the pairwise aligned template\n # (because we don't need to add these)\n aligned_template_three_prime_gaps = 0\n for c in reversed(aligned_template):\n if c == '-':\n aligned_template_three_prime_gaps += 1\n else:\n break\n \n # compute the number of 3' gaps that need to be added to get to the\n # original alignment length\n three_prime_gaps_to_add = \\\n original_three_prime_gaps - aligned_template_three_prime_gaps\n\n # return the sequence with the 5' and 3' gaps added\n return DNA.makeSequence(''.join([\\\n '-'*five_prime_gaps_to_add,\\\n str(aligned_candidate),\\\n '-'*three_prime_gaps_to_add]),\\\n Name=aligned_candidate.Name)", "def main(fileToCheck, minLength=-1, maxLength=-1):\n\n # Initialise variables.\n lineCount = 1 # The number of the line being examined. 
Used for displaying error messages.\n protDescription = True # Whether or not we are currently expecting a line starting with >.\n firstLine = True # Whether or not we are currently examining the first line of the file.\n proteinsInFile = {} # A dictionary indexed by the protein description line of the FASTA file.\n # The value of each entry is the correctly formatted protein sequence corresponding to the index.\n\n # Strip off all excess whitespace, and split the string into the individual lines of the file.\n checking = fileToCheck.rstrip()\n checking = checking.lstrip()\n checking = checking.split('\\n')\n for line in checking:\n line = line.rstrip()\n if firstLine:\n # True if we have just started parsing the file string, and haven;t yet examined any lines.\n if line[0] == '>':\n currentProt = line # Record the description line of the protein which is about to have its sequence inspected.\n currentSeq = '' # Initialise the sequence of the protein.\n protDescription = False # We are now expecting a protein sequence, not a protein description.\n firstLine = False\n else:\n # The first line of the file MUST be a protein description line (i.e. start with '>'). If the line was not\n # the beginning of a protein record, terminate the program.\n errorMessage = \"Expected line \" + str(lineCount) + \" to start with a >, but instead got: \" + line\n return 1, errorMessage\n elif protDescription:\n # This is true only if a line beginning with a '>' is expected.\n if line[0] == '>':\n # Expected a protein description line, and found a protein description line. This means that the entire sequence\n # of the currentProt protein has been found (i.e. we have finished inspecting the sequence of a protein, and\n # have found the protein to be valid). Now determine if the length of the sequence is within the user\n # specified bounds.\n if minLength == -1:\n if maxLength == -1:\n # If there are no restrictions on the protein sequence length, then record the protein and its sequence.\n proteinsInFile[currentProt] = currentSeq\n elif len(currentSeq) <= maxLength:\n # If there is no minimum length restriction, and the protein sequence is not longer than the maximum\n # sequence length permitted, then record the protein and its sequence.\n proteinsInFile[currentProt] = currentSeq\n elif len(currentSeq) >= minLength:\n if maxLength == -1:\n # If there is no maximum length restriction, and the protein sequence is not shorter than the minimum\n # sequence length permitted, then record the protein and its sequence.\n proteinsInFile[currentProt] = currentSeq\n elif len(currentSeq) <= maxLength:\n # If the protein sequence is not shorter than the minimum sequence length permitted and not longer\n # than the maximum length permitted, then record the protein and its sequence.\n proteinsInFile[currentProt] = currentSeq\n currentProt = line # Record the description line of the protein which is about to have its sequence inspected.\n currentSeq = '' # Initialise the sequence of the protein.\n protDescription = False # We are now expecting a protein sequence, not a protein description.\n else:\n # If the line does not begin with a '>', and it is expected to, it is possible that the amino acid sequence\n # is split over multiple lines.\n if line.isalpha():\n # If every character on the line is a letter, then the line contains a valid portion of the sequence.\n # Add the uppercase version of the sequence portion to the sequence currently being recorded.\n currentSeq += line.upper()\n else:\n # If the line did not contain 
only letters, terminate the program.\n errorMessage = \"Expected line \" + str(lineCount) + \" to start with a >, but instead got: \" + line\n return 1, errorMessage\n else:\n # If an amino acid sequence is expected.\n if line.isalpha():\n # If the line is all alphabetic characters, write the line out and indicate that we are expecting a\n # protein description line next (i.e. one beginning with a '>').\n currentSeq += line.upper()\n protDescription = True\n else:\n # If the line did not contain only letters, terminate the program.\n errorMessage = \"Expected line \" + str(lineCount) + \" to contain only letters, but instead got: \" + line\n return 2, errorMessage\n\n lineCount += 1\n\n # Catch the final protein from the file, and determine whether it should be recorded.\n if minLength == -1:\n if maxLength == -1:\n proteinsInFile[currentProt] = currentSeq\n elif len(currentSeq) <= maxLength:\n proteinsInFile[currentProt] = currentSeq\n elif len(currentSeq) >= minLength:\n if maxLength == -1:\n proteinsInFile[currentProt] = currentSeq\n elif len(currentSeq) <= maxLength:\n proteinsInFile[currentProt] = currentSeq\n\n if len(proteinsInFile.keys()) < 2:\n # There are too few protein sequences entered\n errorMessage = (\"Not enough unique protein sequences have been entered.\" +\n \" This is possibly caused by not enough sequences of the required minimum and maximum length being provided.\"\n )\n return 3, errorMessage\n elif protDescription:\n # Return an indication that the FASTA file is correctly formatted.\n outputString = ''\n for i in proteinsInFile.keys():\n outputString += i + '\\n' + proteinsInFile[i] + '\\n'\n return 0, outputString[:-1]\n else:\n # The file did not end with a protein sequence.\n errorMessage = \"Reached the end of the file, but no protein sequence found for the final protein.\"\n return 3, errorMessage", "def pad_seq_records_for_alignment(seqs: List[SeqLikeType]):\n df = pd.DataFrame({\"seqs\": [SeqLike(seq, seq_type=\"aa\") for seq in seqs]})\n return df.seqs.seq.as_alignment()", "def parse_sam(rows):\n row1, row2 = rows\n mseqs = {}\n failed_list = []\n insert_list = []\n rname = row1['rname']\n qname = row1['qname']\n cigar1 = row1['cigar']\n cigar2 = row2['cigar']\n\n # filtering criteria\n reason = None\n if cigar1 == '*':\n reason = 'R1 unmapped'\n if int(row1['mapq']) < read_mapping_cutoff:\n reason = 'R1 low mapq'\n\n if cigar2 == '*':\n reason = 'R2 unmapped'\n if int(row2['mapq']) < read_mapping_cutoff:\n reason = 'R2 low mapq'\n\n genotype1, genotype2 = None, None\n try:\n genotype1 = row1['rname'].split('-')[1][0]\n genotype2 = row2['rname'].split('-')[1][0]\n except:\n reason = 'discordant map'\n pass\n\n if genotype1 != genotype2:\n reason = 'map conflict'\n\n if reason:\n failed_list.append({'qname': qname,\n 'rname1': row1['rname'],\n 'rname2': row2['rname'],\n 'reason': reason})\n else:\n pos1 = int(row1['pos'])-1 # convert 1-index to 0-index\n _, seq1, qual1, inserts = apply_cigar(cigar1, row1['seq'], row1['qual'])\n \n # report insertions relative to sample consensus\n for left, (iseq, iqual) in inserts.iteritems():\n insert_list.append({'qname': qname,\n 'fwd_rev': 'F' if is_first_read(row1['flag']) else 'R',\n 'refname': rname,\n 'pos': pos1+left,\n 'insert': iseq,\n 'qual': iqual})\n \n seq1 = '-'*pos1 + seq1 # pad sequence on left\n qual1 = '!'*pos1 + qual1 # assign lowest quality to gap prefix so it does not override mate\n \n \n # now process the mate\n pos2 = int(row2['pos'])-1 # convert 1-index to 0-index\n _, seq2, qual2, inserts = 
apply_cigar(cigar2, row2['seq'], row2['qual'])\n for left, (iseq, iqual) in inserts.iteritems():\n insert_list.append({'qname': qname,\n 'fwd_rev': 'F' if is_first_read(row2['flag']) else 'R',\n 'refname': rname,\n 'pos': pos2+left,\n 'insert': iseq,\n 'qual': iqual})\n seq2 = '-'*pos2 + seq2\n qual2 = '!'*pos2 + qual2\n \n # merge reads\n for qcut in sam2aln_q_cutoffs:\n mseq = merge_pairs(seq1, seq2, qual1, qual2, qcut)\n prop_N = mseq.count('N') / float(len(mseq.strip('-')))\n if prop_N > max_prop_N:\n # fail read pair\n failed_list.append({'qname': qname,\n 'reason': 'merge failure'})\n continue\n mseqs[qcut] = mseq\n\n return rname, mseqs, insert_list, failed_list", "def GetPseudoAAC1(ProteinSequence,lamda=30,weight=0.05,AAP=[_Hydrophobicity,_hydrophilicity]):\n\trightpart=0.0\n\tfor i in range(lamda):\n\t\trightpart=rightpart+GetSequenceOrderCorrelationFactor(ProteinSequence,i+1,AAP)\n\tAAC=GetAAComposition(ProteinSequence)\n\t\n\tresult={}\n\ttemp=1+weight*rightpart\n\tfor index,i in enumerate(AALetter):\n\t\tresult['PAAC'+str(index+1)]=round(AAC[i]/temp,3)\n\t\n\treturn result", "def centerStar_align(refName, dictofSeq):\n dictofFinalStr = {}\n refString = dictofSeq.pop(refName)\n #remove the center sequence from the list of sequence so it won't align to itself\n centerString = refString\n #construct a pointer to center squence\n for name in dictofSeq:\n alignment = sequence_align(centerString, dictofSeq.get(name))\n centerString = alignment[0]\n #print(centerString)\n strAligned = alignment[1]\n #print(strAligned)\n dictofFinalStr[name] = strAligned\n #print(len(listofFinalStr))\n\n for seq in dictofFinalStr:\n #Aligns all the sequence to the final center sequence with all the gaps inserted\n finalScore = gap_align(centerString, dictofFinalStr[seq])\n finalStr = finalScore\n dictofFinalStr[seq] = finalStr\n\n dictofFinalStr[refName] = (centerString)\n return dictofFinalStr", "def test_sequences(self):\n self.assertEqual(self.t.get_mrna(self.chrom_seq), self.transcript_seq)\n self.assertEqual(self.t.get_cds(self.chrom_seq), self.inframe_cds_seq)\n self.assertEqual(self.t.get_cds(self.chrom_seq, in_frame=False), self.cds_seq)\n self.assertEqual(self.t.get_protein_sequence(self.chrom_seq), self.amino_acid)\n self.assertEqual(self.t.get_intron_sequences(self.chrom_seq), self.introns)", "def filter_aligned_codons(aln):\n\n ind = find_aligned_codons(aln)\n return subalign(aln, ind)", "def main():\n\n\tparser = OptionParser()\n\tparser.add_option(\"-p\", dest=\"pdbfile\", help=\"pdbfile\")\n\tparser.add_option(\"-P\", dest=\"pdblist\", help=\"pdblist\")\n\tparser.add_option(\"-t\", dest=\"transpose\", help=\"transpose\", action=\"store_true\")\n\tparser.add_option(\"-n\", dest=\"number\", help=\"number\", action=\"store_true\")\n\tparser.add_option(\"-r\", dest=\"range\", help=\"range\")\n\tparser.add_option(\"-s\", dest=\"selection\", help=\"selection\")\n\tparser.set_description(main.__doc__)\n\t(options,args) = parser.parse_args()\n\n\tpdbfiles = []\n\tif options.pdblist:\n\t\tpdbfiles = files_from_list(options.pdblist)\n\telif options.pdbfile:\n\t\tpdbfiles.append(options.pdbfile)\t\n\telse:\n\t\tparser.print_help()\n\t\tsys.exit()\n\n\t\n\tif options.selection:\n\t\tsele = Selection()\n\t\tsele.makeSelection(options.selection)\n\n\tseq_min = 1\n\tseq_max = 1\n\tif options.range:\n\t\t(min,max) = string.split(arg, \"-\")\n\t\tseq_min = int(min)\n\t\tseq_max = int(max)\n\n\tprotein = Molecule()\n\tSeq = \"\"\n\tfor pdb in pdbfiles:\n\t\tprotein.readPDB(pdb)\n\t\tif 
options.selection:\n\t\t\tnewmol = sele.apply_selection(protein)\n\t\t\tSeq = newmol.sequence()\n\t\telse:\n\t\t\tSeq = protein.sequence()\n\n\t\tif options.range:\n\t\t\tSeq = Seq[seq_min:seq_max]\n\n\t\tif options.transpose:\n\t\t\tfor i in range(len(Seq)):\n\t\t\t\tprint Seq[i]\n\t\telse:\n\t\t\tprint Seq\n\n\t\tprotein.clear()", "def create_protein(self):\n while self.try_protein() == False:\n self.clear_protein()", "def sequence(self):\n inigen = IniGen()\n fields = algorithm_fields.algorithms['sequence']\n\n output_uuid_map = {}\n\n # set up global parameters\n algorithm_path = fields['path']\n enabled = \"True\"\n inigen.emit_global(algorithm_path, enabled)\n\n label = \"SEQ\"\n for t in ['C','L']:\n run_label = label+'_'+t\n t1Mag_label = '{0}1MAG'.format(t)\n t2Mag_label = '{0}2MAG'.format(t)\n t3Mag_label = '{0}3MAG'.format(t)\n t1Ang_label = '{0}1ANG'.format(t)\n t2Ang_label = '{0}2ANG'.format(t)\n t3Ang_label = '{0}3ANG'.format(t)\n distillate_label = \"{0}-ALL\".format(t)\n\n # header\n inigen.emit_run_header(run_label, CHUNKING, MINTIME, MAXTIME)\n\n # body\n dep_1Mag_label = t1Mag_label\n dep_1Mag_name = fields['deps'][0]\n dep_1Mag_uuid = self.uuid_map[t1Mag_label]\n\n dep_2Mag_label = t2Mag_label\n dep_2Mag_name = fields['deps'][1]\n dep_2Mag_uuid = self.uuid_map[t2Mag_label]\n\n dep_3Mag_label = t3Mag_label\n dep_3Mag_name = fields['deps'][2]\n dep_3Mag_uuid = self.uuid_map[t3Mag_label]\n\n dep_1Ang_label = t1Ang_label\n dep_1Ang_name = fields['deps'][3]\n dep_1Ang_uuid = self.uuid_map[t1Ang_label]\n\n dep_2Ang_label = t2Ang_label\n dep_2Ang_name = fields['deps'][4]\n dep_2Ang_uuid = self.uuid_map[t2Ang_label]\n\n dep_3Ang_label = t3Ang_label\n dep_3Ang_name = fields['deps'][5]\n dep_3Ang_uuid = self.uuid_map[t3Ang_label]\n \n deps = [[dep_1Mag_label, dep_1Mag_name, dep_1Mag_uuid],\n [dep_2Mag_label, dep_2Mag_name, dep_2Mag_uuid],\n [dep_3Mag_label, dep_3Mag_name, dep_3Mag_uuid],\n [dep_1Ang_label, dep_1Ang_name, dep_1Ang_uuid],\n [dep_2Ang_label, dep_2Ang_name, dep_2Ang_uuid],\n [dep_3Ang_label, dep_3Ang_name, dep_3Ang_uuid]]\n\n param_section_name = fields['params'][0]\n param_section_value = \"Production/{0}/{1}/{2}\".format(self.location, self.name, distillate_label)\n param_name_name = fields['params'][1]\n param_name_value = \"SEQ\"\n params = [[param_section_name, param_section_value], [param_name_name, param_name_value]]\n\n outputs = fields['outputs']\n\n emitted = inigen.emit_run_body(deps, params, outputs)\n\n output_uuid_map[\"ZER_{0}ANG\".format(t)] = emitted[-9][-36:]\n output_uuid_map[\"ZER_{0}MAG\".format(t)] = emitted[-8][-36:]\n output_uuid_map[\"POS_{0}ANG\".format(t)] = emitted[-7][-36:]\n output_uuid_map[\"POS_{0}MAG\".format(t)] = emitted[-6][-36:]\n output_uuid_map[\"NEG_{0}ANG\".format(t)] = emitted[-5][-36:]\n output_uuid_map[\"NEG_{0}MAG\".format(t)] = emitted[-4][-36:]\n output_uuid_map[\"UNB_{0}NEG\".format(t)] = emitted[-3][-36:]\n output_uuid_map[\"UNB_{0}ZER\".format(t)] = emitted[-2][-36:]\n\n filename = \"{0}/SEQ_{1}.ini\".format(self.dirname, self.name)\n inigen.generate_file(filename)\n return output_uuid_map", "def _self_align(self):\n logging.info(\"Splitting palindrome.\")\n logging.debug(\"Making reverse complement sequences of reads in \" +\n \"{i} to {o}\".format(i=self.ori_all_reads_fasta,\n o=self.rc_all_reads_fasta))\n num_reads = revcmp_fasta(self.ori_all_reads_fasta,\n self.rc_all_reads_fasta)\n\n reads_per_split = max(1, int(num_reads/self.nproc) + 1)\n logging.debug(\"Splitting {f} to small files each containing {n} 
reads.\".\n format(f=self.ori_all_reads_fasta, n=reads_per_split))\n fs = FastaSplitter(input_fasta=self.ori_all_reads_fasta,\n reads_per_split=reads_per_split,\n out_dir=self.out_dir,\n out_prefix=\"reads.split.\")\n fs.split()\n sp_fasta_files = fs.out_fns\n\n logging.debug(\"Splitting {f} to smaller files.\".\n format(f=self.rc_all_reads_fasta))\n rc_fs = FastaSplitter(input_fasta=self.rc_all_reads_fasta,\n reads_per_split=reads_per_split,\n out_dir=self.out_dir,\n out_prefix=\"rc_reads.split.\")\n rc_fs.split()\n rc_sp_fasta_files = rc_fs.out_fns\n\n logging.debug(\"Aligning each read in {i} to its revese compelement \" +\n \"read using sdpMatcher.\".format(i=self.ori_all_reads_fasta))\n\n sdps = [\"{f}.sdp\".format(f=f) for f in sp_fasta_files]\n jobs = []\n for f, rc_f, sdp in zip(sp_fasta_files, rc_sp_fasta_files, sdps):\n cmd = \"sdpMatcher {f} {rc_f} \".format(f=f, rc_f=rc_f) + \\\n \"10 -local > {sdp} \".format(sdp=sdp)\n logging.debug(\"CMD: {cmd}\".format(cmd=cmd))\n jobs.append(cmd)\n\n pool = Pool(processes=self.nproc)\n rets = pool.map(backticks, jobs)\n pool.close()\n pool.join()\n\n for i, job in enumerate(jobs):\n if rets[i][1] != 0:\n errMsg = \"Job {j} failed.\".format(j=job) + str(rets[i][2])\n raise RuntimeError(errMsg)\n\n logging.debug(\"Concatenating all sdp outputs to {f}\".\n format(f=self.sdp_out_file))\n cat_files(src=sdps, dst=self.sdp_out_file)\n\n logging.debug(\"Cleaning intermediate fasta & sdp files.\")\n fs.rmOutFNs()\n rc_fs.rmOutFNs()\n\n for f in sdps:\n os.remove(f)", "def GetPseudoAAC(ProteinSequence,lamda=30,weight=0.05,AAP=[_Hydrophobicity,_hydrophilicity]):\n\tres={}\n\tres.update(GetPseudoAAC1(ProteinSequence,lamda,weight,AAP))\n\tres.update(GetPseudoAAC2(ProteinSequence,lamda,weight,AAP))\n\treturn res", "def add_guide_alignment(self):\n test_sam = self.get_signalalign_events(sam=True)\n events = self.get_resegment_basecall()\n cigar_labels = create_labels_from_guide_alignment(events=events, sam_string=test_sam,\n kmer_index=self.kmer_index)\n for i, block in enumerate(cigar_labels):\n # print(block)\n self.aligned_signal.add_label(block, name=\"guide_alignment{}\".format(i), label_type='guide')\n return True", "def find_specific_primer_matches(primers,\n integer_mapped_seq,\n deletion_threshold,\n seq_count,\n sequence_length,\n label,\n unaligned_seq,\n region_slice,\n seq):\n \n primer_len=sequence_length\n overall_length=region_slice+primer_len\n bad_primers=[]\n seq_length=len(integer_mapped_seq)\n \n if len(unaligned_seq)==0:\n raise_(ValueError,('unaligned sequence contains no data.'))\n \n for p in range(len(primers)):\n corrected_index = get_corrected_index(seq,primers[p].aligned_index)\n start_index = corrected_index\n end_index = corrected_index + primer_len\n \n \n # skip test if testing beyond the end of the sequence\n if end_index > seq_length:\n continue\n # Will return all non-zeros with perfect base pair matching\n seq_bitwise = bitwise_and(primers[p].numeric_seq,\n integer_mapped_seq[start_index:end_index])\n if len(seq_bitwise.nonzero()[0])==primer_len:\n primers[p].non_specific_hits +=1\n if primers[p].non_specific_hits>deletion_threshold:\n bad_primers.append(p)\n\n \n del_primers(primers,bad_primers)\n return primers", "def align_two_seqs(template, candidate,\n align_unaligned_seqs_f=muscle_align_unaligned_seqs,\n params={},moltype=DNA):\n # Load the sequences into a form useful to align_unaligned_seq_f\n seqs = [('template',str(template)), ('candidate',str(candidate))]\n # Align the sequences\n aln = 
align_unaligned_seqs_f(seqs,moltype,params=params)\n # Extract the sequences from the alignment object and return them\n return aln.getGappedSeq('template'), aln.getGappedSeq('candidate')", "def alignment_to_cigar_blocks(ref_aligned, read_aligned):\n expanded_sequence = []\n for ref_char, read_char in zip(ref_aligned, read_aligned):\n if ref_char == '-':\n expanded_sequence.append('I')\n elif read_char == '-':\n expanded_sequence.append('D')\n elif ref_char == read_char:\n #expanded_sequence.append('=')\n expanded_sequence.append('M')\n else:\n #expanded_sequence.append('X')\n expanded_sequence.append('M')\n sequence, counts = utilities.decompose_homopolymer_sequence(expanded_sequence)\n return [[count, char] for char, count in zip(sequence, counts)]", "def coding_strand_to_AA(dna):\n protein=''\n for i in range(0,len(dna),3):\n\t if dna[i:i+3] in aa_table.keys():\n\t \tprotein += aa_table[dna[i:i+3]]\n return protein", "def join(args):\n from jcvi.formats.agp import OO, Phases, build\n from jcvi.formats.sizes import Sizes\n\n p = OptionParser(join.__doc__)\n p.add_option(\"--newid\", default=None, help=\"New sequence ID\")\n p.add_option(\n \"--gapsize\",\n default=100,\n type=\"int\",\n help=\"Number of N's in between the sequences\",\n )\n p.add_option(\"--gaptype\", default=\"contig\", help=\"Gap type to use in the AGP file\")\n p.add_option(\n \"--evidence\", default=\"\", help=\"Linkage evidence to report in the AGP file\"\n )\n p.add_option(\"--oo\", help=\"Use .oo file generated by bambus\")\n opts, args = p.parse_args(args)\n\n nargs = len(args)\n if nargs not in (1, 2):\n sys.exit(not p.print_help())\n\n if nargs == 2:\n fastafile, phasefile = args\n phases = DictFile(phasefile)\n phases = dict((a, Phases[int(b)]) for a, b in phases.items())\n else:\n (fastafile,) = args\n phases = {}\n\n sizes = Sizes(fastafile)\n prefix = fastafile.rsplit(\".\", 1)[0]\n agpfile = prefix + \".agp\"\n newid = opts.newid\n oo = opts.oo\n\n o = OO(oo, sizes.mapping)\n\n if oo:\n seen = o.contigs\n # The leftover contigs not in the oo file\n logging.debug(\n \"A total of {0} contigs ({1} in `{2}`)\".format(len(sizes), len(seen), oo)\n )\n\n for ctg, size in sizes.iter_sizes():\n if ctg in seen:\n continue\n o.add(ctg, ctg, size)\n\n else:\n if newid:\n for ctg, size in sizes.iter_sizes():\n o.add(newid, ctg, size)\n else:\n for scaffold_number, (ctg, size) in enumerate(sizes.iter_sizes()):\n object_id = \"scaffold{0:03d}\".format(scaffold_number + 1)\n o.add(object_id, ctg, size)\n\n fw = open(agpfile, \"w\")\n o.write_AGP(\n fw,\n gapsize=opts.gapsize,\n gaptype=opts.gaptype,\n evidence=opts.evidence,\n phases=phases,\n )\n fw.close()\n\n joinedfastafile = prefix + \".joined.fasta\"\n build([agpfile, fastafile, joinedfastafile])\n\n return joinedfastafile", "def MatchProtNames(ProteomeDict, MS_names, MS_seqs):\n matchedNames, seqs, Xidx = [], [], []\n counter = 0\n for i, MS_seq in enumerate(MS_seqs):\n MS_seqU = MS_seq.upper()\n MS_name = MS_names[i].strip()\n if MS_name in ProteomeDict and MS_seqU in ProteomeDict[MS_name]:\n Xidx.append(i)\n seqs.append(MS_seq)\n matchedNames.append(MS_name)\n else:\n try:\n newname = getKeysByValue(ProteomeDict, MS_seqU)[0]\n assert MS_seqU in ProteomeDict[newname]\n Xidx.append(i)\n seqs.append(MS_seq)\n matchedNames.append(newname)\n except BaseException:\n print(MS_name, MS_seqU)\n counter += 1\n continue\n\n assert counter == 0, \"Proteome is missing %s peptides\" % (counter)\n assert len(matchedNames) == len(seqs)\n return matchedNames, seqs, Xidx", "def 
mrna_desc(gff3, fasta):\n seqs = {}\n for defline, seq in LocusPocus.fasta.parse(fasta):\n seqid = defline[1:].split(' ')[0]\n if seqid not in seqs:\n seqs[seqid] = seq\n\n mrnaacc = ''\n mrnalen = 0\n for entry in gff3:\n if '\\tmRNA\\t' in entry:\n fields = entry.rstrip().split('\\t')\n assert len(fields) == 9\n mrnalen += int(fields[4]) - int(fields[3]) + 1\n accmatch = re.search(r'accession=([^;\\n]+)', fields[8])\n assert accmatch, 'Unable to parse mRNA accession: %s' % fields[8]\n mrnaacc = accmatch.group(1)\n elif entry.startswith('###'):\n mrnaseq = seqs[mrnaacc]\n if len(mrnaseq) != mrnalen:\n message = 'mature mRNA \"%s\": length mismatch' % mrnaacc\n message += ' (gff3=%d, fa=%d)' % (mrnalen, len(mrnaseq))\n message += '; most likely a duplicated accession, discarding'\n print(message, file=sys.stderr)\n else:\n gccontent = gc_content(mrnaseq)\n gcskew = gc_skew(mrnaseq)\n ncontent = n_content(mrnaseq)\n values = '%s %d %.3f %.3f %.3f' % (\n mrnaacc, mrnalen, gccontent, gcskew, ncontent)\n yield values.split(' ')\n mrnaacc = ''\n mrnalen = 0", "def placePeptide(sequence, phiPsis, model=\"scratch\", position=None,\n\t\t\t\t\t\trotlib=None, chainID='A'):\n\n\tif not sequence:\n\t\traise ValueError(\"No sequence supplied\")\n\tsequence = sequence.upper()\n\tif not sequence.isupper():\n\t\traise ValueError(\"Sequence contains non-alphabetic characters\")\n\tfrom chimera.resCode import protein1to3\n\tfor c in sequence:\n\t\tif c not in protein1to3:\n\t\t\traise ValueError(\"Unrecognized protein 1-letter code:\"\n\t\t\t\t\t\t\t\t\" %s\" % c)\n\tif len(sequence) != len(phiPsis):\n\t\traise ValueError(\"Number of phi/psis not equal to\"\n\t\t\t\t\t\t\t\" sequence length\")\n\tif isinstance(model, basestring):\n\t\tmodel = _newModel(model)\n\tneedFocus = False\n\tif position is None:\n\t\tif len(chimera.openModels.list()) == 1:\n\t\t\tneedFocus = True\n\t\txf = model.openState.xform\n\t\tposition = xf.inverse().apply(\n\t\t\t\tPoint(*chimera.viewer.camera.center))\n\tprev = [None] * 3\n\tpos = 1\n\tfrom Midas.addAA import DIST_N_C, DIST_CA_N, DIST_C_CA, DIST_C_O\n\tfrom chimera.molEdit import findPt, addAtom, addDihedralAtom\n\tserialNumber = None\n\tresidues = []\n\tfor c, phiPsi in zip(sequence, phiPsis):\n\t\tphi, psi = phiPsi\n\t\twhile model.findResidue(chimera.MolResId(chainID, pos)):\n\t\t\tpos += 1\n\t\tr = model.newResidue(protein1to3[c], chainID, pos, ' ')\n\t\tresidues.append(r)\n\t\tfor backbone, dist, angle, dihed in (\n\t\t\t\t('N', DIST_N_C, 116.6, psi),\n\t\t\t\t('CA', DIST_CA_N, 121.9, 180.0),\n\t\t\t\t('C', DIST_C_CA, 110.1, phi)):\n\t\t\tif prev[0] == None:\n\t\t\t\tpt = Point(0.0, 0.0, 0.0)\n\t\t\telif prev[1] == None:\n\t\t\t\tpt = Point(dist, 0.0, 0.0)\n\t\t\telif prev[2] == None:\n\t\t\t\tpt = findPt(prev[0].coord(), prev[1].coord(),\n\t\t\t\t\tPoint(0.0, 1.0, 0.0), dist, angle, 0.0)\n\t\t\telse:\n\t\t\t\tpt = findPt(prev[0].coord(), prev[1].coord(),\n\t\t\t\t\tprev[2].coord(), dist, angle, dihed)\n\t\t\ta = addAtom(backbone, Element(backbone[0]), r, pt,\n\t\t\t\tserialNumber=serialNumber, bondedTo=prev[0])\n\t\t\tserialNumber = a.serialNumber + 1\n\t\t\tprev = [a] + prev[:2]\n\t\to = addDihedralAtom(\"O\", Element(\"O\"), prev[0], prev[1],\n\t\t\tprev[2], DIST_C_O, 120.4, 180.0 + psi, bonded=True)\n\t# C terminus O/OXT at different angle than mainchain O\n\tmodel.deleteAtom(o)\n\taddDihedralAtom(\"O\", Element(\"O\"), prev[0], prev[1],\n\t\t\tprev[2], DIST_C_O, 117.0, 180.0 + psi, bonded=True)\n\taddDihedralAtom(\"OXT\", Element(\"O\"), prev[0], prev[1], 
prev[2],\n\t\t\t\t\tDIST_C_O, 117.0, psi, bonded=True)\n\tfrom Rotamers import useBestRotamers\n\t# have to process one by one, otherwise side-chain clashes will occur\n\tkw = {}\n\tif rotlib:\n\t\tkw['lib'] = rotlib\n\tfor r in residues:\n\t\tuseBestRotamers(\"same\", [r], criteria=\"cp\", log=False, **kw)\n\t\t\t\t\n\t# find peptide center\n\tcoords = []\n\tfor r in residues:\n\t\tcoords.extend([a.coord() for a in r.atoms])\n\tcenter = Point(coords)\n\tcorrection = position - center\n\tfor r in residues:\n\t\tfor a in r.atoms:\n\t\t\ta.setCoord(a.coord() + correction)\n\tfrom Midas import ksdssp\n\tksdssp([model])\n\tif needFocus:\n\t\tchimera.runCommand(\"focus\")\n\treturn residues", "def guess_align(aln):\n \n if \"pep\" in [guess_seq(seq) for seq in aln.itervalues()]:\n return \"pep\"\n else:\n return \"dna\"", "def coding_strand_to_AA(dna):\n #inital conditions\n protein = ''\n i = 0\n\n #for the length of DNA, translate each codon in an ORF to an amino acid\n while i < (len(dna)-2):\n codon = dna[i:i+3] \n amino_acid = aa_table[codon]\n protein= protein + amino_acid\n i += 3\n\n #return the string of amino acids\n return protein", "def ComputeDistMatrix(dict_alignedSequences):\r\n \r\n # check if dictionary with keys as tuples containing integers and values as tuples containing strings\r\n check = True \r\n #1 Check Input is dict\r\n if isinstance(dict_alignedSequences, dict) == False:\r\n check = False\r\n \r\n #2 Check are the keys and values tuples. Do the keys only contain integers and the vlaues only strings\r\n i = 0\r\n while len(dict_alignedSequences) > i:\r\n #checking for keys and values as tuples\r\n if isinstance(list(dict_alignedSequences.keys())[i], tuple) == False or isinstance(list(dict_alignedSequences.values())[i], tuple) == False:\r\n check = False\r\n break\r\n #checking keys for integers\r\n if isinstance(list(dict_alignedSequences.keys())[i][0], int) == False or isinstance(list(dict_alignedSequences.keys())[i][1], int) == False:\r\n check = False\r\n break\r\n #checking values for strings\r\n if isinstance(list(dict_alignedSequences.values())[i][0], str) == False or isinstance(list(dict_alignedSequences.values())[i][1], str) == False:\r\n check = False\r\n break\r\n \r\n #increment the counter for while loop\r\n i += 1\r\n \r\n #3 Check sequences contain aligned DNA and are of equal length\r\n for key in dict_alignedSequences:\r\n if is_aligned_dna(dict_alignedSequences[key][0]) == False or is_aligned_dna(dict_alignedSequences[key][1]) == False:\r\n check = False\r\n break\r\n if len(dict_alignedSequences[key][0]) != len(dict_alignedSequences[key][1]):\r\n check = False\r\n break\r\n \r\n #final evalauation if data is usable\r\n if check == False:\r\n raise TypeError ('malformed input')\r\n \r\n #get number of sequences\r\n matrixdim = howmany_sequences(dict_alignedSequences)\r\n #initialize dist matrix\r\n distMatrix = init_Dist_Matrix(matrixdim)\r\n \r\n \r\n for i in dict_alignedSequences.keys():\r\n # useing the key i to get the corisponding aligned sequences \r\n seq = dict_alignedSequences[i]\r\n #calculate distances between the sequences\r\n distance = calculate_distance(seq[0],seq[1])\r\n #markdown result at the corrsiponding place in the distmatrix\r\n distMatrix[i[0]][i[1]] = distance\r\n distMatrix[i[1]][i[0]] = distance\r\n \r\n return(distMatrix)", "def complementary_seq(self):\n if not self.data['DNAseq']:\n self.complement_seq_var.set(0)\n self.warning('No DNA sequence loaded','You have to load a DNA sequence first')\n return\n 
compl={'A':'T','T':'A','C':'G','G':'C'}\n comDNA=''\n for base in self.data['DNAseq']:\n comDNA=comDNA+compl[base]\n self.data['DNAseq']=comDNA\n\n # Update\n self.update_sequence_window()\n return", "def process(self, name_and_sequence):\n name, sequence = name_and_sequence\n yield preprocess_utils.get_pacbio_molecule_name(name), sequence", "def BLAST_alignment(species, index_query, index_alignment, index_identity, prot):\n alignments = {}\n seq_id = []\n boo = True\n with open(blastpPath + '/BLAST_%s_mouse' % species) as f:\n for line in f:\n if boo:\n if line[0] != '#':\n query = re.split(\"\\||\\t\", line)[index_query]\n iden = float(re.split(\"\\||\\t\", line)[index_identity])\n if query in prot:\n seq_id.append(iden)\n boo = False\n if line[0] == '#':\n boo = True\n\n return np.array(seq_id)", "def parse_protein_sequences(protein_seq_files):\n all_sequences_objs = []\n for fasta in protein_seq_files:\n with open(fasta, 'r') as f:\n sequences_in_fasta = SeqIO.parse(f, 'fasta', IUPAC.protein)\n for sequence_obj in sequences_in_fasta:\n all_sequences_objs.append(sequence_obj)\n return all_sequences_objs", "def transcript_sequence(species,aceVersion,log=0):\n \n os.chdir(os.environ['PYDATA']+'/%s/log'%species)\n logFile=open('%s_ace_transcripts.txt'%species,'w')\n t1=time.time()\n #create ace transcript_sequence\n path=os.environ['PYDATA']+\"/\"+species+\"/aceview/\"+species+\"_transcript_sequence.bkdb\"\n if os.path.exists(path):\n os.remove(path)\n transcriptDB=bsddb.btopen(path,'w')\n \n #test if mRNAs sequences are in one file or in several chromosome files\n try:\n sequenceFile = open('%s/%s_%s/AceView.ncbi_37.all_mrnas_dna.fasta' %(os.environ['ACEDATA'],species,aceVersion.lower()),'r')\n chrFlag=0 \n except: \n chrFlag=1 \n \n if chrFlag: \n #open database for relation between chromosome and Ensembl region\n path=os.environ['PYDATA']+'/'+species+'/ensembl/'+species+'_region_by_chromosome.bkdb'\n chrDB=bsddb.btopen(path,'r')\n chromosomes=chrDB.keys()\n tscriptNb=0 \n for chromosome in chromosomes:\n print 'processing chromosome: '+chromosome\n try: \n sequenceFile = open('%s/%s_%s/x1.all_mrnas_fasta.%s.fasta' %(os.environ['ACEDATA'],species,aceVersion.lower(),chromosome),'r') \n region=chrDB[chromosome] \n geneName='' \n transcriptName=''\n sequence='' \n for lines in sequenceFile:\n tscriptNb=tscriptNb+1 \n line = lines.split('\\n')[0]\n if not line:\n #save last transcript\n if geneName != '': \n #save previous transcript\n transcriptDB[transcriptName]=cPickle.dumps(ps_class.Transcript(ID=transcriptName,geneID=geneName,chromosome=chromosome,region=region,sequence=sequence),protocol=-1)\n break\n # get some informations \n if line[0]=='>': \n if geneName != '': \n #save previous transcript\n transcriptDB[transcriptName]=cPickle.dumps(ps_class.Transcript(ID=transcriptName,geneID=geneName,chromosome=chromosome,region=region,sequence=sequence),protocol=-1)\n transcriptName = line.split(':')[1] \n shortName=transcriptName.split(aceVersion)[0] \n transcriptLetter=shortName.split('.')[-1]\n geneName=shortName.split('.'+transcriptLetter)[0] \n sequence='' \n else:\n # Construct sequence\n sequence=sequence+line\n except:\n logFile.write('no AceView files %s/x1.all_mrnas_fasta.%s.fasta' %(os.environ['ACEDATA'],chromosome)) \n transcriptDB.close()\n chrDB.close()\n else: \n tscriptNb=0 \n sequenceFile = open('%s/%s_%s/AceView.ncbi_37.all_mrnas_dna.fasta' %(os.environ['ACEDATA'],species,aceVersion.lower()),'r') \n geneName='' \n transcriptName=''\n sequence='' \n for lines in 
sequenceFile:\n tscriptNb=tscriptNb+1 \n line = lines.split('\\n')[0]\n if not line:\n #save last transcript\n if geneName != '': \n #save previous transcript\n transcriptDB[transcriptName]=cPickle.dumps(ps_class.Transcript(ID=transcriptName,geneID=geneName,sequence=sequence),protocol=-1)\n break\n # get some informations \n if line[0]=='>': \n if geneName != '': \n #save previous transcript\n transcriptDB[transcriptName]=cPickle.dumps(ps_class.Transcript(ID=transcriptName,geneID=geneName,sequence=sequence),protocol=-1)\n transcriptName = line.split(':')[1] \n shortName=transcriptName.split(aceVersion)[0] \n transcriptLetter=shortName.split('.')[-1]\n geneName=shortName.split('.'+transcriptLetter)[0] \n sequence='' \n else:\n # Construct sequence\n sequence=sequence+line \n transcriptDB.close()\n \n t2=time.time()\n if log!=0:\n log.write('\\t%u\\t%.2f\\n'%(tscriptNb,t2-t1))", "def pairs(sequences, ordering = None, material = 'rna',\n dangles = 'some', T = 37, multi = True, pseudo = False,\n sodium = 1.0, magnesium = 0.0, cutoff = 0.001):\n \n ## Set up command-line arguments and input\n args, cmd_input = \\\n setup_nupack_input(exec_name = 'pairs', sequences = sequences, ordering = ordering,\n material = material, sodium = sodium, magnesium = magnesium,\n dangles = dangles, T = T, multi = multi, pseudo = pseudo)\n if multi:\n suffix = '.epairs'\n else:\n suffix = '.ppairs'\n \n ## Perform call\n output = call_with_file(args, cmd_input, suffix)\n\n ## Parse and return output\n pair_probs = []\n for l in filter(lambda x: x[0].isdigit(), output):\n if len(l.split()) > 1:\n i,j,p = l.split()\n pair_probs.append(tuple( (int(i),int(j),float(p)) ))\n\n return pair_probs" ]
[ "0.6774844", "0.6548914", "0.6453903", "0.63102025", "0.62329227", "0.62122506", "0.62016445", "0.6198148", "0.61716217", "0.61462194", "0.6104765", "0.60956174", "0.6076317", "0.605018", "0.6017549", "0.60125715", "0.60021424", "0.5987413", "0.598054", "0.59300274", "0.5926972", "0.5923603", "0.5923525", "0.5904142", "0.5877252", "0.5852384", "0.5849563", "0.5846382", "0.58281505", "0.5818985", "0.58177155", "0.5814012", "0.58044374", "0.5801504", "0.5797758", "0.57968014", "0.5768446", "0.5749357", "0.57410663", "0.57282484", "0.5726137", "0.5720364", "0.5720266", "0.57198507", "0.5715218", "0.57121235", "0.5701708", "0.56947386", "0.56853294", "0.5682476", "0.567762", "0.5670653", "0.5668629", "0.5657969", "0.5646472", "0.5645912", "0.5645641", "0.5645641", "0.5645641", "0.5645641", "0.5645641", "0.5645641", "0.5645641", "0.5645641", "0.5645641", "0.5644958", "0.564483", "0.5640268", "0.5617777", "0.56160605", "0.5613074", "0.561236", "0.56121105", "0.5612", "0.55966663", "0.5591083", "0.5588052", "0.5585981", "0.558192", "0.5581004", "0.5579552", "0.55723333", "0.55694443", "0.55676913", "0.5565572", "0.5561327", "0.5553347", "0.55444664", "0.5543364", "0.5541945", "0.5540163", "0.55242795", "0.55170834", "0.55118287", "0.5504488", "0.550084", "0.55001444", "0.5498834", "0.5497105", "0.5497092", "0.5494881" ]
0.0
-1
Aligns to proteins with BioPython
def proteins_align(self, protein_a, protein_b): # Set variables first = Seq(self.proteins_dict[protein_a]["protein"]) second = Seq(self.proteins_dict[protein_b]["protein"]) # Align proteins align = pairwise2.align.globalxx(first, second, one_alignment_only=True) aligned_a = align[0].seqA aligned_b = align[0].seqB # Calculate shared string shared = self.shared(aligned_a, aligned_b) # Returns dictionary of shared terms return {protein_a: aligned_a, protein_b: aligned_b, "shared": shared, "shared_count": Counter([x for x in shared.split("-") if x != ""]), "percent_simalarity": align[0].score / len(align[0].seqA), "score": align[0].score, "levenshtein_distance": l_dist(str(first), str(second))}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _annotate(reads, mirbase_ref, precursors):\n for r in reads:\n for p in reads[r].precursors:\n start = reads[r].precursors[p].start + 1 # convert to 1base\n end = start + len(reads[r].sequence)\n for mature in mirbase_ref[p]:\n mi = mirbase_ref[p][mature]\n is_iso = _coord(reads[r].sequence, start, mi, precursors[p], reads[r].precursors[p])\n logger.debug((\"{r} {p} {start} {is_iso} {mature} {mi} {mature_s}\").format(s=reads[r].sequence, mature_s=precursors[p][mi[0]-1:mi[1]], **locals()))\n if is_iso:\n reads[r].precursors[p].mirna = mature\n break\n return reads", "def Translate(self):\n dna_to_protein = {\n 'ATA':'I', 'ATC':'I', 'ATT':'I', 'ATG':'M',\n 'ACA':'T', 'ACC':'T', 'ACG':'T', 'ACT':'T',\n 'AAC':'N', 'AAT':'N', 'AAA':'K', 'AAG':'K',\n 'AGC':'S', 'AGT':'S', 'AGA':'R', 'AGG':'R',\n 'CTA':'L', 'CTC':'L', 'CTG':'L', 'CTT':'L',\n 'CCA':'P', 'CCC':'P', 'CCG':'P', 'CCT':'P',\n 'CAC':'H', 'CAT':'H', 'CAA':'Q', 'CAG':'Q',\n 'CGA':'R', 'CGC':'R', 'CGG':'R', 'CGT':'R',\n 'GTA':'V', 'GTC':'V', 'GTG':'V', 'GTT':'V',\n 'GCA':'A', 'GCC':'A', 'GCG':'A', 'GCT':'A',\n 'GAC':'D', 'GAT':'D', 'GAA':'E', 'GAG':'E',\n 'GGA':'G', 'GGC':'G', 'GGG':'G', 'GGT':'G',\n 'TCA':'S', 'TCC':'S', 'TCG':'S', 'TCT':'S',\n 'TTC':'F', 'TTT':'F', 'TTA':'L', 'TTG':'L',\n 'TAC':'Y', 'TAT':'Y', 'TAA':'*', 'TAG':'*',\n 'TGC':'C', 'TGT':'C', 'TGA':'*', 'TGG':'W',\n }\n \n length = self.length\n reading = {}\n for i in range(3):\n reading['frame_'+str(i+1)] = tuple([dna_to_protein[self.sequence[index:index+3]] for index in range(i,length-2,3)])\n reverse_strand = Analyze_DNA_Sequence.Complementary(self,'5-3')\n for i in range(3):\n reading['frame_'+str(i+4)] = tuple([dna_to_protein[reverse_strand[index:index+3]] for index in range(i,length-2,3)])\n\n return reading", "def process_align(self):\n\t\tstm_t_dict = self._process_recog()\n\t\ttrans_t_dict = self._process_trans()\n\t\talign_obj = viterbi_align(stm_t_dict, trans_t_dict, self.label, self.pair_file_path)\n\t\tself.trans_t_dict = align_obj.viterbi(0, len(stm_t_dict)-1, 0, len(trans_t_dict)-1)", "def add_guide_alignment(self):\n test_sam = self.get_signalalign_events(sam=True)\n events = self.get_resegment_basecall()\n cigar_labels = create_labels_from_guide_alignment(events=events, sam_string=test_sam,\n kmer_index=self.kmer_index)\n for i, block in enumerate(cigar_labels):\n # print(block)\n self.aligned_signal.add_label(block, name=\"guide_alignment{}\".format(i), label_type='guide')\n return True", "def filter_aligned_codons(aln):\n\n ind = find_aligned_codons(aln)\n return subalign(aln, ind)", "def consensusCalling(spot, args):\n def readTrim(read, start, end):\n \"\"\"\n Trims a pysam.AlignedRead to only include the sequence that's aligned (or should be aligned)\n between start and end on reference\n returns the sequence and quality\n \"\"\"\n score = 0\n if not read.is_unmapped:\n regTrim = 0\n upS = read.cigar[0][1] if read.cigar[0][0] == 4 else 0\n dnS = read.cigar[-1][1] if read.cigar[-1][0] == 4 else 0\n \n trimS = None\n trimE = None\n if start > read.pos:\n for queryPos, targetPos in read.aligned_pairs:\n if trimS is None and targetPos >= start:\n trimS = queryPos\n else:\n score += abs(read.pos - start)\n if end < read.aend:\n for queryPos, targetPos in read.aligned_pairs[::-1]:\n if trimE is None and targetPos <= end:\n trimE = queryPos\n else:\n score += abs(read.aend-end)\n \n if trimS is not None:\n trimS = max(0, trimS) + upS\n else:\n trimS = 0\n \n if trimE is not None:\n trimE = min(len(read.seq), trimE) - dnS\n else:\n trimE = 
len(read.seq)\n seq = read.seq[trimS:trimE]\n qual = read.qual[trimS:trimE]\n if not read.is_reverse:\n seq = seq.translate(revComp)[::-1]\n qual = qual[::-1]\n \n return seq, qual\n \n #END readTrim\n \n chrom, start, end = spot.chrom, spot.start, spot.end\n buffer = args.buffer\n bam = args.bam\n #work\n supportReads = []\n spanReads = []\n #Fetch reads and trim\n totCnt = 0\n for read in bam.fetch(chrom, start-buffer, end+buffer):\n seq, qual = readTrim(read, start-buffer, end+buffer)\n if read.pos < start-300 and read.aend > end+300:\n spanReads.append((len(seq), seq, qual))\n else:\n supportReads.append((seq, qual))\n totCnt += 1\n \n if len(spanReads) == 0:\n logging.info(\"noone spans - consensus aborted. %s\" % (str(spot)))\n spot.tags[\"noSpan\"] = True\n return [spot]\n \n spanReads.sort(reverse=True)\n refread = spanReads[0]\n logging.debug(\"%d reads %d support\" % (totCnt, len(supportReads)))\n supportReads.extend([(x[1], x[2]) for x in spanReads[1:]])\n #read that spans most of the region goes first\n #use the rest for cleaning\n \n #building consensus sequence\n foutreads = NamedTemporaryFile(suffix=\".fastq\")\n for id, i in enumerate(supportReads):\n foutreads.write(\"@%d\\n%s\\n+\\n%s\\n\" % (id, i[0], i[1]))\n foutreads.flush()\n \n foutref = NamedTemporaryFile(suffix=\".fasta\")\n foutref.write(\">%s:%d-%d\\n%s\"%(\"ecoli\", start, end, refread[1]))\n foutref.flush()\n \n alignOut = NamedTemporaryFile(suffix=\".m5\")\n \n blasr(foutreads.name, foutref.name, bestn=1, nproc=1, outname=alignOut.name)\n #shutil.copyfile(foutreads.name, \"sup.fastq\")\n #shutil.copyfile(foutref.name, \"base.fasta\")\n #shutil.copyfile(alignOut.name, \"align.m5\")\n if not args.pbdagcon:\n aligns = M5File(alignOut.name)\n con = \">con\\n%s\\n\" % consensus(aligns).sequence\n else:\n logging.debug(\"pbdagcon\")\n r, con, e = exe(\"pbdagcon -m 25 -c 1 -t 0 %s\" % (alignOut.name))\n logging.debug(str(r) + \" - \" + str(e))\n con = con[con.index(\"\\n\")+1:]\n logging.debug(\"MySeq: \" + con)\n #Check if con is blank\n \n conOut = NamedTemporaryFile(suffix=\".fasta\")\n conOut.write(con)\n conOut.flush()\n refOut = NamedTemporaryFile(suffix=\".fasta\")\n refOut.write(\">%s:%d-%d\\n%s\\n\" % (chrom, start, end, \\\n args.reference.fetch(chrom, start-buffer, end+buffer)))\n refOut.flush()\n \n #map consensus to refregion\n varSam = NamedTemporaryFile(suffix=\".sam\")\n cmd = \"blasr %s %s -sam -bestn 1 -affineAlign -out %s\" % (conOut.name, refOut.name, varSam.name)\n logging.debug(cmd)\n logging.debug(exe(cmd))\n \n foutreads.close()\n foutref.close()\n alignOut.close()\n\n #convert sam to bam\n input = pysam.Samfile(varSam.name)\n varBam = NamedTemporaryFile(suffix=\".bam\")\n output = pysam.Samfile(varBam.name, 'wb', template=input)\n nReads = 0\n for read in input:\n output.write(read)\n nReads += 1\n logging.info(\"%d consensus reads created\" % (nReads))\n varSam.close()\n input.close()\n output.close()\n \n #do pileup for sequence\n pysam.sort(varBam.name, varBam.name[:-4])\n pysam.index(varBam.name)\n bam = pysam.Samfile(varBam.name, 'rb')\n \n mySpots = []\n for pos in bam.pileup():\n size = pos.pileups[0].indel\n if abs(size) < args.minIndelSize or size == 0:\n continue\n newspot = copy.deepcopy(spot)\n if size > 0:\n newspot.start = pos.pos + start - buffer\n newspot.end = pos.pos + start - buffer\n align = pos.pileups[0]\n newspot.tags[\"seq\"] = align.alignment.seq[align.qpos : align.qpos + align.indel]\n newspot.size = size\n newspot.tags[\"label\"] = \"INS\"\n 
mySpots.append(newspot)\n elif size < 0:\n newspot.start = pos.pos + start - buffer\n newspot.end = pos.pos + abs(size) + start - buffer\n #newspot.tags[\"seq\"] = args.reference.fetch(chrom, pos.pos, pos.pos + abs(size))\n newspot.size = -size\n newspot.tags[\"label\"] = \"DEL\"\n mySpots.append(newspot)\n bam.close()\n varBam.close()\n logging.debug(\"%d spots found\" % (len(mySpots)))\n return mySpots", "def align(args) :\n from aligner import align_reads\n align_reads(args)", "def align(self):\n ...", "def alignment_org(angle=0.1):\n proposal_id('2023_2', '311564_test')\n yield from alignement_gisaxs_multisample(angle=angle)\n RE.md['ai_0'] = piezo.th.user_setpoint.get()\n proposal_id('2023_2', '311564_Pettersson')", "def clustal_align_protein(rec_1, rec_2, work_dir):\n fasta_file = op.join(work_dir, \"prot-start.fasta\")\n align_file = op.join(work_dir, \"prot.aln\")\n SeqIO.write((rec_1, rec_2), file(fasta_file, \"w\"), \"fasta\")\n\n clustal_cl = ClustalwCommandline(CLUSTALW_BIN(\"clustalw2\"),\n infile=fasta_file, outfile=align_file, outorder=\"INPUT\",\n type=\"PROTEIN\")\n stdout, stderr = clustal_cl()\n\n aln_file = file(clustal_cl.outfile)\n alignment = AlignIO.read(aln_file, \"clustal\")\n print >>sys.stderr, \"\\tDoing clustalw alignment: %s\" % clustal_cl\n return alignment.format(\"fasta\")", "def MapAlignment(entry, map_a2b):\n\n is_positive = entry.mSbjctStrand == \"+\"\n\n if is_positive:\n sbjct_pos = entry.mSbjctGenomeFrom + 1\n else:\n # no -1, as it starts on the residue\n sbjct_pos = map_a2b.getRowTo() - entry.mSbjctGenomeFrom\n\n last_mapped_pos = map_a2b.mapRowToCol(sbjct_pos)\n\n if last_mapped_pos == 0:\n raise ValueError, \"unmappable starting residue %i\" % sbjct_pos\n\n new_alignment = []\n\n if is_positive:\n entry.mSbjctGenomeFrom = last_mapped_pos - 1\n else:\n entry.mSbjctGenomeFrom = map_a2b.getColTo() - last_mapped_pos\n\n total_d = 0\n for state, l_query, l_sbjct in entry.mMapPeptide2Genome[:-1]:\n\n if is_positive:\n sbjct_pos += l_sbjct\n else:\n sbjct_pos -= l_sbjct\n\n mapped_pos = map_a2b.mapRowToCol(sbjct_pos)\n\n if mapped_pos == 0:\n for x in 1, 2:\n if map_a2b.mapRowToCol(sbjct_pos + x):\n sbjct_pos += x\n mapped_pos = map_a2b.mapRowToCol(sbjct_pos)\n break\n else:\n raise ValueError, \"unmappable residue %i\" % sbjct_pos\n\n d = abs(mapped_pos - last_mapped_pos)\n total_d += d\n new_alignment.append((state, l_query, d))\n\n last_mapped_pos = mapped_pos\n\n state, l_query, l_sbjct = entry.mMapPeptide2Genome[-1]\n\n # process last state, map only to last residue\n if is_positive:\n sbjct_pos += l_sbjct - 1\n else:\n sbjct_pos -= l_sbjct - 1\n\n mapped_pos = map_a2b.mapRowToCol(sbjct_pos)\n\n if mapped_pos == 0:\n raise ValueError, \"unmappable residue %i\" % sbjct_pos\n\n d = abs(mapped_pos - last_mapped_pos) + 1\n total_d += d\n\n new_alignment.append((state, l_query, d))\n\n entry.mSbjctGenomeTo = entry.mSbjctGenomeFrom + total_d\n\n entry.mMapPeptide2Genome = new_alignment", "def BLAST_alignment(species, index_query, index_alignment, index_identity, prot):\n alignments = {}\n seq_id = []\n boo = True\n with open(blastpPath + '/BLAST_%s_mouse' % species) as f:\n for line in f:\n if boo:\n if line[0] != '#':\n query = re.split(\"\\||\\t\", line)[index_query]\n iden = float(re.split(\"\\||\\t\", line)[index_identity])\n if query in prot:\n seq_id.append(iden)\n boo = False\n if line[0] == '#':\n boo = True\n\n return np.array(seq_id)", "def posprocess(x):\n invalid = [\"nan\",\"None\",\"\"] # Caracteres inválidos\n\n x['hg19_chr'] = 
x['hg19_chr'].replace(23,\"X\") # Remplazamos el número 23 por la X\n bases_inv = {v: k for k, v in bases.items()} # Creamos un diccionario inverso para las bases\n x['ref'] = x['ref'].replace(bases_inv) # Sustituimos el código de las bases por las bases de referencia\n x['alt'] = x['alt'].replace(bases_inv) # Sustituimos el código de las bases por las bases alternativas\n if 'aaref' in x.keys() and 'aaalt' in x.keys():\n amino_inv = {v: k for k, v in aminoacids.items()}\n x['aaref'] = x['aaref'].replace(amino_inv)\n x['aaalt'] = x['aaalt'].replace(amino_inv)\n for e in invalid:\n if 'aaref' in x.keys() and 'aaalt' in x.keys():\n x['aaref'] = x['aaref'].replace(e,\"Sin datos\")\n x['aaalt'] = x['aaalt'].replace(e,\"Sin datos\")\n x.fillna(\"Sin datos\")\n x['code'] = x['prediction'].apply(lambda y: code_class[str(y)])\n x['prediction'] = x['prediction'].apply(lambda y: prediction[y])\n x = x.rename(columns={\"hg19_pos(1-based)\":\"hg19_pos\"})\n x['ClinPred_Score'] = x['ClinPred_Score'].round(6)\n x['BayesDel_addAF_score'] = x['BayesDel_addAF_score'].round(6)\n x['BayesDel_noAF_score'] = x['BayesDel_noAF_score'].round(6)\n return x", "def back_translate(aln_file, seqdict):\n aln = SeqIO.parse(aln_file.name, 'fasta')\n bt_seq = []\n for prot_seq in aln:\n codon = 0\n bt = ''\n nuc = seqdict[prot_seq.id]\n for aa in prot_seq:\n if aa == '-':\n bt += '---'\n else:\n bt += nuc[codon*3:(codon*3)+3]\n codon += 1\n bt_seq.append(bt)\n return bt_seq", "def align(self):\n flag=0\n input=None\n level=None\n board=None\n ainps={'L0':[],'L1':[],'L2':[],'H0':[]} \n for i in self.inputs:\n if(i.inputnumber.var.get() == 1):\n if i.inpnumall == rareradio:\n input=i.inpnum\n level=i.level\n board=i.board\n print 'Rare chosen:',level,' ',input\n ainps[i.level].append(i.inpnum)\n flag=flag+1\n #print 'ainps:',ainps \n if flag < 2 :\n print \"Align: less then 2 inputs chosen. 
\" \n return\n if input==None:\n cmd=\"setRareFlag(0,0,0)\"\n else:\n mode='0'\n if level == 'H0': mode = '1'\n cmd=\"setRareFlag(\"+board+','+input+','+mode+\")\"\n print \"seting rare: \",cmd\n output=self.vb.io.execute(cmd,log=\"yes\",applout=\"<>\") \n self.align=Corel(self.vb,ainps)\n self.align.croscor()", "def bioinfo():\n\n pass", "def align(aligner, reads):\n counter = 0\n for read in SeqIO.parse(reads, \"fasta\"): \n try:\n alignInfo = next(aligner.map(str(read.seq)))\n print(alignInfo) \n except StopIteration:\n print(read.format(\"fasta\"), end='')", "def initiate(self, DNA, Pol, Hel):", "def align_preprocessed(self, img):\n aligner = FaceAligner(self.args.wing_path, self.args.lm_path, self.args.img_size)\n return aligner.align(img)", "def ProteinRead(pdb_file, Include_dAA = True, IncludeWATER = False):\n # structure from input file or fetched if not present\n if(pdb_file[-4:] == '.pdb' or pdb_file[-3:] == '.gz'):\n ppdb = PandasPdb().read_pdb(pdb_file)\n else:\n ppdb = PandasPdb().fetch_pdb(pdb_file)\n \n # lists for standard and d-AA used to save structure to dataset \n standardAA = ['ALA','ARG','ASN','ASP','CYS','GLN','GLU','GLY','HIS','ILE','LEU','LYS','MET','PHE','PRO','SER','THR','TRP','TYR','VAL']\n d_AA = ['DAL','DAR','DSG','DAS','DCY','DGN','DGL','GLY','DHI','DIL','DLE','DLY','MED','DPN','DPR','DSN','DTH','DTR','DTY','DVA']#scan takes into account only standard amino acids\n\n for aa in standardAA: #ATOM entries, excluding water molecules \n if(aa==standardAA[0]):\n ppdb_ATOM = ppdb.df['ATOM'][ppdb.df['ATOM']['residue_name'] == aa] \n else:\n ppdb_ATOM = pd.concat([ppdb_ATOM, ppdb.df['ATOM'][ppdb.df['ATOM']['residue_name'] == aa]], ignore_index=True) \n\n if(Include_dAA):\n for i in range(0,len(d_AA)): \n if(d_AA[i]!='GLY'):\n ppdb_d_AA = pd.concat([ppdb.df['ATOM'][ppdb.df['ATOM']['residue_name'] == d_AA[i]],ppdb.df['HETATM'][ppdb.df['HETATM']['residue_name'] == d_AA[i]]], ignore_index=True)\n pd.options.mode.chained_assignment = None \n ppdb_d_AA['residue_name'].iloc[:] = standardAA[i] #dAA considered as standard one for scan \n ppdb_ATOM = pd.concat([ppdb_ATOM, ppdb_d_AA], ignore_index=True) \n\n ppdb_PROTEIN = ppdb_ATOM #protein atoms saved here \n ppdb_WATER = pd.concat([ppdb.df['HETATM'][ppdb.df['HETATM']['residue_name'] == 'HOH'],ppdb.df['ATOM'][ppdb.df['ATOM']['residue_name'] == 'HOH'],ppdb.df['HETATM'][ppdb.df['HETATM']['residue_name'] == 'WAT'],ppdb.df['ATOM'][ppdb.df['ATOM']['residue_name'] == 'WAT']], ignore_index=True) #oxygen atoms of water molecules\n #can be both HETATM (standard pdb file) or ATOM (vmd output)\n if(len(ppdb_WATER)>0 and IncludeWATER):\n pd.options.mode.chained_assignment = None \n ppdb_WATER['residue_name'].iloc[:] = 'HOH'\n ppdb_WATER['chain_id'].iloc[:] = 'water'\n ppdb_ATOM = pd.concat([ppdb_ATOM, ppdb_WATER], ignore_index=True)\n\n Chains = []\n for i in range(0,len(ppdb_ATOM)):\n if(ppdb_ATOM['chain_id'].iloc[i] in Chains):\n continue\n else:\n Chains.append(ppdb_ATOM['chain_id'].iloc[i]) \n return ppdb_ATOM, Chains", "def alignprotein(self, sample, analysistype, target, program, index, hit):\n # Initialise lists to store the outputs\n if target not in sample[analysistype].dnaseq:\n sample[analysistype].dnaseq[target] = list()\n sample[analysistype].protseq[target] = list()\n sample[analysistype].ntalign[target] = list()\n sample[analysistype].ntindex[target] = list()\n sample[analysistype].aaidentity[target] = list()\n sample[analysistype].aaalign[target] = list()\n sample[analysistype].aaindex[target] = list()\n # Only BLASTn 
analyses require additional effort to find the protein sequence\n if program == 'blastn':\n # Convert the extracted, properly-oriented DNA sequence to a Seq object\n sample[analysistype].dnaseq[target].append(Seq(hit['query_sequence']))\n # Create the BLAST-like interleaved outputs with the query and subject sequences\n sample[analysistype].ntalign[target].append(self.interleaveblastresults(query=hit['query_sequence'],\n subject=hit['subject_sequence']))\n # Determine the number and position of SNPs\n count = 0\n ntindex = str()\n # Iterate through every position in the query sequence, and determine if the subject sequence at that\n # position is a match\n for i, bp in enumerate(hit['query_sequence']):\n # If the sequence at the query and subject sequences do not match, store the location\n if bp != hit['subject_sequence'][i]:\n # Append the current location (+1 due to zero indexing)\n ntindex += '{i};'.format(i=i + 1)\n # Increment the count by the length of the current position - should make the output more\n # uniform due to the fact that the numbers are not padded\n count += len(str(i))\n # If there are many SNPs, then insert line breaks for every 15+ characters\n if count >= 15:\n ntindex += '\\n'\n # Reset the character count to 0\n count = 0\n # Remove trailing ';' (or ';' followed by a newline)\n ntindex = ntindex.rstrip(';').replace(';\\n', '\\n') if ntindex else '-'\n # Add the cleaned string to the list\n sample[analysistype].ntindex[target].append(ntindex)\n # Convert the target name to a string without illegal characters - necessary for creating the\n # temporary databases below\n clean_target = ''.join(filter(str.isalnum, target))\n # Set the absolute path, and create the tmp working directory\n tmp_dir = os.path.join(sample[analysistype].reportdir, 'tmp')\n make_path(tmp_dir)\n # Set the absolute path of the FASTA file that will store the subject sequence. Will be used as the\n # database in the tblastx analysis used to translate the query and subject sequence to amino acid\n tmp_subject = os.path.join(tmp_dir, '{sn}_{target}_{at}_db_{index}.fa'\n .format(sn=sample.name,\n target=clean_target,\n at=analysistype,\n index=index))\n # Write the appropriately-converted subject sequence to the database file\n with open(tmp_subject, 'w') as tmp_db:\n SeqIO.write(SeqRecord(Seq(hit['subject_sequence'].replace('-', '')),\n id='{}_{}'.format(sample.name, target),\n description=''), tmp_db, 'fasta')\n # Create a BLAST database from this file\n self.makeblastdb(fasta=tmp_subject)\n # Create the tblastx (translated nt query: translated nt subject) call. Remove any masking. Do not\n # include the 'query' parameter, as it will be supplied below\n tblastx = NcbitblastxCommandline(db=os.path.splitext(tmp_subject)[0],\n evalue=0.1,\n outfmt=15,\n soft_masking=False,\n seg='no')\n # Run the tblastx analysis. Supply the query as stdin. 
Capture stdout, and stderr\n stdout, stderr = tblastx(stdin=sample[analysistype].targetsequence[target][index].replace('-', ''))\n # Convert the string stdout to JSON format\n json_output = json.loads(stdout)\n # Extract the necessary list of HSPs from the JSON-formatted outputs\n data = json_output['BlastOutput2'][0]['report']['results']['search']['hits'][0]['hsps']\n # Initialise a string to store the extracted amino acid subject sequence\n ref_prot = str()\n for results in data:\n # Attempt to use hit_frame 1 - the .targetsequence attribute was populated with the nt sequence in\n # (hopefully) the correct orientation, so attempt to use that\n if results['hit_frame'] == 1:\n # Populate the .protseq attribute with the Seq-converted amino acid sequence extracted from the\n # report\n sample[analysistype].protseq[target].append(Seq(results['qseq'].upper()))\n # Grab the subject sequence\n ref_prot = results['hseq']\n # Only the first result is required\n break\n # If there were no results with the hit_frame equal to 1, get the best result from the analysis\n if not ref_prot:\n for results in data:\n sample[analysistype].protseq[target].append(Seq(results['qseq'].upper()))\n ref_prot = results['hseq']\n break\n # Clear out the tmp directory\n try:\n shutil.rmtree(tmp_dir)\n except FileNotFoundError:\n pass\n else:\n # Non-blastn analyses will already have the outputs as amino acid sequences. Populate variables as required\n ref_prot = hit['subject_sequence']\n sample[analysistype].protseq[target].append(Seq(hit['query_sequence']))\n # Create the BLAST-like alignment of the amino acid query and subject sequences\n sample[analysistype].aaalign[target]\\\n .append(self.interleaveblastresults(query=sample[analysistype].protseq[target][index],\n subject=ref_prot))\n # Determine the number of matches, as well as the number and location of mismatches\n count = 0\n matches = 0\n aaindex = str()\n # Iterate through the query sequence to determine matching positions\n for i, bp in enumerate(sample[analysistype].protseq[target][index]):\n if bp != ref_prot[i]:\n aaindex += '{i};'.format(i=i + 1)\n count += len(str(i))\n # If there are many SNPs, then insert line breaks for every 10 SNPs\n if count >= 15:\n aaindex += '\\n'\n count = 0\n # Increment the total number of matches\n if bp == ref_prot[i]:\n matches += 1\n # Clean the index string\n aaindex = aaindex.rstrip(';').replace(';\\n', '\\n') if aaindex else '-'\n # Append the cleaned string to the list\n sample[analysistype].aaindex[target].append(aaindex)\n # Determine percent identity between the query and subject amino acid sequence by dividing the number of\n # matches by the total length of the query sequence and multiplying this result by 100. 
Convert to two\n # decimal places\n pid = float('{:.2f}'.format(matches / len(sample[analysistype].protseq[target][index]) * 100))\n # Append the calculated percent identity to the list\n sample[analysistype].aaidentity[target].append(pid)\n return sample", "def merge_in(self, other, convert_to_string=True):\n assert isinstance(other, ExtendedAlignment)\n #_LOG.debug(\"Merging started ...\")\n if other.is_empty():\n return\n me = 0\n she = 0 # Assumption: alignments are female!\n me_len = self.get_length() if not self.is_empty() else 0\n she_len = other.get_length()\n insertion = -1\n\n merged_insertion_columns = 0\n\n ''' Add sequences from her to my alignment '''\n for f in other.fragments:\n self.fragments.add(f)\n if convert_to_string:\n self.from_string_to_bytearray()\n\n selfother = {}\n for k, v in other.items():\n # assert(k not in self,\n # \"Merging overlapping alignments not implemented\")\n if k not in self:\n selfother[k] = bytearray(v, encoding=\"utf8\")\n while True:\n ''' Check exit conditions'''\n if me == me_len and she == she_len:\n break\n\n ''' Check the 5 possible statuses between she and I '''\n if she != she_len and other.is_insertion_column(she):\n if me != me_len and self.is_insertion_column(me):\n ''' We both have a series of insertion columns'''\n start = me\n while(me != me_len and self.is_insertion_column(me) and\n she != she_len and other.is_insertion_column(she)):\n me += 1\n she += 1\n merged_insertion_columns += 1\n run = me - start\n self.col_labels[start:me] = list(range(\n insertion, insertion-run, -1))\n else:\n ''' Hers is a series of insertion columns'''\n start = she\n while she != she_len and other.is_insertion_column(she):\n she += 1\n run = she - start\n ins = bytearray(b\"-\") * run\n for seq in self.values():\n seq[me:me] = ins\n self._col_labels[me:me] = list(range(\n insertion, insertion - run, -1))\n insertion -= run\n me += run\n me_len += run\n elif me != me_len and self.is_insertion_column(me):\n ''' Mine is a series of insertion column'''\n start = me\n while me != me_len and self.is_insertion_column(me):\n me += 1\n run = me - start\n ins = bytearray(b\"-\") * run\n for v in selfother.values():\n v[start:start] = ins\n self.col_labels[start:me] = list(\n range(insertion, insertion-run, -1))\n insertion -= run\n elif(she == she_len or (me != me_len and\n self.col_labels[me] < other.col_labels[she])):\n ''' My column is not present (i.e. was allgap) in the\n \"other\"'''\n start = me\n while(me < me_len and (she == she_len or me != me_len and\n self.col_labels[me] < other.col_labels[she])):\n me += 1\n run = me - start\n ins = bytearray(b\"-\") * run\n for v in selfother.values():\n v[start:start] = ins\n elif(me == me_len or (she != she_len and\n self.col_labels[me] > other.col_labels[she])):\n ''' Her column is not present (i.e. was allgap) in \"me\"'''\n start = she\n while(she < she_len and (me == me_len or she != she_len and\n self.col_labels[me] > other.col_labels[she])):\n she += 1\n run = she - start\n ins = bytearray(b\"-\") * run\n for seq in self.values():\n seq[me:me] = ins\n self._col_labels[me:me] = other.col_labels[start:she]\n me += run\n me_len += run\n elif self.col_labels[me] == other.col_labels[she]:\n ''' A shared column'''\n while(me < me_len and she < she_len and\n self.col_labels[me] == other.col_labels[she]):\n she += 1\n me += 1\n else:\n raise \"hmmm, we thought this should be impossible? 
%d %d\" % (\n me, she)\n\n self.update(selfother)\n\n if convert_to_string:\n self.from_bytearray_to_string()\n #_LOG.debug(\"Merging finished ...\")\n\n return merged_insertion_columns", "def align_one_reads_to_assembly(self, ctx):\n # ctx is the context object\n #BEGIN align_one_reads_to_assembly\n #END align_one_reads_to_assembly\n pass", "def dna_to_protein(seq):\n\n # Verify a convertible sequence\n if len(seq) % 3 != 0:\n raise RuntimeError('Total number of bases must be a multiple of 3')\n\n # Iterate through adding the proteins\n protein = ''\n for i in range(0, len(seq), 3):\n protein += bioinfo_dicts.codons[seq[i:i+3]]\n return protein", "def attach_barcode(sam, output):\n \n if output is None:\n output = sam.replace('.sam', '_bcqt.sam')\n infile = pysam.AlignmentFile(sam, \"r\")\n outfile = pysam.AlignmentFile(output, \"wh\", template=infile)\n for read in infile.fetch():\n id_sam = read.query_name\n sep_si = id_sam.index(':')\n# TODO Abort and raise exception if randomer info is not kept properly in the \n# read's name.\n bc_seq = id_sam[0:sep_si]\n sep_qi = sep_si + 1 + len(bc_seq)\n bc_pqs = id_sam[sep_si + 1: sep_qi]\n read.set_tag('BC', bc_seq)\n read.set_tag('QT', bc_pqs)\n read.query_name = id_sam[sep_qi+1:]\n outfile.write(read)\n outfile.close()\n infile.close()", "def mergeChainedAlignedSegments(chainedAlignedSegments, refSequence, readSequence):\n cAR = pysam.AlignedSegment()\n aR = chainedAlignedSegments[0]\n cAR.query_name = aR.query_name\n \n #Parameters we don't and therefore set properly\n #cAR.flag = aR.flag\n #cAR.mapq = aR.mapq\n #cAR.mrnm = 0\n #cAR.mpos=0\n #cAR.isize=0\n #cAR.qual = \"<\" * len(readSequence)\n #cAR.tags = aR.tags \n cAR.next_reference_id = -1\n cAR.reference_start = aR.reference_start #Reference start\n cAR.is_reverse = aR.is_reverse\n cAR.query_sequence = reverseComplement(readSequence) if cAR.is_reverse else readSequence\n cAR.reference_id = aR.reference_id\n cigarList = []\n pPos = aR.reference_start\n #Iterate from the other end of the sequence if reversed\n pQPos = -(len(readSequence)-1) if cAR.is_reverse else 0 \n \n for aR in chainedAlignedSegments:\n assert cAR.is_reverse == aR.is_reverse\n #Add a deletion representing the preceding unaligned reference positions\n assert aR.reference_start >= pPos\n if aR.reference_start > pPos:\n cigarList.append((2, aR.reference_start - pPos))\n pPos = aR.reference_start \n \n #Add an insertion representing the preceding unaligned read positions\n #make it a soft clip if it is the first chained alignment\n qPos = getFirstNonClippedPositionInRead(aR, readSequence)\n assert qPos >= pQPos\n if qPos > pQPos:\n cigarList.append((4 if aR == chainedAlignedSegments[0] else 1, qPos - pQPos)) \n pQPos = qPos\n \n #Add the operations of the cigar, filtering hard and soft clipping\n for op, length in aR.cigar:\n assert op in (0, 1, 2, 4, 5)\n if op in (0, 1, 2):\n cigarList.append((op, length))\n if op in (0, 2): #Is match or deletion\n pPos += length\n if op in (0, 1): #Is match or insertion\n pQPos += length\n \n assert pPos <= len(refSequence)\n \n #Set reference end coordinate (which is exclusive)\n #cAR.reference_end = pPos #We don't do this because it is set by cigar string\n \n #Now add any trailing, necessary soft clipping\n if cAR.is_reverse:\n assert pQPos <= 1\n if pQPos < 1:\n cigarList.append((4, -pQPos + 1))\n else:\n assert pQPos <= len(readSequence)\n if pQPos < len(readSequence):\n cigarList.append((4, len(readSequence) - pQPos))\n \n cAR.cigar = tuple(cigarList)\n \n #Check ops\n for op, 
length in cAR.cigar: #We should have no hard clipped ops\n assert op in (0, 1, 2, 4)\n \n #Reference sequence check coordinates\n assert sum([ length for op, length in cigarList if op in (0, 2)]) == cAR.reference_end - cAR.reference_start\n assert cAR.reference_start >= 0 and cAR.reference_start < len(refSequence)\n assert cAR.reference_end >= 0 and cAR.reference_end <= len(refSequence)\n \n #Read sequence check coordinates\n assert cAR.query_alignment_start >= 0 and cAR.query_alignment_start < len(readSequence)\n assert cAR.query_alignment_end >= 0 and cAR.query_alignment_end <= len(readSequence)\n assert cAR.query_alignment_start + sum([ length for op, length in cigarList if op in (0, 1)]) == cAR.query_alignment_end\n \n return cAR", "def convert_propbank(detail=True):\n\n out_dir = \"../data/wsj_propbank/\"\n os.system(\"rm -rf %s\" % (out_dir, ))\n os.system(\"mkdir -p %s\" % (out_dir, ))\n\n pb_instances = propbank.instances()\n # Count at first\n verb2idx = {}\n verb2frames = {}\n for i in range(0, len(pb_instances)):\n inst = pb_instances[i]\n verb_lemma, frame = inst.roleset.split(\".\")\n if verb_lemma not in verb2idx:\n verb2idx[verb_lemma] = []\n verb2idx[verb_lemma].append(i)\n if verb_lemma not in verb2frames:\n verb2frames[verb_lemma] = []\n if frame not in verb2frames[verb_lemma]:\n verb2frames[verb_lemma].append(frame)\n verb_nums = len(verb2idx.keys())\n verb_counter = 0\n\n pair_label = {'-LRB-':'(', '-RRB-':')', '-LCB-':'(', '-RCB-':')'}\n for verb_lemma, idxs in verb2idx.items():\n verb_counter += 1\n if len(verb2frames[verb_lemma]) < 2:\n continue\n fh = open(\"%s/%s\" % (out_dir, verb_lemma), \"w\")\n if detail:\n print(\"processing %s(%s/%s)\"\n % (verb_lemma, verb_counter, verb_nums))\n for i in idxs:\n inst = pb_instances[i]\n fileid = inst.fileid\n sent_num = inst.sentnum\n verb_pos = inst.wordnum\n verb_lemma, frame = inst.roleset.split(\".\")\n section = [x for x in fileid if x.isdigit()][0:2]\n section = \"\".join(section)\n fileid_for_ptb = \"WSJ/%s/%s\" % (section, fileid.upper())\n\n tagged_sent = ptb.tagged_sents(fileid_for_ptb)[sent_num]\n # Change tagged_sent from [tuples] to [list]\n tagged_sent = [[x[0], x[1]]for x in tagged_sent]\n verb_bak = tagged_sent[verb_pos][0]\n verb_identifier = \"verb_identifier_xxxxx\"\n tagged_sent[verb_pos][0] = verb_identifier\n sent = []\n for (token, tag)in tagged_sent:\n if tag != '-NONE-':\n if token in pair_label:\n token = pair_label[token]\n sent.append(token)\n sent = \" \".join(sent)\n sent_toks = nltk.sent_tokenize(sent)\n candidate_sent = None\n for sent_tok in sent_toks:\n if sent_tok.find(verb_identifier) >= 0:\n candidate_sent = sent_tok\n left_sent, right_sent = candidate_sent.split(verb_identifier)\n left_sent = left_sent.strip()\n right_sent = right_sent.strip()\n out_line = \"%s\\t%s\\t%s\\t%s\" % (frame, left_sent, verb_bak, right_sent)\n out_line = remove_punctuations(out_line)\n print(out_line, file=fh)\n fh.close()", "def main(argv=None):\n\n if not argv:\n argv = sys.argv\n\n # setup command line parser\n parser = E.ArgumentParser(description=__doc__)\n\n parser.add_argument(\"--version\", action='version', version=\"1.0\")\n\n parser.add_argument(\"-m\", \"--merge-pairs\", dest=\"merge_pairs\",\n action=\"store_true\",\n help=\"merge paired-ended reads and output interval \"\n \"for entire fragment. \")\n\n parser.add_argument(\"--max-insert-size\", dest=\"max_insert_size\", type=int,\n help=\"only merge paired-end reads if they are less than \"\n \"# bases apart. \"\n \" 0 turns off this filter. 
\")\n\n parser.add_argument(\"--min-insert-size\", dest=\"min_insert_size\", type=int,\n help=\"only merge paired-end reads if they are at \"\n \"least # bases apart. \"\n \" 0 turns off this filter. \")\n\n parser.add_argument(\"--bed-format\", dest=\"bed_format\", type=str,\n choices=('3', '4', '5', '6'),\n help=\"bed format to output. \")\n\n parser.set_defaults(\n region=None,\n call_peaks=None,\n merge_pairs=None,\n min_insert_size=0,\n max_insert_size=0,\n bed_format='6',\n )\n\n (args, unknown) = E.start(parser, argv=argv, unknowns=True)\n\n if len(unknown) == 0:\n unknown.append(\"-\")\n\n samfile = pysam.AlignmentFile(unknown[0], \"rb\")\n\n args.bed_format = int(args.bed_format)\n\n if args.merge_pairs is not None:\n counter = merge_pairs(samfile,\n args.stdout,\n min_insert_size=args.min_insert_size,\n max_insert_size=args.max_insert_size,\n bed_format=args.bed_format)\n\n E.info(\"category\\tcounts\\n%s\\n\" % counter.asTable())\n\n else:\n # use until_eof. Files from stdin have no index\n it = samfile.fetch(until_eof=True)\n\n # more comfortable cigar parsing will\n # come with the next pysam release\n BAM_CMATCH = 0\n BAM_CDEL = 2\n BAM_CREF_SKIP = 3\n take = (BAM_CMATCH, BAM_CDEL, BAM_CREF_SKIP)\n outfile = args.stdout\n\n for read in it:\n if read.is_unmapped:\n continue\n\n t = 0\n for op, l in read.cigar:\n if op in take:\n t += l\n\n if read.is_reverse:\n strand = \"-\"\n else:\n strand = \"+\"\n outfile.write(\"%s\\t%d\\t%d\\t%s\\t%d\\t%c\\n\" %\n (read.reference_name,\n read.pos,\n read.pos + t,\n read.qname,\n read.mapq,\n strand))\n\n E.stop()", "def translate_sequence(sequence, genetic_code = {'GUC': 'V', 'ACC': 'T', 'GUA': 'V', 'GUG': 'V', 'ACU': 'T', 'AAC': 'N', 'CCU': 'P', 'UGG': 'W', 'AGC': 'S', 'AUC': 'I', 'CAU': 'H', 'AAU': 'N', 'AGU': 'S', 'GUU': 'V', 'CAC': 'H', 'ACG': 'T', 'CCG': 'P', 'CCA': 'P', 'ACA': 'T', 'CCC': 'P', 'UGU': 'C', 'GGU': 'G', 'UCU': 'S', 'GCG': 'A', 'UGC': 'C', 'CAG': 'Q', 'GAU': 'D', 'UAU': 'Y', 'CGG': 'R', 'UCG': 'S', 'AGG': 'R', 'GGG': 'G', 'UCC': 'S', 'UCA': 'S', 'UAA': '*', 'GGA': 'G', 'UAC': 'Y', 'GAC': 'D', 'UAG': '*', 'AUA': 'I', 'GCA': 'A', 'CUU': 'L', 'GGC': 'G', 'AUG': 'M', 'CUG': 'L', 'GAG': 'E', 'CUC': 'L', 'AGA': 'R', 'CUA': 'L', 'GCC': 'A', 'AAA': 'K', 'AAG': 'K', 'CAA': 'Q', 'UUU': 'F', 'CGU': 'R', 'CGC': 'R', 'CGA': 'R', 'GCU': 'A', 'GAA': 'E', 'AUU': 'I', 'UUG': 'L', 'UUA': 'L', 'UGA': '*', 'UUC': 'F'}, start_pos = 0):\n #find first orf\n #first_orf_seq = find_first_orf(sequence)\n\n # ensure sequence is uppercase\n seq = sequence.upper()\n\n #translate the sequence\n protein = \"\"\n for i in range(0, len(seq) - (len(seq) % 3), 3):\n codon = seq[i:i + 3]\n if genetic_code[codon] == \"*\":\n break\n protein += genetic_code[codon]\n return protein", "def align_probas(self, probas, generator):\n if probas.ndim <= 2:\n return probas\n\n num_class = probas.shape[-1]\n output = np.zeros((generator.num_samples, num_class), dtype=probas.dtype)\n\n for idxs, p in zip(generator.idx_pairs, probas):\n output[idxs[0]:idxs[1],:] += p\n\n output /= np.linalg.norm(output, axis=1, keepdims=True)\n\n return output", "def updateResidueProbAnnotation(residueProb):\n\n for resonance in residueProb.resonanceGroup.resonances:\n updateResonanceAnnotation(resonance)", "def try_protein(self):\n location = [0, 0]\n fold = 0\n \n # loop over aminoacids of the data and add info to aminoacids object\n for i, char in enumerate(self.data):\n aminoacid_number = i\n aminoacid_type = char \n \n # make aminoacid object and add to aminoacids list\n 
aminoacid = Aminoacid(aminoacid_type, aminoacid_number, location, fold)\n self.occupied.append(aminoacid.location)\n self.aminoacids.append(aminoacid)\n\n # make a line orientation as default\n location = [0, len(self.data) + i]\n return", "def remerge_subset():\n import wbia\n\n ibs1 = wbia.opendb('PZ_PB_RF_TRAIN')\n ibs2 = wbia.opendb('PZ_Master1')\n\n gids1, gids2 = ibs1.images(), ibs2.images()\n idxs1, idxs2 = ut.isect_indices(gids1.uuids, gids2.uuids)\n isect_gids1, isect_gids2 = gids1.take(idxs1), gids2.take(idxs2)\n\n assert all(\n set.issubset(set(a1), set(a2))\n for a1, a2 in zip(isect_gids1.annot_uuids, isect_gids2.annot_uuids)\n )\n\n annot_uuids = ut.flatten(isect_gids1.annot_uuids)\n # aids1 = ibs1.annots(ibs1.get_annot_aids_from_uuid(annot_uuids), asarray=True)\n # aids2 = ibs2.annots(ibs2.get_annot_aids_from_uuid(annot_uuids), asarray=True)\n aids1 = ibs1.annots(uuids=annot_uuids, asarray=True)\n aids2 = ibs2.annots(uuids=annot_uuids, asarray=True)\n import numpy as np\n\n to_aids2 = dict(zip(aids1, aids2))\n # to_aids1 = dict(zip(aids2, aids1))\n\n # Step 1) Update individual annot properties\n # These annots need updates\n # np.where(aids1.visual_uuids != aids2.visual_uuids)\n # np.where(aids1.semantic_uuids != aids2.semantic_uuids)\n\n annot_unary_props = [\n # 'yaws', 'bboxes', 'thetas', 'qual', 'species', 'unary_tags']\n 'yaws',\n 'bboxes',\n 'thetas',\n 'qual',\n 'species',\n 'case_tags',\n 'multiple',\n 'age_months_est_max',\n 'age_months_est_min', # 'sex_texts'\n ]\n to_change = {}\n for key in annot_unary_props:\n prop1 = getattr(aids1, key)\n prop2 = getattr(aids2, key)\n diff_idxs = set(np.where(prop1 != prop2)[0])\n if diff_idxs:\n diff_prop1 = ut.take(prop1, diff_idxs)\n diff_prop2 = ut.take(prop2, diff_idxs)\n logger.info('key = {!r}'.format(key))\n logger.info('diff_prop1 = {!r}'.format(diff_prop1))\n logger.info('diff_prop2 = {!r}'.format(diff_prop2))\n to_change[key] = diff_idxs\n if to_change:\n changed_idxs = ut.unique(ut.flatten(to_change.values()))\n logger.info('Found %d annots that need updated properties' % len(changed_idxs))\n logger.info('changing unary attributes: {!r}'.format(to_change))\n if False and ut.are_you_sure('apply change'):\n for key, idxs in to_change.items():\n subaids1 = aids1.take(idxs)\n subaids2 = aids2.take(idxs)\n prop1 = getattr(subaids1, key)\n # prop2 = getattr(subaids2, key)\n setattr(subaids2, key, prop1)\n else:\n logger.info('Annot properties are in sync. 
Nothing to change')\n\n # Step 2) Update annotmatch - pairwise relationships\n infr1 = wbia.AnnotInference(aids=aids1.aids, ibs=ibs1, verbose=3, autoinit=False)\n\n # infr2 = wbia.AnnotInference(aids=ibs2.annots().aids, ibs=ibs2, verbose=3)\n aids2 = ibs2.get_valid_aids(is_known=True)\n infr2 = wbia.AnnotInference(aids=aids2, ibs=ibs2, verbose=3)\n infr2.reset_feedback('annotmatch', apply=True)\n\n # map feedback from ibs1 onto ibs2 using ibs2 aids.\n fb1 = infr1.read_wbia_annotmatch_feedback()\n fb1_t = {(to_aids2[u], to_aids2[v]): val for (u, v), val in fb1.items()}\n fb1_df_t = infr2._pandas_feedback_format(fb1_t).drop('am_rowid', axis=1)\n\n # Add transformed feedback into ibs2\n infr2.add_feedback_from(fb1_df_t)\n\n # Now ensure that dummy connectivity exists to preserve origninal names\n # from wbia.algo.graph import nx_utils\n # for (u, v) in infr2.find_mst_edges('name_label'):\n # infr2.draw_aids((u, v))\n # cc1 = infr2.pos_graph.connected_to(u)\n # cc2 = infr2.pos_graph.connected_to(v)\n # logger.info(nx_utils.edges_cross(infr2.graph, cc1, cc2))\n # infr2.neg_redundancy(cc1, cc2)\n # infr2.pos_redundancy(cc2)\n\n infr2.relabel_using_reviews(rectify=True)\n infr2.apply_nondynamic_update()\n\n if False:\n infr2.wbia_delta_info()\n infr2.wbia_name_group_delta_info()\n\n if len(list(infr2.inconsistent_components())) > 0:\n raise NotImplementedError('need to fix inconsistencies first')\n # Make it so it just loops until inconsistencies are resolved\n infr2.prioritize()\n infr2.qt_review_loop()\n else:\n infr2.write_wbia_staging_feedback()\n infr2.write_wbia_annotmatch_feedback()\n infr2.write_wbia_name_assignment()\n\n # if False:\n # # Fix any inconsistency\n # infr2.start_qt_interface(loop=False)\n # test_nodes = [5344, 5430, 5349, 5334, 5383, 2280, 2265, 2234, 5399,\n # 5338, 2654]\n # import networkx as nx\n # nx.is_connected(infr2.graph.subgraph(test_nodes))\n # # infr = wbia.AnnotInference(aids=test_nodes, ibs=ibs2, verbose=5)\n\n # # randomly sample some new labels to verify\n # import wbia.guitool as gt\n # from wbia.gui import inspect_gui\n # gt.ensure_qapp()\n # ut.qtensure()\n # old_groups = ut.group_items(name_delta.index.tolist(), name_delta['old_name'])\n # del old_groups['____']\n\n # new_groups = ut.group_items(name_delta.index.tolist(), name_delta['new_name'])\n\n # from wbia.algo.hots import simulate\n # c = simulate.compare_groups(\n # list(new_groups.values()),\n # list(old_groups.values()),\n # )\n # ut.map_vals(len, c)\n # for aids in c['pred_splits']:\n # old_nids = ibs2.get_annot_nids(aids)\n # new_nids = ut.take_column(infr2.gen_node_attrs('name_label', aids), 1)\n # split_aids = ut.take_column(ut.group_items(aids, new_nids).values(), 0)\n # aid1, aid2 = split_aids[0:2]\n\n # if False:\n # inspect_gui.show_vsone_tuner(ibs2, aid1, aid2)\n # infr2.start_qt_interface(loop=False)\n\n # if False:\n # # import wbia\n # ibs1 = wbia.opendb('PZ_PB_RF_TRAIN')\n # infr1 = wbia.AnnotInference(aids='all', ibs=ibs1, verbose=3)\n # infr1.initialize_graph()\n # # infr1.reset_feedback('staging')\n # infr1.reset_feedback('annotmatch')\n # infr1.apply_feedback_edges()\n # infr1.relabel_using_reviews()\n # infr1.apply_review_inference()\n # infr1.start_qt_interface(loop=False)\n # delta = infr2.match_state_delta()\n # logger.info('delta = %r' % (delta,))\n\n # infr2.ensure_mst()\n # infr2.relabel_using_reviews()\n # infr2.apply_review_inference()\n\n # mst_edges = infr2.find_mst_edges()\n # set(infr2.graph.edges()).intersection(mst_edges)\n\n return\n \"\"\"\n TODO:\n Task 2:\n 
Build AnnotInfr for ibs2 then add all decisions from\n ibs1 to the internal feedback dict.\n\n Ensure that all other (esp old name-id related) edges are correctly\n placed, then overwrite with new vals (\n make sure implicit vals do not cause conflicts with new\n explicit vals, but old explicit vals should cause a conflict).\n Then just commit to staging and then commit to annotmatch and\n re-infer the names.\n """\n\n # Print some info about the delta\n # def _to_tup(x):\n # return tuple(x) if isinstance(x, list) else x\n # changetype_list = list(zip(\n # delta['old_decision'], delta['new_decision'],\n # map(_to_tup, delta['old_tags']),\n # map(_to_tup, delta['new_tags'])))\n # changetype_hist = ut.dict_hist(changetype_list, ordered=True)\n # logger.info(ut.align(ut.repr4(changetype_hist), ':'))\n\n # import pandas as pd\n # pd.options.display.max_rows = 20\n # pd.options.display.max_columns = 40\n # pd.options.display.width = 160\n # pd.options.display.float_format = lambda x: '%.4f' % (x,)\n\n # a, b = 86, 6265\n # c, d = to_aids1[a], to_aids1[b]\n # inspect_gui.show_vsone_tuner(ibs2, a, b)\n # inspect_gui.show_vsone_tuner(ibs1, to_aids1[a], to_aids1[b])\n # am1 = ibs1.get_annotmatch_rowids_between([to_aids1[a]],\n # [to_aids1[b]])\n # am2 = ibs2.get_annotmatch_rowids_between([a], [b])\n # logger.info(ibs1.db.get_table_csv('annotmatch', rowids=am1))\n # logger.info(ibs2.db.get_table_csv('annotmatch', rowids=am2))\n\n # inspect_gui.show_vsone_tuner(ibs2, 8, 242)\n # inspect_gui.show_vsone_tuner(ibs2, 86, 103)\n # inspect_gui.show_vsone_tuner(ibs2, 86, 6265)", "def maskPrimers(seq_file, primer_file, mode, align_func, align_args={}, \n max_error=default_max_error, barcode=False,\n out_args=default_out_args, nproc=None, queue_size=None):\n # Define subcommand label dictionary\n cmd_dict = {alignPrimers:'align', scorePrimers:'score'}\n \n # Print parameter info\n log = OrderedDict()\n log['START'] = 'MaskPrimers'\n log['COMMAND'] = cmd_dict.get(align_func, align_func.__name__)\n log['SEQ_FILE'] = os.path.basename(seq_file)\n log['PRIMER_FILE'] = os.path.basename(primer_file)\n log['MODE'] = mode\n log['BARCODE'] = barcode\n log['MAX_ERROR'] = max_error\n if 'start' in align_args: log['START_POS'] = align_args['start']\n if 'max_len' in align_args: log['MAX_LEN'] = align_args['max_len']\n if 'rev_primer' in align_args: log['REV_PRIMER'] = align_args['rev_primer']\n if 'skip_rc' in align_args: log['SKIP_RC'] = align_args['skip_rc']\n if 'gap_penalty' in align_args:\n log['GAP_PENALTY'] = ', '.join([str(x) for x in align_args['gap_penalty']])\n log['NPROC'] = nproc\n printLog(log)\n\n # Create dictionary of primer sequences to pass to maskPrimers\n primers = readPrimerFile(primer_file)\n if 'rev_primer' in align_args and align_args['rev_primer']:\n primers = {k: reverseComplement(v) for k, v in primers.items()}\n\n # Define alignment arguments and compile primers for align mode\n align_args['primers'] = primers \n align_args['score_dict'] = getDNAScoreDict(mask_score=(0, 1), gap_score=(0, 0))\n if align_func is alignPrimers:\n align_args['max_error'] = max_error\n align_args['primers_regex'] = compilePrimers(primers)\n \n # Define sequence masking arguments\n mask_args = {'mode': mode, \n 'barcode': barcode, \n 'delimiter': out_args['delimiter']}\n\n # Define feeder function and arguments\n feed_func = feedSeqQueue\n feed_args = {'seq_file': seq_file}\n # Define worker function and arguments\n work_func = processMPQueue\n work_args = {'align_func': align_func, \n 'align_args': align_args,\n
'mask_args': mask_args,\n 'max_error': max_error}\n \n # Define collector function and arguments\n collect_func = collectSeqQueue\n collect_args = {'seq_file': seq_file,\n 'task_label': 'primers',\n 'out_args': out_args}\n \n # Call process manager\n result = manageProcesses(feed_func, work_func, collect_func, \n feed_args, work_args, collect_args, \n nproc, queue_size)\n\n # Print log\n result['log']['END'] = 'MaskPrimers'\n printLog(result['log'])\n \n return result['out_files']", "def pepComp(align,useConsensus=True):\n if useConsensus:\n ref = consensus(align)\n else:\n ref = identifyMindist(align)\n out = []\n for seq in align:\n out.append(''.join([aa.upper() if aa.upper()==refaa.upper() else aa.lower() for aa, refaa in zip(seq, ref)]))\n return out", "def main():\n args = get_args()\n FILE = args.FILE\n annotations = args.annotations\n outfile = args.outfile\n \n \n if not os.path.isfile(FILE):\n die('\"{}\" is not a file'.format(FILE))\n if not os.path.isfile(annotations):\n die('\"{}\" is not a file'.format(annotations))\n if os.path.isfile(FILE) and os.path.isfile(annotations):\n reader = csv.DictReader(open(FILE), delimiter = '\\t', fieldnames = (\"qseqid\", \"sseqid\", \"pident\", \"length\", \"mismatch\", \"gapopen\", \"qstart\", \"qend\", \"sstart\", \"send\", \"evalue\", \"bitscore\"))\n reader_a = csv.DictReader(open(annotations), fieldnames = (\"centroid\", \"domain\", \"kingdom\", \"phylum\", \"class\", \"order\", \"genus\", \"species\"))\n reader_b = csv.reader(open(annotations, 'r'))\n anno_dict = {}\n for row in reader_b:\n key1 = row[0]\n anno_dict[key1] = row[1:]\n\n #print(anno_dict)\n \n \"\"\"for dct in map(dict, reader_a):\n genus = (f\"{dct['genus']}\")\n species = (f\"{dct['species']}\")\n if genus == \"\": \n print(\"NA\")\n else:\n print(genus)\n if species == \"\":\n print(\"NA\")\n else:\n print(species)\"\"\"\n for dct in map(dict, reader):\n seq_id = (f\"{dct['sseqid']}\") \n pident = (f\"{dct['pident']}\")\n #print(seq_id)\n for dct_a in map(dict, reader_a):\n genus = (f\"{dct_a['genus']}\")\n species = (f\"{dct_a['species']}\")\n if any(seq_id == key for key in anno_dict): \n \"\"\"print(seq_id)\n print(pident)\n print(genus)\n print(species)\n #find a way to print genus and species of seq_id\n \"\"\"\n \n else:\n warn('Cannot find seq \"{}\" in lookup'.format(seq_id))\n \"\"\"for line_a in reader_a:\n an_id = (line_a['centroid']) \n print('\"{}\" is an_id'.format(an_id)) \n for line in reader:\n seq_id = (line['sseqid'])\n print('\"{}\" is seq_id'.format(seq_id))\n if seq_id == an_id:\n print(\"hi\")\n else:\n warn('Cannot find seq \"{}\" in lookup'.format(seq_id))\n \"\"\"\n #pprint.pprint(dict_list)\n #pprint.pprint(dict_list_a)\n #for key, value in d1.items():\n #if key is 'sseqid':\n #print(value)\n #print(dict_list_a['centroid']) ", "def write_protein_fasta(args, clusters=None, fasta_dir=None):\n row, concat_fasta_path, frags = args\n dotpath = row[\"path\"]\n phylogeny_dict = {\"prot.idx\": row.name, \"path\": dotpath}\n for phy_prop in [name for name in row.index if name.startswith(\"phy.\")]:\n phylogeny_dict[phy_prop] = row[phy_prop]\n inpath = dotpath_to_path(dotpath)\n prot_info = read_tsv_or_parquet(inpath / PROTEINS_FILE)\n prot_info[\"frag.idx\"] = prot_info[\"frag.id\"].map(\n lambda oid: frags.loc[oid][\"frag.idx\"]\n )\n prot_info[\"frag.is_plas\"] = prot_info[\"frag.id\"].map(\n lambda oid: frags.loc[oid][\"frag.is_plas\"]\n )\n prot_info[\"frag.is_scaf\"] = prot_info[\"frag.id\"].map(\n lambda oid: frags.loc[oid][\"frag.is_scaf\"]\n 
)\n prot_info[\"frag.is_chr\"] = prot_info[\"frag.id\"].map(\n lambda oid: frags.loc[oid][\"frag.is_chr\"]\n )\n prot_info[\"frag.id\"] = prot_info[\"frag.id\"].map(\n lambda oid: frags.loc[oid][\"frag.id\"]\n )\n # Write out updated protein info\n write_tsv_or_parquet(prot_info, inpath / HOMOLOGY_FILE)\n # include phylogeny info in per-sequence info\n for prop in phylogeny_dict:\n prot_info[prop] = phylogeny_dict[prop]\n # write concatenated sequence info\n if clusters is None:\n fasta_path = concat_fasta_path\n info_to_fasta(None, fasta_path, append=True, infoobj=prot_info)\n else:\n for cluster_id, subframe in clusters.groupby(by=[\"cluster_id\"]):\n cluster_info = prot_info[prot_info.index.isin(subframe[\"members\"])]\n fasta_path = fasta_dir / f\"{cluster_id}.fa\"\n info_to_fasta(None, fasta_path, append=True, infoobj=cluster_info)", "def fix_annotmatch_pzmaster1():\n import wbia\n\n ibs = wbia.opendb('PZ_Master1')\n infr = wbia.AnnotInference(ibs=ibs, aids=ibs.get_valid_aids(), verbose=5)\n infr.initialize_graph()\n annots = ibs.annots()\n aid_to_nid = ut.dzip(annots.aids, annots.nids)\n\n if False:\n infr.reset_feedback()\n infr.ensure_mst()\n infr.apply_feedback_edges()\n infr.relabel_using_reviews()\n infr.start_qt_interface()\n\n # Get annotmatch rowids that agree with current labeling\n if False:\n annotmatch = ibs.db.get_table_as_pandas('annotmatch')\n import pandas as pd\n\n flags1 = pd.isnull(annotmatch['annotmatch_evidence_decision'])\n flags2 = annotmatch['annotmatch_tag_text'] == ''\n bad_part = annotmatch[flags1 & flags2]\n rowids = bad_part.index.tolist()\n ibs.delete_annotmatch(rowids)\n\n if False:\n # Delete bidirectional annotmatches\n annotmatch = ibs.db.get_table_as_pandas('annotmatch')\n df = annotmatch.set_index(['annot_rowid1', 'annot_rowid2'])\n\n # Find entires that have both directions\n pairs1 = annotmatch[['annot_rowid1', 'annot_rowid2']].values\n f_edges = {tuple(p) for p in pairs1}\n b_edges = {tuple(p[::-1]) for p in pairs1}\n isect_edges = {tuple(sorted(p)) for p in b_edges.intersection(f_edges)}\n isect_edges1 = list(isect_edges)\n isect_edges2 = [p[::-1] for p in isect_edges]\n\n # cols = ['annotmatch_evidence_decision', 'annotmatch_tag_text']\n import pandas as pd\n\n custom_ = {\n (559, 4909): (False, ['photobomb']),\n (7918, 8041): (False, ['photobomb']),\n (6634, 6754): (False, ['photobomb']),\n (3707, 3727): (False, ['photobomb']),\n (86, 103): (False, ['photobomb']),\n }\n extra_ = {}\n\n fixme_edges = []\n\n d1 = df.loc[isect_edges1].reset_index(drop=False)\n d2 = df.loc[isect_edges2].reset_index(drop=False)\n flags = d1['annotmatch_evidence_decision'] != d2['annotmatch_evidence_decision']\n from wbia.tag_funcs import _parse_tags\n\n for f, r1, r2 in zip(flags, d1.iterrows(), d2.iterrows()):\n v1, v2 = r1[1], r2[1]\n aid1 = v1['annot_rowid1']\n aid2 = v1['annot_rowid2']\n truth_real = (\n ibs.const.EVIDENCE_DECISION.POSITIVE\n if aid_to_nid[aid1] == aid_to_nid[aid2]\n else ibs.const.EVIDENCE_DECISION.NEGATIVE\n )\n truth1 = v1['annotmatch_evidence_decision']\n truth2 = v2['annotmatch_evidence_decision']\n t1 = _parse_tags(v1['annotmatch_tag_text'])\n t2 = _parse_tags(v2['annotmatch_tag_text'])\n newtag = ut.union_ordered(t1, t2)\n if (aid1, aid2) in custom_:\n continue\n fixme_flag = False\n if not pd.isnull(truth1):\n if truth_real != truth1:\n fixme_flag = True\n if not pd.isnull(truth2):\n if truth_real != truth2:\n fixme_flag = True\n if fixme_flag:\n logger.info('newtag = {!r}'.format(newtag))\n logger.info('truth_real = 
{!r}'.format(truth_real))\n logger.info('truth1 = {!r}'.format(truth1))\n logger.info('truth2 = {!r}'.format(truth2))\n logger.info('aid1 = {!r}'.format(aid1))\n logger.info('aid2 = {!r}'.format(aid2))\n fixme_edges.append((aid1, aid2))\n else:\n extra_[(aid1, aid2)] = (truth_real, newtag)\n\n extra_.update(custom_)\n new_pairs = extra_.keys()\n new_truths = ut.take_column(ut.dict_take(extra_, new_pairs), 0)\n new_tags = ut.take_column(ut.dict_take(extra_, new_pairs), 1)\n new_tag_texts = [';'.join(t) for t in new_tags]\n aids1, aids2 = ut.listT(new_pairs)\n\n # Delete the old\n ibs.delete_annotmatch(\n d1['annotmatch_rowid'].values.tolist()\n + d2['annotmatch_rowid'].values.tolist()\n )\n\n # Add the new\n ams = ibs.add_annotmatch_undirected(aids1, aids2)\n ibs.set_annotmatch_evidence_decision(ams, new_truths)\n ibs.set_annotmatch_tag_text(ams, new_tag_texts)\n\n if False:\n import wbia.guitool as gt\n\n gt.ensure_qapp()\n ut.qtensure()\n from wbia.gui import inspect_gui\n\n inspect_gui.show_vsone_tuner(ibs, aid1, aid2)\n\n # pairs2 = pairs1.T[::-1].T\n # idx1, idx2 = ut.isect_indices(list(map(tuple, pairs1)),\n # list(map(tuple, pairs2)))\n # r_edges = list(set(map(tuple, map(sorted, pairs1[idx1]))))\n # unique_pairs = list(set(map(tuple, map(sorted, pairs1[idx1]))))\n # df = annotmatch.set_index(['annot_rowid1', 'annot_rowid2'])\n\n x = ut.ddict(list)\n annotmatch = ibs.db.get_table_as_pandas('annotmatch')\n import ubelt as ub\n\n _iter = annotmatch.iterrows()\n prog = ub.ProgIter(_iter, length=len(annotmatch))\n for k, m in prog:\n aid1 = m['annot_rowid1']\n aid2 = m['annot_rowid2']\n if m['annotmatch_evidence_decision'] == ibs.const.EVIDENCE_DECISION.POSITIVE:\n if aid_to_nid[aid1] == aid_to_nid[aid2]:\n x['agree1'].append(k)\n else:\n x['disagree1'].append(k)\n elif m['annotmatch_evidence_decision'] == ibs.const.EVIDENCE_DECISION.NEGATIVE:\n if aid_to_nid[aid1] == aid_to_nid[aid2]:\n x['disagree2'].append(k)\n else:\n x['agree2'].append(k)\n\n ub.map_vals(len, x)\n ut.dict_hist(annotmatch.loc[x['disagree1']]['annotmatch_tag_text'])\n\n disagree1 = annotmatch.loc[x['disagree1']]\n pb_disagree1 = disagree1[disagree1['annotmatch_tag_text'] == 'photobomb']\n aids1 = pb_disagree1['annot_rowid1'].values.tolist()\n aids2 = pb_disagree1['annot_rowid2'].values.tolist()\n aid_pairs = list(zip(aids1, aids2))\n infr = wbia.AnnotInference.from_pairs(aid_pairs, ibs=ibs, verbose=5)\n if False:\n feedback = infr.read_wbia_annotmatch_feedback(edges=infr.edges())\n infr.external_feedback = feedback\n infr.apply_feedback_edges()\n infr.start_qt_interface(loop=False)\n\n # Delete these values\n if False:\n nonpb_disagree1 = disagree1[disagree1['annotmatch_tag_text'] != 'photobomb']\n disagree2 = annotmatch.loc[x['disagree2']]\n ibs.delete_annotmatch(nonpb_disagree1['annotmatch_rowid'])\n ibs.delete_annotmatch(disagree2['annotmatch_rowid'])\n\n # ut.dict_hist(disagree1['annotmatch_tag_text'])\n import networkx as nx\n\n graph = nx.Graph()\n graph.add_edges_from(zip(pb_disagree1['annot_rowid1'], pb_disagree1['annot_rowid2']))\n list(nx.connected_components(graph))\n\n set(annotmatch.loc[x['disagree2']]['annotmatch_tag_text'])\n\n # aid1, aid2 = 2585, 1875\n # # pd.unique(annotmatch['annotmatch_evidence_decision'])\n # from wbia.gui import inspect_gui\n # inspect_gui.show_vsone_tuner(ibs, aid1, aid2)\n # from vtool import inspect_matches\n\n # aid1, aid2 = 2108, 2040\n\n # pd.unique(annotmatch['annotmatch_tag_text'])\n\n # infr.reset_feedback()\n # infr.relabel_using_reviews()", "def annotate_ISM(data_df, 
REFERENCE, position_list, reference_genbank_name=\"data/covid-19-genbank.gb\"):\n seq_list = data_df['sequence'].values.tolist()\n \n seq_index = []\n index = 0\n for base in REFERENCE[1]:\n if base == '-':\n seq_index.append(index)\n else:\n index += 1\n seq_index.append(index)\n reference_local_index_map = np.array(seq_index)\n mapped_reference_index = []\n for index, entropy in position_list:\n mapped_reference_index.append((index, reference_local_index_map[index], entropy))\n REFERENCE_ISM = ''.join([REFERENCE[1][item[0]] for item in position_list])\n logging.info('Reference ISM: {}.'.format(REFERENCE_ISM))\n \n gene_dict = load_gene_dict(reference_genbank_name)\n reference_raw = REFERENCE[1].replace('-', '')\n res = OrderedDict()\n res['Ref position'] = []\n res['Entropy'] = []\n res['Gene'] = []\n res['Is silent'] = []\n res['AA position'] = []\n for align_index, ref_index, entropy in mapped_reference_index:\n codon, codon_idx, name, codon_pos = find_SNP(ref_index, gene_dict, reference_raw)\n base_freq = Counter([item[align_index] for item in seq_list]).most_common()\n for alt_base, count in base_freq:\n if alt_base != reference_raw[ref_index-1]:\n break\n if codon is None:\n if_silence = True\n else:\n alt_codon = list(codon)\n alt_codon[codon_idx] = alt_base\n alt_codon = ''.join(alt_codon)\n ref_aa = translate(codon)\n ism_aa = translate(alt_codon)\n if ref_aa == ism_aa:\n if_silence = True\n else:\n if_silence = False\n res['Ref position'].append(ref_index)\n res['Entropy'].append(entropy)\n if name is None:\n name = 'Non-coding'\n res['Gene'].append(name)\n res['Is silent'].append(if_silence)\n if codon_pos is None:\n res['AA position'].append('NaN')\n else:\n res['AA position'].append('{}{}{}'.format(ref_aa, codon_pos, ism_aa))\n annotation_df = pd.DataFrame.from_dict(res)\n return annotation_df", "def align(filename, prog, outfile):\n ra = RunAlign()\n ra.run_align(filename, prog, outfile)", "def process_bam(bam, output_dp):\r\n bam_fn = os.path.basename(bam)\r\n coverage_fp = os.path.join(output_dp, bam_fn.replace('.bam', '_coverage.csv'))\r\n reads_fp = os.path.join(output_dp, bam_fn.replace('.bam', '_reads.csv'))\r\n\r\n samfile = pysam.AlignmentFile(bam, \"rb\")\r\n contigs_size = get_ref_lens(samfile)\r\n coverage = coverage_vectors(contigs_size)\r\n\r\n read_output = open(reads_fp, 'w+')\r\n read_output.write('read_length,mapq,start,end,reference')\r\n for l in samfile.fetch():\r\n if l.mapq < 10: continue\r\n if l.rlen < 50: continue\r\n read_output.write('\\n{},{},{},{},{}'.format(l.rlen, l.mapq,\r\n l.reference_start, l.reference_end, samfile.getrname(l.reference_id).split(',')[0]))\r\n coverage[samfile.getrname(l.tid)][\"nb_reads\"] += 1\r\n coverage[samfile.getrname(l.reference_id)][\"positions\"][l.reference_start:l.reference_end] = 1\r\n coverage[samfile.getrname(l.tid)][\"nb_bp\"] += l.rlen\r\n read_output.close()\r\n\r\n coverage_prop = {}\r\n for contig,vector in coverage.items():\r\n if vector['nb_bp'] == 0: # no reads, so output blank file\r\n output = pandas.DataFrame()\r\n output.to_csv(coverage_fp, index=False)\r\n continue\r\n temp = {}\r\n for i in contigs_size:\r\n if contig == i[\"Seq\"]:\r\n temp[\"length\"] = i[\"Length\"]\r\n temp[\"ratio_covered\"] = np.sum(vector[\"positions\"])/float(len(vector[\"positions\"]))\r\n temp[\"number_reads\"] = vector[\"nb_reads\"]\r\n temp[\"number_bp\"] = vector[\"nb_bp\"]\r\n if vector[\"nb_reads\"] > 0 :\r\n coverage_prop[contig] = temp\r\n\r\n output = pandas.DataFrame(coverage_prop).transpose()\r\n output = 
output.sort_values(['number_bp','ratio_covered'],ascending=[0,0])\r\n output.to_csv(coverage_fp, index=False)\r\n samfile.close()\r\n return coverage_fp, reads_fp", "def main():\n\n\tparser = OptionParser()\n\tparser.add_option(\"-p\", dest=\"pdbfile\", help=\"pdbfile\")\n\tparser.add_option(\"-s\", dest=\"statefile\", help=\"statefile\")\n\tparser.add_option(\"-o\", dest=\"outname\", help=\"outname\")\n\tparser.add_option(\"-l\", dest=\"ligcutoff\", help=\"gridlig cutoff\", default=2.5)\n\tparser.add_option(\"-b\", dest=\"bbcutoff\", help=\"gridbb cutoff\", default=2.0)\n\tparser.set_description(main.__doc__)\n\t(options, args) = parser.parse_args()\n\n\tif not options.pdbfile or not options.statefile or not options.outname:\n\t\tparser.print_help()\n\t\tsys.exit()\n\n\t# get output filename\n\tcols = options.outname.split(\".\")\n\toutgridlig = cols[0] + \".gridlig\"\n\toutgridbb = cols[0] + \".gridbb\"\n\n\t# get backbone from protein\n\tprotein = Molecule()\n\tprotein.readPDB(options.pdbfile)\n\t\n\tsele = Selection()\n\tsele.makeSelection(\"BB\")\n\tbb = sele.apply_selection(protein).atomList()\n\n\t# read in previous statefile information\n\ttry:\n\t\tSTATEFILE = open(options.statefile)\n\texcept:\n\t\tprint \"unable to open statefile\"\n\t\tsys.exit()\n\n\tgridlig_file = \"\"\n\tgridbb_file = \"\"\n\tfor line in STATEFILE.readlines():\n\t\tcols = line.split()\n\t\tif cols[0] == \"gridlig:\":\n\t\t\tgridlig_file = cols[1]\n\t\tif cols[0] == \"gridbb:\":\n\t\t\tgridbb_file = cols[1]\n\n\tgridlig = grid()\n\tgridbb = grid()\n\n\tgridlig.read(gridlig_file)\n\tgridbb.read(gridbb_file)\n\n\tgridlig.setFullOccupied()\n\tgridbb.setFullOccupied()\n\n\tligcutoff = float(options.ligcutoff)\n\tbbcutoff = float(options.bbcutoff)\n\tgridTrimInclude(gridbb, bb, bbcutoff)\n\tgridTrimExclude(gridlig, bb, ligcutoff)\n\n\tgridlig.write(outgridlig)\n\tgridbb.write(outgridbb)", "def prob_t_a_given_s(self, alignment_info):\n ...", "def convert_roi_align(g, op, block):\n\n rois = g.get_node(op.input(\"ROIs\")[0])\n spatial_scale = op.attr(\"spatial_scale\")\n if op.attr(\"aligned\"):\n offset = _expr.const(0.5, dtype=\"float32\")\n roi_offset = _op.divide(offset, _expr.const(spatial_scale, dtype=\"float32\"))\n rois = _op.subtract(rois, roi_offset)\n num_rois = infer_shape(rois)[0]\n zero_node = _expr.const(0, dtype=\"int32\")\n batch_index = _op.full(zero_node, [num_rois, 1], dtype=\"float32\")\n rois = _op.concatenate([batch_index, rois], axis=1)\n out = _op.vision.roi_align(\n g.get_node(op.input(\"X\")[0]),\n rois,\n pooled_size=[op.attr(\"pooled_height\"), op.attr(\"pooled_width\")],\n spatial_scale=spatial_scale,\n sample_ratio=op.attr(\"sampling_ratio\"),\n mode=\"avg\",\n )\n g.add_node(op.output(\"Out\")[0], out)", "def _load_pascal_annotation(self, index):\n image = index\n im_path = self.image_path_from_index(image)\n im = cv2.imread(im_path)\n width = im.shape[1]\n height = im.shape[0]\n num_objs = 0\n for ix, obj in enumerate(image.objects):\n if image.objects[ix].x > width - 2 or image.objects[ix].y > height - 2:\n continue \n assert(image.objects[ix].width > 0)\n assert(image.objects[ix].height > 0)\n\n num_objs += 1\n\n boxes = np.zeros((num_objs, 4), dtype=np.float32)\n\n partial_entity_class = np.zeros((num_objs, 96), dtype=np.int32)\n partial_relation_class = np.zeros((num_objs, num_objs, 43), dtype=np.int32)\n gt_classes = np.zeros((0, num_objs, 1), dtype=np.int32)\n overlaps = np.zeros((0, num_objs, self.num_classes), dtype=np.int64)\n # \"Seg\" area for pascal is just the box area\n 
seg_areas = np.zeros((num_objs), dtype=np.float32)\n queries = np.zeros((0, 235), dtype=np.float32)\n # Load object bounding boxes into a data frame.\n index = 0\n \n for ix, obj in enumerate(image.objects):\n if image.objects[ix].x > width - 2 or image.objects[ix].y > height - 2:\n continue\n # Make pixel indexes 0-based\n x1_offset = 0.0#image.objects[ix].width * (-0.1)\n x2_offset = 0.0#image.objects[ix].width * 0.1\n y1_offset = 0.0#image.objects[ix].height * (-0.1)\n y2_offset = 0.0#image.objects[ix].height * 0.1\n boxes[index][0] = max((image.objects[ix].x + x1_offset), 0.0)\n boxes[index][1] = max((image.objects[ix].y + y1_offset), 0.0)\n boxes[index][2] = min((image.objects[ix].x + x2_offset + image.objects[ix].width), width - 1)\n boxes[index][3] = min((image.objects[ix].y + y2_offset + image.objects[ix].height), height - 1)\n seg_areas[index] = (boxes[index][2] - boxes[index][0] + 1.0) * (boxes[index][3] - boxes[index][1] + 1.0)\n index += 1\n assert (boxes[:, 2] > boxes[:, 0]).all()\n assert (boxes[:, 3]\t > boxes[:, 1]).all() \n #load gt classes\n \n i_index = 0\n for i in range(image.objects_labels.shape[0]):\n if image.objects[i].x > width - 2 or image.objects[i].y > height - 2:\n continue\n partial_entity_class[i_index] = image.objects_labels[i]\n \n j_index = 0\n for j in range(image.objects_labels.shape[0]):\n if image.objects[j].x > width - 2 or image.objects[j].y > height - 2:\n continue\n partial_relation_class[i_index, j_index] = image.predicates_labels[i, j]\n j_index += 1\n i_index += 1\n seen = []\n for query_index in range(image.queries_gt.shape[0]):\n query_gt_classes = np.zeros((1, num_objs, 1), dtype=np.int32)\n query_overlaps = np.zeros((1, num_objs, self.num_classes), dtype=np.int64)\n query_overlaps[0, :, 3] = 1\n query_gt_classes[0, :, 0] = 3\n if image.one_hot_relations_gt[query_index][-1] == 1:\n # print \"negative triplet\"\n continue\n\n sub = image.one_hot_relations_gt[query_index][:96]\n obj = image.one_hot_relations_gt[query_index][96:96 * 2]\n rel = image.one_hot_relations_gt[query_index][96 * 2:]\n key = str(np.argmax(sub)) + \"_\" + str(np.argmax(rel)) + \"_\" + str(np.argmax(obj))\n if key in seen:\n continue\n seen.append(key)\n\n found = False\n i_index = 0\n for i in range(image.objects_labels.shape[0]):\n if image.objects[i].x > width - 2 or image.objects[i].y > height - 2:\n continue\n if not np.array_equal(image.objects_labels[i], sub):\n i_index += 1\n continue\n j_index = 0\n for j in range(image.objects_labels.shape[0]):\n if image.objects[j].x > width - 2 or image.objects[j].y > height - 2:\n continue \n\n if not np.array_equal(image.objects_labels[j], obj):\n j_index += 1\n continue\n if np.array_equal(rel, image.predicates_labels[i, j]):\n query_gt_classes[0, i_index, 0] = 1\n query_overlaps[0, i_index, 1] = 1\n query_overlaps[0, i_index, 3] = 0\n query_gt_classes[0, j_index, 0] = 2\n query_overlaps[0, j_index, 2] = 1\n query_overlaps[0, j_index, 3] = 0\n \n #partial_entity_class[i_index] = sub\n #partial_entity_class[j_index] = obj\n #partial_relation_class[i_index, j_index] = rel\n \n found = True\n j_index += 1\n i_index += 1\n if not found:\n continue\n gt_classes = np.concatenate((gt_classes, query_gt_classes), axis=0)\n overlaps = np.concatenate((overlaps, query_overlaps), axis=0)\n queries = np.concatenate((queries, image.one_hot_relations_gt[query_index].reshape([1,-1])), axis=0)\n\n return {'boxes': boxes,\n 'gt_classes': gt_classes,\n 'gt_overlaps': overlaps,\n 'flipped': False,\n 'seg_areas': seg_areas,\n 'query' : 
queries,\n 'partial_entity_class' : partial_entity_class,\n 'partial_relation_class' : partial_relation_class,\n 'orig_image': None}", "def __init__(self, reads1, reads2):\n print \"Start Analysis...\"\n self.alignment()\n self.sai_to_sam()\n self.sam_to_bam()\n #self.clean_files()", "def run(self):\n contig_file = self.data.contigfiles[0]\n reads = self.data.readfiles\n\n ## Index contigs using IS algorithm\n prefix = os.path.join(self.outpath, 'bt2')\n cmd_args = [self.build_bin, '-f', contig_file, prefix]\n self.arast_popen(cmd_args, overrides=False)\n\n ## Align reads\n samfile = os.path.join(self.outpath, 'align.sam')\n cmd_args = [self.executable, '-x', prefix, '-S', samfile,\n '-p', self.process_threads_allowed]\n if len(reads) == 2:\n cmd_args += ['-1', reads[0], '-2', reads[1]]\n elif len(reads) == 1:\n cmd_args += ['-U', reads[0]]\n else:\n raise Exception('Bowtie plugin error')\n self.arast_popen(cmd_args, overrides=False)\n\n if not os.path.exists(samfile):\n raise Exception('Unable to complete alignment')\n return {'alignment': samfile}", "def PrepareReceptor(pdb,padding=4,outpath=\"\"):\n print(\"STOP CALLING THIS FUNCTION\")\n exit()\n com = oechem.OEGraphMol()\n ifs = oechem.oemolistream()\n if ifs.open(pdb):\n oechem.OEReadPDBFile(ifs, com)\n ifs.close()\n\n \"\"\"\n Sorry, this requires some explanation. Openeye wasn't recognizing the previously docked ligand, so I tried to find other ways.\n The next blocks of code take our system and split it based on its connected components, for which its REQUIRED that our protein\n only has a single chain. It assumes that the last component is the ligand. It then creates the ligand (lig) and protein (prot)\n as separate molecules. Next, it finds the minimum and maximum 3D coordinates of the current ligand and produces a box around\n it with the specified padding. Finally it uses this box to create a 'receptor' object into which ligands can be docked.\n Only the receptor is returned.\n Openeye's docking shouldn't be this involved, but I couldn't get it to run the typical 'hybrid' docking without error.\n \"\"\"\n oechem.OEDetermineConnectivity(com)\n nparts, connect = oechem.OEDetermineComponents(com)\n if(nparts != 2):\n print(\"ERR in dock_conf::prepareReceptor. 
PDB doesn't have 2 connected components\")\n exit()\n ## TODO: What is a good way to catch errors?\n # Get apo\n pred = oechem.OEPartPredAtom(connect)\n pred.SelectPart(nparts)\n lig = oechem.OEGraphMol()\n oechem.OESubsetMol(lig, com, pred)\n print(lig)\n \n # Get protein\n pred = oechem.OEPartPredAtom(connect)\n pred.SelectPart(1)\n prot = oechem.OEGraphMol()\n oechem.OESubsetMol(prot, com, pred)\n \n # Get box dimensions by iterating over ligand\n x_min = y_min = z_min = float('inf')\n x_max = y_max = z_max = -float('inf')\n crd = lig.GetCoords()\n print(\"CRD\", crd)\n for atm in crd:\n x,y,z = crd[atm]\n if x < x_min:\n x_min = x\n if y < y_min:\n y_min = y\n if z < z_min:\n z_min = z\n if x > x_max:\n x_max = x\n if y > y_max:\n y_max = y\n if z > z_max:\n z_max = z\n x_min -= padding\n y_min -= padding\n z_min -= padding\n x_max += padding\n y_max += padding\n z_max += padding\n print(x_min,y_min,z_max, y_max)\n # Now prepare the receptor\n receptor = oechem.OEGraphMol()\n box = oedocking.OEBox()\n box.Setup(x_max, y_max, z_max, x_min, y_min, z_min)\n oedocking.OEMakeReceptor(receptor, prot, box)\n \n if not outpath == \"\":\n oedocking.OEWriteReceptorFile(receptor,f'{outpath}/receptor.oeb')\n return receptor", "def __init__(self, seq, peptide):\r\n self.seq = seq # original DNA sequence\r\n self.peptide = peptide # original peptide sequence\r\n self.allPepSeqs = [] # list to hold all possible nuc sequences based on the peptide sequence\r\n self.codonTable = { # holds all amino acids and their associated codons\r\n 'F': ['TTT', 'TTC'], 'S': ['TCT', 'TCC', 'TCA', 'TCG', 'AGT', 'AGC'],\r\n 'Y': ['TAT', 'TAC'], 'C': ['TGT', 'TGC'], 'L': ['TTA', 'TTG', 'CTT', 'CTC', 'CTA', 'CTG'],\r\n '-': ['TAA', 'TGA', 'TAG'], 'W': ['TGG'], 'P': ['CCT', 'CCC', 'CCA', 'CCG'],\r\n 'H': ['CAT', 'CAC'], 'R': ['CGT', 'CGC', 'CGA', 'CGG', 'AGA', 'AGG'], 'Q': ['CAA', 'CAG'],\r\n 'I': ['ATT', 'ATC', 'ATA'], 'T': ['ACT', 'ACC', 'ACA', 'ACG'], 'N': ['AAT', 'AAC'],\r\n 'K': ['AAA', 'AAG'], 'M': ['ATG'], 'V': ['GTT', 'GTC', 'GTA', 'GTG'],\r\n 'A': ['GCT', 'GCC', 'GCA', 'GCG'], 'D': ['GAT', 'GAC'], 'G': ['GGT', 'GGC', 'GGA', 'GGG'],\r\n 'E': ['GAA', 'GAG']\r\n }", "def process_read(self, ref, read, ref_offset=0):\n\n if read.alignment.mapping_quality < self.config.min_mapq:\n return\n\n ref_pos = read.alignment.position.position - ref_offset\n read_pos = 0\n # Use set(), as some cigar operations might generate duplicated positions,\n # E.g. 
for insertions, it extends the candidate positions to\n # [ins_pos - ins_len, ins_pos + ins_len] which might overlap with some\n # nearby mismatches.\n positions = set()\n for cigar in read.alignment.cigar:\n # Break if it reached the end of reference sequence.\n if ref_pos >= len(ref):\n break\n if cigar.operation not in utils.CIGAR_OPS:\n raise ValueError('Unexpected CIGAR operation', cigar, read)\n\n if cigar.operation == cigar_pb2.CigarUnit.ALIGNMENT_MATCH:\n positions.update(\n self._process_align_match(cigar, ref, read, ref_pos, read_pos))\n read_pos += cigar.operation_length\n ref_pos += cigar.operation_length\n elif cigar.operation == cigar_pb2.CigarUnit.SEQUENCE_MISMATCH:\n positions.update(\n self._process_seq_mismatch(cigar, ref, read, ref_pos, read_pos))\n read_pos += cigar.operation_length\n ref_pos += cigar.operation_length\n elif cigar.operation == cigar_pb2.CigarUnit.INSERT:\n positions.update(\n self._process_insert(cigar, ref, read, ref_pos, read_pos))\n read_pos += cigar.operation_length\n elif cigar.operation == cigar_pb2.CigarUnit.CLIP_SOFT:\n positions.update(\n self._process_soft_clip(cigar, ref, read, ref_pos, read_pos))\n read_pos += cigar.operation_length\n elif (cigar.operation == cigar_pb2.CigarUnit.DELETE or\n cigar.operation == cigar_pb2.CigarUnit.SKIP):\n positions.update(\n self._process_delete(cigar, ref, read, ref_pos, read_pos))\n ref_pos += cigar.operation_length\n elif cigar.operation == cigar_pb2.CigarUnit.SEQUENCE_MATCH:\n ref_pos += cigar.operation_length\n read_pos += cigar.operation_length\n elif (cigar.operation == cigar_pb2.CigarUnit.CLIP_HARD or\n cigar.operation == cigar_pb2.CigarUnit.PAD):\n pass\n\n # Yield positions within the range\n for pos in sorted(positions):\n if pos >= 0 and pos < len(ref):\n yield pos", "def readalign(self, opt, fh):\n## print \"entering readalign:\", opt\n edgeInfo = {}\n for p in opt:\n (key, value) = p.split('=')\n edgeInfo[key] = value\n\n s = fh.readline().split()\n## print s;\n if(len(s) == 7 and s[0] == 's'):\n vseq = self._vseq(len(s[6]))\n self.mAlign += vseq\n while len(s) == 7 and s[0] == 's':\n # Add the sequence name to the dictionary,\n # then add a corresponding node to the mapping.\n if s[1] not in self.sequences:\n self.sequences[s[1]] = AnonSequence(int(s[5]), s[1])\n self.mAlign += self.sequences[s[1]]\n\n # PROCESS THE KNOWN INTERVALS\n if(s[4] == '-'):\n ns = self.sequences[s[1]][-int(s[2]):-int(s[2]) - int(s[3])]\n self.sequences[s[1]].seqsplice(reverse_complement(\n s[6].replace('-', '')), ns.start, ns.stop)\n else:\n ns = self.sequences[s[1]][int(s[2]):int(s[2]) + int(s[3])]\n self.sequences[s[1]].seqsplice(s[6].replace('-', ''),\n ns.start, ns.stop)\n\n for inter in refIntervals(s[6]):\n self.mAlign[vseq[inter[0]:inter[1]]][ns[inter[2]:inter[3]]] = \\\n (inter[4])\n self.mAlign[ns[inter[2]:inter[3]]][vseq[inter[0]:inter[1]]] = \\\n (inter[4])\n\n s = fh.readline().split()", "def _makeimap(self):\n self.map_['source'] = 'NAOJ'\n self.map_['provider'] = 'NRO'\n self.map_['instrument'] = 'NORH'\n self.map_['phyobs'] = ''", "def alignMono(entry,prec=1E-4,seed_index=0,supercell=2,\n c_mag=50,dist_from_plane=3):\n\n\n\n\n new_latt,fit_fracs_both= getNewLattice(entry,dim=2,prec=prec,\n seed_index=seed_index,\n supercell=supercell,c_mag=c_mag)\n\n fit_fracs = np.array([np.array(x)+[0,0,.5] for x in fit_fracs_both[0]])\n final_sites = np.dot(new_latt.T,fit_fracs.T).T\n # Create new lattice matricies\n lat1 = np.array([new_latt[0],new_latt[1],new_latt[2]])\n lat2 = 
np.array([new_latt[1],new_latt[0],new_latt[2]])\n\n # Generate atomic fractions\n new_fracs1 = np.linalg.solve(lat1.T,np.array(final_sites).T).T\n new_fracs2 = np.linalg.solve(lat2.T,np.array(final_sites).T).T\n species = fit_fracs_both[1]\n return([species,new_fracs1,lat1],[species,new_fracs2,lat2])", "def preprocess(fasta_files, qual_files, mapping_file,\r\n barcode_type=\"golay_12\",\r\n min_seq_len=200, max_seq_len=1000, min_qual_score=25, starting_ix=1,\r\n keep_primer=False, max_ambig=0, max_primer_mm=0, trim_seq_len=False,\r\n dir_prefix='.', max_bc_errors=2, max_homopolymer=4,\r\n retain_unassigned_reads=False, keep_barcode=False,\r\n attempt_bc_correction=True, qual_score_window=0,\r\n disable_primer_check=False, reverse_primers='disable',\r\n reverse_primer_mismatches=0,\r\n record_qual_scores=False, discard_bad_windows=False,\r\n median_length_filtering=None, added_demultiplex_field=None,\r\n truncate_ambi_bases=False):\r\n\r\n if max_seq_len < 10:\r\n raise ValueError(\"Max sequence must be >= 10\")\r\n if min_seq_len >= max_seq_len:\r\n raise ValueError(\"Min len cannot be >= max len\")\r\n if min_qual_score < 0:\r\n raise ValueError(\"Min qual score must be > 0\")\r\n if starting_ix < 1:\r\n raise ValueError(\"Starting index must be > 0.\")\r\n if max_ambig < 0:\r\n raise ValueError(\"Max ambig chars must be >= 0.\")\r\n if max_primer_mm < 0:\r\n raise ValueError(\"Max primer mismatches must be >= 0.\")\r\n if reverse_primers not in ['disable', 'truncate_only', 'truncate_remove']:\r\n raise ValueError(\"reverse_primers parameter must be 'disable', \" +\r\n \"truncate_only, or truncate_remove.\")\r\n\r\n create_dir(dir_prefix, fail_on_exist=False)\r\n\r\n# try:\r\n# stat(dir_prefix)\r\n# except OSError:\r\n# mkdir(dir_prefix)\r\n\r\n \"\"\"# Generate primer sequence patterns - changing to mapping file primers.\r\n all_primer_seqs, primer_seq_len = \\\r\n get_primer_seqs(primer_seq_pats.split(',')) \"\"\"\r\n\r\n # Check mapping file and get barcode mapping\r\n map_file = open(mapping_file, 'U')\r\n headers, id_map, valid_map, warnings, errors, \\\r\n primer_seqs_lens, all_primers = check_map(\r\n map_file, disable_primer_check,\r\n barcode_type, added_demultiplex_field)\r\n\r\n if reverse_primers != 'disable':\r\n if 'ReversePrimer' not in headers:\r\n raise ValueError('To enable reverse primer check, there must ' +\r\n 'be a \"ReversePrimer\" column in the mapping file with a reverse ' +\r\n 'primer in each cell.')\r\n rev_primers = get_reverse_primers(id_map)\r\n else:\r\n rev_primers = False\r\n\r\n # *** Generate dictionary of {barcode: DNA(ReversePrimer).rc()}\r\n # First check for ReversePrimer in headers, raise error if not found\r\n # Implement local alignment for primer after barcode is determined.\r\n # Add option to flag seq with error for rev_primer not found\r\n # Check primer hit index, truncate sequence\r\n # unit tests.\r\n\r\n map_file.close()\r\n if errors:\r\n raise ValueError(\"Invalid mapping file. 
\" +\r\n \"Validate with check_id_map first: %s\" % \"\\n\".join(errors))\r\n\r\n # Find actual length of barcodes in the mapping file, also check for\r\n # variable lengths, in case of added_demultiplex, split on comma.\r\n barcode_length_check =\\\r\n list(set([len(bc.split(',')[0]) for bc in valid_map]))\r\n\r\n # Check barcode type\r\n if barcode_type not in BARCODE_TYPES:\r\n try:\r\n barcode_len, barcode_fun = int(barcode_type), correct_barcode\r\n except ValueError:\r\n raise ValueError(\"Unsupported barcode type: %s\" % barcode_type)\r\n else:\r\n barcode_len, barcode_fun = BARCODE_TYPES[barcode_type]\r\n\r\n # As people often do not specify a barcode that matches the lengths\r\n # of the barcodes used, a check on the actual barcode lengths needs to\r\n # be done, and an exception raised if they are variable length and not\r\n # specified as so.\r\n if barcode_type != \"variable_length\":\r\n # Raise error if variable length barcodes are present but not\r\n # specified\r\n if len(barcode_length_check) != 1:\r\n raise ValueError('Mapping file has variable length ' +\r\n 'barcodes. If this is intended, specifiy variable lengths ' +\r\n 'with the -b variable_length option.')\r\n # Raise error if the specified barcode length doesn't match what\r\n # is present in the mapping file.\r\n if barcode_len != barcode_length_check[0]:\r\n raise ValueError('Barcode length detected in the mapping file, ' +\r\n ' %d does not match specified barcode length, %d. ' %\r\n (barcode_length_check[0], barcode_len) + 'To specify a barcode ' +\r\n 'length use -b golay_12 or -b hamming_8 for 12 and 8 base pair ' +\r\n 'golay or hamming codes respectively, or -b # where # is the ' +\r\n 'length of the barcode used. E.g. -b 4 for 4 base pair barcodes.')\r\n\r\n fasta_files = map(get_infile, fasta_files)\r\n qual_files = map(get_infile, qual_files)\r\n\r\n # Check fasta files valid format, no duplicate ids\r\n # and ids match between fasta and qual files\r\n all_fasta_ids = fasta_ids(fasta_files)\r\n all_qual_ids = fasta_ids(qual_files)\r\n if qual_files and (len(all_fasta_ids) != len(all_qual_ids)):\r\n f_ids = all_fasta_ids.difference(all_qual_ids)\r\n q_ids = all_qual_ids.difference(all_fasta_ids)\r\n raise ValueError(\r\n \"Found %d ids in fasta file not in qual file, %d ids in qual file not in fasta\" %\r\n (len(f_ids), len(q_ids)))\r\n\r\n for f in fasta_files:\r\n f.seek(0)\r\n if qual_files:\r\n for q in qual_files:\r\n q.seek(0)\r\n # Load quality scores\r\n qual_mappings = parse_qual_scores(qual_files)\r\n for q in qual_files:\r\n q.close()\r\n else:\r\n qual_mappings = {}\r\n\r\n # make filters\r\n filters = []\r\n # seq len filter depends on whether we're including the barcode, if\r\n # median_length_filtering turned on, no length filtering.\r\n if not median_length_filtering:\r\n if trim_seq_len:\r\n # This processing occurs before primer testing, will use largest\r\n # primer length to calculate lengths. 
the dict all_primers has\r\n # keys of each primer with the length of said primer as the value\r\n if disable_primer_check:\r\n primer_seq_len = 0\r\n else:\r\n primer_seq_len = max(all_primers.values())\r\n\r\n if barcode_type == \"variable_length\":\r\n barcode_len = max(barcode_length_check)\r\n\r\n trim = barcode_len + primer_seq_len\r\n filters.append(SeqQualBad(\r\n 'Length outside bounds of %s and %s' % (\r\n min_seq_len,\r\n max_seq_len),\r\n lambda id_, seq, qual:\r\n not (min_seq_len <= len(seq) - trim <= max_seq_len)))\r\n else:\r\n filters.append(SeqQualBad(\r\n 'Length outside bounds of %s and %s' % (\r\n min_seq_len,\r\n max_seq_len),\r\n lambda id_, seq, qual: not (min_seq_len <= len(seq) <= max_seq_len)))\r\n\r\n if not truncate_ambi_bases:\r\n filters.append(SeqQualBad(\r\n 'Num ambiguous bases exceeds limit of %s' % max_ambig,\r\n lambda id_, seq, qual: count_ambig(seq) > max_ambig))\r\n\r\n if qual_mappings:\r\n filters.append(QualMissing)\r\n filters.append(SeqQualBad(\r\n 'Mean qual score below minimum of %s' % min_qual_score,\r\n lambda id_, seq, qual: mean(qual) < min_qual_score))\r\n \"\"\"if qual_score_window:\r\n filters.append(SeqQualBad('Mean window qual score below '+\\\r\n 'minimum of %s' % min_qual_score,\r\n lambda id_, seq, qual: \\\r\n not check_window_qual_scores(qual, qual_score_window, \\\r\n min_qual_score))) \"\"\"\r\n\r\n # Changed this to check entire sequence after barcode-could cause issue\r\n # if barcode-linker-primer have long homopolymers though.\r\n filters.append(SeqQualBad(\r\n 'Max homopolymer run exceeds limit of %s' % max_homopolymer,\r\n lambda id_, seq, qual: seq_exceeds_homopolymers(\r\n seq[barcode_len:], max_homopolymer)))\r\n\r\n # Check seqs and write out\r\n fasta_out = open(dir_prefix + '/' + 'seqs.fna.tmp', 'w+')\r\n if record_qual_scores:\r\n qual_out = open(dir_prefix + '/' + 'seqs_filtered.qual', 'w+')\r\n else:\r\n qual_out = False\r\n\r\n '''log_stats, pre_lens, post_lens = check_seqs(fasta_out, fasta_files,\r\n starting_ix, valid_map, qual_mappings, filters, barcode_len,\r\n primer_seq_len, keep_primer, keep_barcode, barcode_type, max_bc_errors,\r\n retain_unassigned_reads) '''\r\n log_stats, raw_lens, pre_lens, post_lens = check_seqs(fasta_out,\r\n fasta_files, starting_ix, valid_map, qual_mappings, filters,\r\n barcode_len, keep_primer, keep_barcode, barcode_type, max_bc_errors,\r\n retain_unassigned_reads, attempt_bc_correction,\r\n primer_seqs_lens, all_primers, max_primer_mm, disable_primer_check,\r\n reverse_primers, rev_primers, qual_out, qual_score_window,\r\n discard_bad_windows, min_qual_score, min_seq_len,\r\n median_length_filtering, added_demultiplex_field,\r\n reverse_primer_mismatches, truncate_ambi_bases)\r\n\r\n # Write log file\r\n log_file = open(dir_prefix + '/' + \"split_library_log.txt\", 'w+')\r\n log_file.write('\\n'.join(log_stats))\r\n log_file.close()\r\n\r\n # Write sequence distros here\r\n histogram_file = open(dir_prefix + '/' + 'histograms.txt', 'w+')\r\n\r\n histogram_file.write(format_histograms\r\n (*make_histograms(raw_lens, pre_lens, post_lens)))\r\n histogram_file.close()", "def main(argv):\n \n ### gets data from csv, sets variables\n seq1, seq2 = get_seqs('../data/seq.csv')\n \n \n # Assign the longer sequence to s1, and the shorter to s2\n l1, l2 = len(seq1), len(seq2)\n if l1 >= l2:\n s1, s2 = ((l2 - 1) * \".\" + seq1 + (l2 - 1) * \".\"), seq2\n #puts l2-1 \".\"s both sides of l1, allows alignment of all overlap combos\n else:\n s1, s2 = ((l1 - 1) * \".\" + seq2 + (l1 - 1) * 
\".\"), seq1\n l1, l2 = l2, l1 \n\n # writes alignment(s) with highest score into output file\n my_best_score = -1 #so 0 beats best score\n for i in range(l1 + l2 -1):\n score, matched, shift, end_shift = calculate_score(s1, s2, l1, l2, i)\n #assigns returns from calc_score function to these variables\n if score > my_best_score:\n my_best_score = score\n statement = \"This alignment occurs when the smaller strand (\" + \\\n str(l2) + \"nt in length) attaches from base \" + str(i - l2 + 2) + \\\n \" of the larger strand, with the highest score of \" + str(score) + \\\n \":\\n\"\n #statement explaining the alignment in detail\n best_comparison_highSP = (shift + matched + (l2 - 1) * \".\" + \"\\n\")\n best_comparison_lowSP = (shift + matched + end_shift + \"\\n\")\n best_s2, best_s1 = (shift + s2 + end_shift + \"\\n\"), (s1 + \"\\n\\n\\n\")\n #formats the matching, s1 and s2 lines to line-up neatly\n if i < l1 - 1:\n best_alignment = (str(statement) + str(best_comparison_lowSP) \\\n + str(best_s2) + str(best_s1))\n else:\n best_alignment = (str(statement) + str(best_comparison_highSP) \\\n + str(best_s2) + str(best_s1))\n # uses returned variables to write a statement about the alignment \n # giving its score and startpoint, and assigns 3 lines of alignment \n # (s1, s2 and matching bases) to a variable each for later printing\n f = open('../results/seqs_align.txt', 'w')\n f.write(best_alignment)\n f.close()\n print(\"Done!\")\n return None", "def convertXmlToProtein(self, xml):\n\t\t# XML to dictionary\n\t\tproteinObject = Protein()\n\t\t\n\t\tdictionary = xmltodict.parse(xml)\n\t\troot = dictionary[\"uniprot\"]\n\t\tentry = root[\"entry\"]\n\t\t\n\t\tfor element, value in entry.items():\n\t\t\tif element == \"@accession\":\n\t\t\t\tproteinObject.addAttribute(\"id\", \"uniprot\", value)\n\t\t\t\t\n\t\t\tif element == \"name\":\n\t\t\t\tproteinObject.addAttribute(\"proteinShortName\", \"uniprot\", value)\n\t\t\t\t\n\t\t\tif element == \"protein\":\n\t\t\t\tfullname = value[\"recommendedName\"][\"fullName\"]\n\t\t\t\tproteinObject.addAttribute(\"proteinFullName\", \"uniprot\", fullname)\n\t\t\t\t\n\t\t\tif element == \"@created\":\n\t\t\t\tyear,month,day = value.split(\"-\")\n\t\t\t\tproteinObject.addAttribute(\"creationDate\", \"uniprot\", self.convertDateToNative(day,month,year) )\n\t\t\t\t\n\t\t\tif element == \"@modified\":\n\t\t\t\tyear,month,day = value.split(\"-\")\n\t\t\t\tproteinObject.addAttribute(\"modifiedDate\", \"uniprot\", self.convertDateToNative(day,month,year) )\n\t\t\t\n\t\t\tif element == \"comment\":\n\t\t\t\tfor comment in entry[\"comment\"]:\n\t\t\t\t\tif \"text\" in comment:\n\t\t\t\t\t\ttext = comment[\"text\"][\"#text\"] if isinstance(comment[\"text\"], OrderedDict) else comment[\"text\"]\n\t\t\t\t\t\tproteinObject.addAttribute(comment[\"@type\"], \"uniprot\",text)\n\t\t\t\t\t\n\t\t\tif element == \"gene\":\n\t\t\t\tgenes = []\n\t\t\t\tfor gene in value[\"name\"]:\n\t\t\t\t\tif \"#text\" in gene and isinstance(gene, OrderedDict):\n\t\t\t\t\t\tgenes.append(gene[\"#text\"])\n\t\t\t\t\t\n\t\t\t\tproteinObject.addAttribute(\"geneName\", \"uniprot\", genes)\n\t\t\t\t\t\n\t\t\tif element == \"organism\":\n\t\t\t\tif isinstance(value[\"name\"], list):\n\t\t\t\t\torganisms = []\n\t\t\t\t\tfor organism in value[\"name\"]:\n\t\t\t\t\t\torganisms.append(organism[\"#text\"])\n\t\t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\tproteinObject.addAttribute(\"organism\", \"uniprot\", value[\"name\"][\"#text\"])\n\t\t\t\t\n\t\t\t\n\t\t\tif element == 
\"sequence\":\n\t\t\t\tproteinObject.addAttribute(\"sequence\", \"uniprot\",value[\"#text\"].replace(\"\\n\",\"\"))\n\t\t\t\tproteinObject.addAttribute(\"sequencelength\", \"uniprot\",value[\"@length\"].replace(\"\\n\",\"\"))\n\n\n\t\treturn proteinObject", "def _read_next_alignment(self, stream):", "def main(argv):\r\n\r\n mapperAbbrs = {'C':'cushaw', 'S':'shrimp', 'B':'bfast', 'W':'bwa-mem', 'N':'novoalign'}\r\n\r\n #Dictionary of commands to use for various mappers - configure your mapper commands here\r\n aligner_dict = {\r\n\t'B,CS,S':[\r\n\t\t'bfast fasta2brg -f DDiFasta -A 0',\r\n\t\t'bfast fasta2brg -f DDiFasta -A 1',\r\n\t\t'bfast index -f DDiFasta -m 1111111111111111111111 -w 14 -i 1 -A 1 -n DDiProcs',\r\n\t\t'bfast index -f DDiFasta -m 111110100111110011111111111 -w 14 -i 2 -A 1 -n DDiProcs',\r\n\t\t'bfast index -f DDiFasta -m 10111111011001100011111000111111 -w 14 -i 3 -A 1 -n DDiProcs',\r\n\t\t'bfast index -f DDiFasta -m 1111111100101111000001100011111011 -w 14 -i 4 -A 1 -n DDiProcs',\r\n\t\t'bfast index -f DDiFasta -m 111111110001111110011111111 -w 14 -i 5 -A 1 -n DDiProcs',\r\n\t\t'bfast index -f DDiFasta -m 11111011010011000011000110011111111 -w 14 -i 6 -A 1 -n DDiProcs',\r\n\t\t'bfast index -f DDiFasta -m 1111111111110011101111111 -w 14 -i 7 -A 1 -n DDiProcs',\r\n\t\t'bfast index -f DDiFasta -m 111011000011111111001111011111 -w 14 -i 8 -A 1 -n DDiProcs',\r\n\t\t'bfast index -f DDiFasta -m 1110110001011010011100101111101111 -w 14 -i 9 -A 1 -n DDiProcs',\r\n\t\t'bfast index -f DDiFasta -m 111111001000110001011100110001100011111 -w 14 -i 10 -A 1 -n DDiProcs',\r\n\t\t'bfast match -f DDiFasta -A 1 -i 1-10 -k 18 -K 100000 -w 0 -t -n DDiProcs -Q 100000 -l -r DDiFastq1 > DDiBMF',\r\n\t\t'bfast localalign -f DDiFasta -m DDiBMF -A 1 -n DDiProcs -U -q 20 -Q 100000 -t > DDiBAF',\r\n\t\t'rm DDiBMF',\r\n\t\t'bfast postprocess -f DDiFasta -i DDiBAF -o DDiAligned -O 1 -a 3 -z -n DDiProcs -q 20 -Q 100000 -t > DDiSAM',\r\n\t\t'rm DDiBAF'\r\n\t ],\r\n 'C,CS,S':[\r\n 'cushaw3 index DDiFasta -c -p bwtindex',\r\n 'cushaw3 calign -r bwtindex -f DDiFastq1 -t DDiProcs -multi 1 CushawOpts -o DDiSAM'\r\n ],\r\n 'C,NT,S':[\r\n 'cushaw3 index DDiFasta -p bwtindex',\r\n 'cushaw3 align -r bwtindex -f DDiFastq1 -t DDiProcs -multi 1 CushawOpts -o DDiSAM'\r\n ],\r\n 'C,NT,P':[\r\n 'cushaw3 index DDiFasta -p bwtindex',\r\n 'cushaw3 align -r bwtindex -q DDiFastq1 DDiFastq2 -t DDiProcs -multi 1 CushawOpts -o DDiSAM'\r\n ],\r\n 'S,CS,S':[\r\n 'gmapper-cs -N DDiProcs -Q -o 1 --strata --all-contigs ShrimpOpts DDiFastq1 DDiFasta > DDiSAM'\r\n ],\r\n 'S,NT,S':[\r\n 'gmapper-ls -N DDiProcs -Q -o 1 --strata --all-contigs ShrimpOpts DDiFastq1 DDiFasta > DDiSAM'\r\n ],\r\n 'S,NT,P':[\r\n 'gmapper-ls -N DDiProcs -Q -o 1 --strata --all-contigs ShrimpOpts -1 DDiFastq1 -2 DDiFastq2 DDiFasta > DDiSAM'\r\n ],\r\n\t'W,NT,S':[\r\n 'bwa index DDiFasta',\r\n\t 'bwa mem -t DDiProcs BwaMemOpts DDiFasta DDiFastq1 > DDiSAM'\r\n ],\r\n\t'W,NT,P':[\r\n 'bwa index DDiFasta',\r\n\t 'bwa mem -t DDiProcs BwaMemOpts DDiFasta DDiFastq1 DDiFastq2 > DDiSAM'\r\n ],\r\n\t'N,NT,S':[\r\n\t 'novoindex DDiNIX DDiFasta',\r\n 'novoalign -r Random -n 100 -o SAM -d DDiNIX -f DDiFastq1 > DDiSAM'\r\n ],\r\n\t'N,NT,P':[\r\n\t 'novoindex DDiNIX DDiFasta',\r\n 'novoalign -r Random -n 100 -o SAM NovoOpts -d DDiNIX -f DDiFastq1 DDiFastq2 > DDiSAM'\r\n ]\r\n }\r\n\r\n #Arguments that are required\r\n required = ['fastqFiles', 'mappingRefSeqFiles', 'outputDir']\r\n\r\n parser = argparse.ArgumentParser(description='Iteratively calls 3rd party mappers and DDiMap 
executable')\r\n\r\n #Argument options\r\n parser.add_argument('-q', type=str, metavar='file', nargs='+', help='list of fastq files', dest='fastqFiles')\r\n parser.add_argument('-r', type=str, metavar='file', nargs='+', help='list of files to use for reference sequences', dest='mappingRefSeqFiles')\r\n parser.add_argument('-j', type=str, metavar='file', nargs='+', help='list of files to use for junctions', dest='junctionRefSeqFiles')\r\n parser.add_argument('-o', type=str, metavar='directory', help='output directory', dest='outputDir')\r\n \r\n group = parser.add_mutually_exclusive_group()\r\n group.add_argument('-p', '--paired', action='store_true', help='fastq files have paired ends', dest='pairedEnds')\r\n group.add_argument('-s', '--single', action='store_false', help='fastq files have single ends', dest='pairedEnds')\r\n parser.add_argument('-n', type=int, metavar='cpus', help='number of processors to use', dest='nProcs')\r\n parser.add_argument('-c', type=str, metavar='config_file', help='location of config file', dest='configFile')\r\n parser.add_argument('-v', action='store_true', help='turns on verbosity', dest='verbose')\r\n\r\n parser.add_argument('--aligner_order', type=str, metavar='{'+','.join(mapperAbbrs.keys())+'}', help='mapper sequence as a string. ie CSC', dest='alignerOrder')\r\n parser.add_argument('--first_iter', metavar='n', type=int, help='first iteration', dest='firstIter')\r\n parser.add_argument('--max_iters', metavar='n', type=int, help='maximum iterations', dest='maxIters')\r\n parser.add_argument('--read_length', metavar='n', type=int, help='read length', dest='readLength')\r\n parser.add_argument('--read_type', type=str, help='read type', choices=['CS','NT'], dest='readType')\r\n parser.add_argument('--req_frag_conv', help='require frags to converge as well as SNVs', action='store_true', dest='reqFragConv')\r\n parser.add_argument('--no-req_frag_conv', help='does not require frags to converge as well as SNVs', action='store_false', dest='reqFragConv')\r\n\r\n parser.add_argument('--frag_maker_thresh',type=float, metavar='threshold', help='verified frag maker threshold', dest='fragMakerThresh')\r\n parser.add_argument('--frag_thresh', type=float, metavar='threshold', help='unverified frag maker threshold', dest='fragThresh')\r\n parser.add_argument('--min_absolute_cover', type=int, metavar='n', help='minimum absolute cover', dest='minAbsoluteCover')\r\n parser.add_argument('--snv_thresh', type=float, metavar='threshold', help='SNV threshold', dest='SNVthresh')\r\n parser.add_argument('--snv_type2_thresh', type=float, metavar='threshold', help='SNV type 2 threshold', dest='SNVtype2thresh')\r\n parser.add_argument('--snv_type3_thresh', type=float, metavar='threshold', help='SNV type 3 threshold', dest='SNVtype3thresh')\r\n parser.add_argument('--roa_size', type=int, metavar='size', help='Size to use for region of analysis in DDiMAP', dest='roaSize')\r\n\r\n group = parser.add_mutually_exclusive_group()\r\n group.add_argument('--use_DI', action='store_true', help='use reads mapped with deletion and insertion', dest='useDI')\r\n group.add_argument('--no-use_DI', action='store_false', help='do not use reads mapped with deletion and insertion', dest='useDI')\r\n\r\n parser.add_argument('--cushaw_opts', type=str, metavar=\"'options'\", help='cushaw specific options', dest='cushawOpts')\r\n parser.add_argument('--shrimp_opts', type=str, metavar=\"'options'\", help='shrimp specific options', dest='shrimpOpts')\r\n parser.add_argument('--bwamem_opts', type=str, 
metavar=\"'options'\", help='bwa-mem specific options', dest='bwaMemOpts')\r\n parser.add_argument('--novo_opts', type=str, metavar=\"'options'\", help='novoalign specific options', dest='novoOpts')\r\n\r\n\r\n #Parse args and check for config file\r\n args = parser.parse_args()\r\n if args.configFile:\r\n configFile = args.configFile\r\n if not path.isfile(configFile):\r\n print 'config file specified, but not found'\r\n exit(1)\r\n else:\r\n configFile = 'DDiMap.cfg'\r\n\r\n #Read in settings from config file\r\n Settings = read_config(configFile)\r\n\r\n # Loop over each section and replace values with those passed in on command line. \r\n # Also create a local variable that matches the keys in the settings dictionary.\r\n\r\n for section in Settings.keys():\r\n for key in Settings[section].keys():\r\n if getattr(args, key):\r\n Settings[section][key] = getattr(args, key)\r\n exec '%s = Settings[section][key]' % key\r\n if key in required and not Settings[section][key]:\r\n print '%s not specified on command line or in config file. Aborting...' % key\r\n print Settings[section][key]\r\n parser.print_help()\r\n exit(1)\r\n if (type(Settings[section][key]) == list):\r\n Settings[section][key] = ', '.join(Settings[section][key])\r\n\r\n if useDI: # reads with CIGARs containing both I and D are processed\r\n kFlag='-k'\r\n else: # reads with CIGARs containing both I and D are not processed\r\n kFlag=''\r\n\r\n if pairedEnds:\r\n pair_str='P'\r\n else:\r\n pair_str='S'\r\n\r\n # do the work - set up for the iteration\r\n aligners = list(alignerOrder)\r\n iterMin = len(aligners)\r\n iterMax = max(maxIters, iterMin); # always do as many iters as are in alignerOrder string\r\n aligners = aligners + list(repeat(aligners[-1], iterMax - iterMin)) # define the aligner ID sequence to be used over the iterations\r\n\r\n\r\n # Make paths absolute\r\n fastqFiles = [path.abspath(x) for x in fastqFiles]\r\n mappingRefSeqFiles = [path.abspath(x) for x in mappingRefSeqFiles]\r\n junctionRefSeqFiles = [path.abspath(x) for x in junctionRefSeqFiles]\r\n outputDir = path.abspath(outputDir) + '/'\r\n\r\n # Make sure the output directory exists\r\n\r\n if not path.isdir(outputDir):\r\n makedirs(outputDir)\r\n\r\n # Write configuration file in outputDir\r\n write_config(outputDir, Settings)\r\n\r\n # INITIAL VALUES OF LOOP CONTROL PARAMETERS\r\n converged = False\r\n prevFragList = [] # this will be replaced by counts of fragments created for each baseline refernce sequence\r\n prevSNVList = [] # this will be replaced by counts of SNV candidates found for each baseline reference sequence\r\n\r\n thisIter = firstIter\r\n\r\n\r\n for RefSeqFile in fastqFiles:\r\n if not path.isfile(RefSeqFile):\r\n print 'Unable to find fastqFile at ' + RefSeqFile\r\n exit(1)\r\n\r\n # Delete old enhanced fast file if present. 
It should never be...\r\n\r\n enhancedFastaFile = outputDir + 'refSeqEnhanced.fa'\r\n if path.isfile(enhancedFastaFile): # see if one is already here - need to zap it\r\n remove(enhancedFastaFile) # remove if present because fastawrite appends to existing files\r\n output_handle = open(enhancedFastaFile, 'a')\r\n\r\n # Add reference sequences to file with _Ref tag\r\n RefSeqs=[]\r\n for RefSeqFile in mappingRefSeqFiles:\r\n\tprint 'ref seq file = ' + RefSeqFile\r\n if not path.isfile(RefSeqFile):\r\n print 'Unable to find RefSeqFile at ' + RefSeqFile\r\n exit(1)\r\n RefSeqs = RefSeqs + list(SeqIO.parse(RefSeqFile, 'fasta'))\r\n if (RefSeqs):\r\n formattedRefSeqs = add_ref_tag(RefSeqs)\r\n SeqIO.write(formattedRefSeqs, output_handle, 'fasta') # modified MATLAB fastawrite to not put in extra newlines\r\n\r\n \r\n # Create junctions if they are needed and then add to ref seq file as mapping targets for chimeric reads\r\n RefSeqs=[]\r\n for RefSeqFile in junctionRefSeqFiles:\r\n if not path.isfile(RefSeqFile):\r\n print 'Unable to find RefSeqFile at ' + RefSeqFile\r\n exit(1)\r\n RefSeqs = RefSeqs + list(SeqIO.parse(RefSeqFile, 'fasta'))\r\n if (RefSeqs):\r\n formattedRefSeqs = add_ref_tag(RefSeqs)\r\n junctionSeqs = make_junctions(formattedRefSeqs,readLength);\r\n SeqIO.write(junctionSeqs, output_handle, 'fasta') # modified MATLAB fastawrite to not put in extra newlines\r\n\r\n output_handle.close() \r\n\r\n\r\n # allows restarts\r\n if thisIter > 1: # there is no previous iteration, so start fresh\r\n prevWorkingDir = outputDir + ('Gen%d/' % (thisIter-1))\r\n for i in range(1, thisIter):\r\n prevWorkingDir = '%sGen%d/' % (outputDir, i) \r\n fragFile = prevWorkingDir + 'fasta.fa'\r\n snvFile = prevWorkingDir + 'snv.csv'\r\n ddimap_convergence_test(fragFile, snvFile, prevFragList, prevSNVList, reqFragConv)\r\n\r\n\r\n while not converged and thisIter <= iterMax:\r\n \r\n print '======= Iteration %d of %d ========' % (thisIter, iterMax)\r\n\r\n # creates working dir if not present\r\n thisWorkingDir = outputDir + ('Gen%d/' % thisIter)\r\n if path.isdir(thisWorkingDir):\r\n rmtree(thisWorkingDir)\r\n makedirs(thisWorkingDir)\r\n \r\n # Delete old enhanced fast file if present. 
It should never be...\r\n enhancedFastaFile = thisWorkingDir + 'refSeqEnhanced.fa'\r\n if path.isfile(enhancedFastaFile): \r\n remove(enhancedFastaFile) \r\n copyfile(outputDir + 'refSeqEnhanced.fa', enhancedFastaFile)\r\n\r\n output_handle = open(enhancedFastaFile, 'a')\r\n \r\n # Append frags from previous iteration if any (these sequences are tagged as fragments when the file is written by DDiMAP)\r\n if (thisIter > 1):\r\n prevFragFile=prevWorkingDir + '/fasta.fa'\r\n if path.isfile(prevFragFile) and path.getsize(prevFragFile) > 0:\r\n fragSeqs=list(SeqIO.parse(prevFragFile, 'fasta'))\r\n SeqIO.write(fragSeqs, output_handle, 'fasta') # modified MATLAB fastawrite to not put in extra newlines\r\n\r\n output_handle.close() \r\n\r\n # Setup variables for aligner\r\n thisAligner=aligners[thisIter-1]\r\n thisAligned='DDiMAP_%s' % thisAligner\r\n \r\n if path.isfile(thisWorkingDir + 'mapper.log'):\r\n remove(thisWorkingDir + 'mapper.log')\r\n\r\n if not ','.join([thisAligner,readType,pair_str]) in aligner_dict.keys():\r\n print mapperAbbrs[thisAligner] + ' does not support ' + readType + ' read type with ' + ('paired ends' if pairedEnds else 'non paired ends')\r\n exit(1)\r\n\r\n\r\n # execute commands for aligner\r\n\r\n open(thisWorkingDir + 'mapper.log', 'w').close()\r\n if verbose:\r\n b=Popen(['tail', '-F', thisWorkingDir + 'mapper.log'])\r\n\r\n # set substitutions for aligner commands\r\n commandsubs={'DDiFastq1':fastqFiles[0], \r\n 'DDiProcs':nProcs, \r\n 'DDiFasta':enhancedFastaFile, \r\n 'DDiBMF':thisAligned + '.bmf', \r\n 'DDiBAF':thisAligned + '.baf', \r\n 'DDiSAM':thisAligned + '.sam',\r\n 'DDiNIX':thisAligned + '.nix', \r\n 'DDiAligned':thisAligned, \r\n 'CushawOpts':cushawOpts, \r\n 'ShrimpOpts':shrimpOpts, \r\n 'BwaMemOpts':bwaMemOpts, \r\n 'NovoOpts':novoOpts}\r\n\r\n if (len(fastqFiles) > 1):\r\n commandsubs['DDiFastq2']=fastqFiles[1]\r\n\r\n for command in aligner_dict[','.join([thisAligner,readType,pair_str])]:\r\n cmdlist=re.split('\\s*',command)\r\n #remove empty arguments and subsitute in values from commandsubs \r\n args=filter(None,[str(commandsubs[x]) if x in commandsubs.keys() else x for x in cmdlist])\r\n args=re.split('\\s*',' '.join(args)) \r\n print ' '.join(args) # output actual command\r\n if 'DDiFastq2' in args: #This hasn't been substituted because one wasn't provided\r\n print mapperAbbrs[thisAligner] + ' expects 2 fastq files for use with ' + readType + ' read type with ' + ('paired ends' if pairedEnds else 'non paired ends')\r\n exit(1)\r\n\r\n # Now we need to detect stdout redirection and do it properly using pOpen\r\n if '>' in args: \r\n i = args.index('>')\r\n outfile = args[i+1]\r\n del args[i:i+2]\r\n else:\r\n outfile = None\r\n \r\n log_file = open(thisWorkingDir + 'mapper.log', 'a')\r\n \r\n if (outfile):\r\n with open(thisWorkingDir + outfile, 'w') as output_file:\r\n a=Popen(args, cwd=thisWorkingDir, stdout=output_file, stderr=log_file)\r\n else:\r\n a=Popen(args, cwd=thisWorkingDir, stderr=log_file, stdout=log_file)\r\n\r\n success=a.wait()\r\n log_file.close()\r\n if not success == 0:\r\n print '*** mapper exited with error', success\r\n print 'See ' + thisWorkingDir + 'mapper.log' + ' for more details'\r\n exit(success)\r\n\r\n if verbose:\r\n b.terminate()\r\n # Perform sam to bam conversion for DDiMap\r\n args=['samtools', 'view', '-b', '-S', '-o', thisAligned + '.bam', thisAligned + '.sam']\r\n print ' '.join(args) \r\n\r\n open(thisWorkingDir + 'samtools.log', 'w').close()\r\n if verbose:\r\n b=Popen(['tail', '-F', thisWorkingDir + 
'samtools.log'])\r\n log_file = open(thisWorkingDir + 'samtools.log', 'w')\r\n a=Popen(args, cwd=thisWorkingDir, stderr=log_file, stdout=log_file)\r\n success=a.wait()\r\n log_file.close()\r\n if verbose:\r\n b.terminate()\r\n if not success == 0:\r\n print '*** samtools exited with error', success\r\n print 'See ' + thisWorkingDir + 'samtools.log' + ' for more details' \r\n exit(success)\r\n # remove the uncompressed sam file\r\n args=['rm', thisAligned + '.sam'];\r\n a=Popen(args, cwd=thisWorkingDir)\r\n\r\n # now run the DDiMAP code\r\n thisAlignedFile = thisWorkingDir + thisAligned + '.bam'\r\n args = (['DDiMAP', kFlag, '-r', roaSize, '-f', enhancedFastaFile, '-b', \r\n thisAlignedFile, '-c', minAbsoluteCover, '-n', fragThresh, '-a', \r\n fragMakerThresh, '-p', SNVthresh, '-s', SNVtype2thresh, '-l', \r\n SNVtype3thresh, '-o', thisWorkingDir])\r\n args = [str(x) for x in args]\r\n print ' '.join(args)\r\n open(thisWorkingDir + 'DDiMap.log', 'w').close()\r\n if verbose:\r\n b=Popen(['tail', '-F', thisWorkingDir + 'DDiMap.log'])\r\n log_file = open(thisWorkingDir + 'DDiMap.log', 'a')\r\n a = Popen(args, cwd=thisWorkingDir, stdout=log_file, stderr=log_file)\r\n success=a.wait()\r\n if verbose:\r\n b.terminate()\r\n log_file.close()\r\n if not success == 0:\r\n print '*** DDiMap exited with error', success\r\n print 'See ' + thisWorkingDir + 'DDiMap.log' + ' for more details'\r\n exit(success)\r\n \r\n # now check for convergence\r\n \r\n fragFile = thisWorkingDir + 'fasta.fa'\r\n snvFile = thisWorkingDir + 'snv.csv'\r\n \r\n # call to the convergence test matlab function\r\n # result history kept in currFrags/prevFrags and currSNVs/prevSNVs\r\n \r\n if ddimap_convergence_test(fragFile, snvFile, prevFragList, prevSNVList, reqFragConv):\r\n print 'Convergence found. Stopping...'\r\n break\r\n\r\n prevWorkingDir = thisWorkingDir; # all done with the previous, this will be the next iteration previous directory\r\n thisIter = thisIter+1\r\n else:\r\n print 'Failed to converge'\r\n\r\n print '%10s %10s %10s' % ('Iteration', 'nFrags', 'nSNVs')\r\n for i, (frags, snvs) in enumerate(zip(prevFragList, prevSNVList)):\r\n print '%10d %10d %10d' % (i+1, sum(frags), sum(snvs))\r\n\r\n # put final results into outputDir\r\n # make renamed copies of the final iteration result files, naming them using\r\n copyfile(thisWorkingDir+'fasta.fa',outputDir+'convergedFrags.fa')\r\n copyfile(thisWorkingDir+'dictionary.csv',outputDir+'convergedDictionary.csv')\r\n copyfile(thisWorkingDir+'snv.csv',outputDir+'convergedSNVs.csv')\r\n copyfile(thisWorkingDir+'coverage.csv',outputDir+'convergedCoverage.csv')\r\n copyfile(thisWorkingDir+'refSeqEnhanced.fa',outputDir+'convergedEnhancedRefSeqs.fa')", "def map_reads(SRA):\n\n #1. bowtie to rRNA\n print(\"Bowtie alignement on contaminant RNA...\")\n cmd_bowtie = 'bowtie'+ ' ' + '-a' + ' ' + '-p6' + ' ' + '-S' + ' ' + '--un' + ' ' + TMP_DIR+SRA+'_rrnaUnmapped.fastq' + ' ' + BOWTIE_DIR+'/rRNA' + ' ' + TMP_DIR+SRA+'_trimmed.fastq' + ' ' + '|' + ' ' + 'samtools view -@ 6 -bS' + ' ' + '>' + TMP_DIR+SRA+'_trimmed_rrna.bam'\n output = subprocess.run(cmd_bowtie, shell=True)\n\n # 2. STAR to ref genome\n print(\"STAR alignement to yeast genome...\")\n cmd_STAR = 'STAR --outSAMtype BAM Unsorted --runThreadN 6 --winAnchorMultimapNmax 200 --seedSearchStartLmax 15 --genomeDir' + ' ' + STAR_DIR + ' ' + '--readFilesIn' + ' ' + TMP_DIR+SRA+'_rrnaUnmapped.fastq' + ' ' + '--outFileNamePrefix' + ' ' + TMP_DIR+SRA+'_STAR_'\n output = subprocess.run(cmd_STAR, shell=True)\n\n # 3. 
Samtools keep uniquely mapped reads and sort\n print(\"Samtools to keep uniquely mapped reads and sort...\")\n cmd_samtools1 = 'samtools view -@ 6 -b -q 255 -o' + ' ' + TMP_DIR+SRA+'_yeast_uniqueMapped_reads.bam' + ' ' + TMP_DIR+SRA+'_STAR_Aligned.out.bam'\n output = subprocess.run(cmd_samtools1, shell=True)\n\n cmd_samtools2 = 'samtools sort -@ 6 -o' + ' ' + TMP_DIR+SRA+'_yeast_uniqueMapped_reads_sorted.bam' + ' ' + TMP_DIR+SRA+'_yeast_uniqueMapped_reads.bam'\n output = subprocess.run(cmd_samtools2, shell=True)\n\n cmd_samtools3 = 'samtools index' + ' ' + TMP_DIR+SRA+'_yeast_uniqueMapped_reads_sorted.bam'\n output = subprocess.run(cmd_samtools3, shell=True)", "def question2():\n \n # load sequences and scoring matrix\n score_matrix = read_scoring_matrix(PAM50_URL)\n human_seq = \"HSGVNQLGGVFVNGRPLPDSTRQKIVELAHSGARPCDISRILQVSNGCVSKILGRYYETGSIRPRAIGGSKPRVATPEVVSKIAQYKRECPSIFAWEIRDRLLSEGVCTNDNIPSVSSINRVLRNLASEKQQ\"\n frfly_seq = \"HSGVNQLGGVFVGGRPLPDSTRQKIVELAHSGARPCDISRILQVSNGCVSKILGRYYETGSIRPRAIGGSKPRVATAEVVSKISQYKRECPSIFAWEIRDRLLQENVCTNDNIPSVSSINRVLRNLAAQKEQQ\"\n consensus_pax = read_protein(CONSENSUS_PAX_URL)\n \n # compute human and fruitfly global alignment matrix with consensus pax\n human_align_matrix = student.compute_alignment_matrix(human_seq, consensus_pax, score_matrix, True)\n frfly_align_matrix = student.compute_alignment_matrix(frfly_seq, consensus_pax, score_matrix, True)\n \n # compute human and fruitfly global alignment sequences\n score_human, human_align, consensus_align = student.compute_global_alignment(human_seq, consensus_pax, \n score_matrix, human_align_matrix)\n score_fly, frfly_align, consensus_align_2 = student.compute_global_alignment(frfly_seq, consensus_pax,\n score_matrix, frfly_align_matrix)\n \n # compute percentages match for human and fruitfly\n human_count = 0.0\n for index in range(len(human_align)):\n if human_align[index] == consensus_align[index]:\n human_count += 1\n \n frfly_count = 0.0\n for index in range(len(frfly_align)):\n if frfly_align[index] == consensus_align_2[index]:\n frfly_count += 1\n \n print \"% Human: \" + str(human_count / len(human_align) * 100)\n print \"Hmn: \" + human_align\n print \"PAX: \" + consensus_align\n \n print \"\"\n \n print \"% FrFly: \" + str(frfly_count / len(frfly_align) * 100)\n print \"Fly: \" + frfly_align\n print \"PAX: \" + consensus_align_2", "def build_alignment(self,score,pieces):\n\t \t# build text\n\t\tself.open_seqs()\n\t\ttext1 = text2 = \"\"\n\t\tend1 = end2 = None\n\t\tfor (start1,start2,length,pctId) in pieces:\n\t\t\tif (end1 != None):\n\t\t\t\tif (start1 == end1): # insertion in sequence 2\n\t\t\t\t\ttext1 += self.seq1_gap * (start2-end2)\n\t\t\t\t\ttext2 += self.seq2_file.get(end2,start2-end2)\n\t\t\t\telse: # insertion in sequence 1\n\t\t\t\t\ttext1 += self.seq1_file.get(end1,start1-end1)\n\t\t\t\t\ttext2 += self.seq2_gap * (start1-end1)\n\n\t\t\ttext1 += self.seq1_file.get(start1,length)\n\t\t\ttext2 += self.seq2_file.get(start2,length)\n\t\t\tend1 = start1 + length\n\t\t\tend2 = start2 + length\n\t\t# create alignment\n\t\tstart1 = pieces[0][0]\n\t\tstart2 = pieces[0][1]\n\t\tend1 = pieces[-1][0] + pieces[-1][2]\n\t\tend2 = pieces[-1][1] + pieces[-1][2]\n\t\tsize1 = end1 - start1\n\t\tsize2 = end2 - start2\n\t\ta = Alignment(score=score,species_to_lengths=self.species_to_lengths)\n\t\t#if (self.seq1_strand == \"-\"): start1 = self.seq1_file.length - end1\n\t\ta.add_component(Component(self.seq1_src,start1,size1,self.seq1_strand,text=text1))\n\t\t#if (self.seq2_strand == \"-\"): start2 = 
self.seq2_file.length - end2\n\t\ta.add_component(Component(self.seq2_src,start2,size2,self.seq2_strand,text=text2))\n\t\treturn a", "def alignScore():\n matrix = mapMatrix(\"BLOSUM62\")\n \n path = \"./data/\"\n for file in os.listdir(path):\n if file.endswith(\".fa\") or file.endswith(\".fasta\"):\n sequences = []\n input_sequences = SeqIO.parse(path + file, \"fasta\", \\\n IUPAC.protein)\n\n for record in input_sequences:\n seq = str(record.seq)\n sequences.append(seq) \n \n SumOfPairs = 0\n for pair in combinations(sequences, 2): \n SumOfPairs += pairwiseScore(pair[0], pair[1], matrix)\n \n print SumOfPairs", "def alnPrank(data):\n\taln = runPrank(data.ORFs, \n\t\t data.o)\n\tdata.aln = aln", "def main():\n\n # Define the names of required input files, and other main configuration variables\n protein_w_underscores = os.getcwd().split('/')[-1]\n protein = protein_w_underscores.replace('_', ' ')\n pdbfile = 'pdb_structure.pdb' # the name of the PDB file\n pdbchain = None # chain in pdbfile -- there is only one chain, so not relevant here\n seqfile = 'protseq.txt' # file containing the protein sequence\n ddgdatafile = 'ddG_data.txt' # file containing the literature-culled ddG values\n ddgdatafile_warning = False # warn if ddgdatafile has conflicting ddG values for a mutation\n alignment_file = \"uniref_alignment-gaps_lt_0.1-identities_gt_0.5.fasta\" # file with aligned sequences\n phylip_path = '/Users/bloom/phylip-3.67/exe/' # path to phylip phylogeny program\n\n # Define the names of files that will be created by the script if they do not already exist\n cupsatfile = 'CUPSAT_ddGs.txt' # contains the ddG values from CUPSAT\n treefile = \"tree.newick\" # phylogenetic tree created by phylip\n phylipsequencefile = \"phylip_sequence_file\" # phylip input sequence file\n phylipdistancefile = \"phylip_distance_file\" # phylip distance matrix\n pipsddgsfile = \"pips_ddgs.txt\" # pips ddgs file\n regularizingpriorpipsddgsfile = 'pips_ddgs_with_regularizing_priors.txt' # pips ddgs file calculated with regularizing priors\n hydrophobicitypriorpipsddgsfile = 'pips_ddgs_with_hydrophobicity_priors.txt' # pips ddgs file calculated with hydrophobicity priors\n\n # Begin execution of the program\n seq = open(seqfile).read().strip() # read in protein sequence\n\n # Get the ddG values from CUPSAT and store in the dictionary cupsat_ddgs. 
Note that\n # in this and all subsequent ddG dictionaries, the first residue is numbered as 0.\n print \"\\nObtaining CUPSAT ddG values...\"\n sys.stdout.flush()\n if os.path.isfile(cupsatfile): # ddG values already obtained, just read from file\n (datetime, cupsat_ddgs) = pips.ddg_inference.ReadDDGs(cupsatfile)\n print \"Read the stored CUPSAT values from %s from the file %s.\" % (datetime, cupsatfile)\n else: # we need to obtain the ddG values from the CUPSAT webserver\n datetime = time.asctime()\n print \"Beginning to calculate and download CUPSAT ddGs at %s...\" % datetime\n sys.stdout.flush()\n cupsat_ddgs = pips.cupsat.RunCUPSAT(pdbfile, seq, pdbchain)\n pips.ddg_inference.WriteDDGs(cupsat_ddgs, cupsatfile, datetime)\n print \"Completed download of CUPSAT ddG values, stored in the file %s.\" % cupsatfile\n rescaled_cupsat_ddgs = pips.ddg_inference.RescaleDDGs(cupsat_ddgs, 10.0, '10TH_TO_90TH', recenter=5.0, min_max=(-3.0, 13.0)) \n\n # Read the literature-culled ddG data from ddgdatafile and store in the dictionary ddg_data\n print \"\\nReading the literature-culled ddG data from %s...\" % ddgdatafile\n sys.stdout.flush()\n ddgmatch = re.compile(\"^(?P<wt>[A-Y])(?P<r>\\d+)(?P<mut>[A-Y])\\s+(?P<ddg>\\-{0,1}\\d+\\.\\d+)$\")\n ddg_data = {}\n for r in range(len(seq)):\n rdict = {}\n wt = seq[r]\n for aa in pips.ddg_inference.AminoAcids():\n if aa != wt:\n rdict[aa] = []\n ddg_data[r] = (wt, rdict)\n for line in open(ddgdatafile).readlines(): # loop over all lines in ddgdatafile\n if line[0] == '#':\n continue # line is a comment\n m = ddgmatch.search(line.strip()) # match the ddG value\n if not m:\n raise ValueError, \"Cannot read ddG value of %s\" % line\n (wt, r, mut, ddg) = (m.group('wt'), int(m.group('r')), m.group('mut'), float(m.group('ddg')))\n r -= 1 # we decrement r because we are calling the first residue 0\n if seq[r] != wt:\n raise ValueError, \"Wildtype residue does not match protein sequence in %s\" % line\n ddg_data[r][1][mut].append(ddg) \n nddgs = 0\n ddgslist = []\n for (r, (wt, rddgs)) in ddg_data.iteritems():\n for mut in rddgs.iterkeys():\n if not rddgs[mut]:\n rddgs[mut] = None # no ddG value\n else:\n nddgs += 1\n ddg0 = rddgs[mut][0]\n allthesame = True\n for ddgi in rddgs[mut][1 : ]: # see if all ddG values are the same for mutation\n if ddgi != ddg0:\n allthesame = False\n if allthesame: # all of the ddG values are the same, take this value\n rddgs[mut] = ddg0\n ddgslist.append(ddg0)\n else: # ddG values differ, print warning and take the average value\n ddg = pips.stats.Mean(rddgs[mut])\n if ddgdatafile_warning:\n print \"WARNING: Mutation %s%d%s has multiple ddG values of\" % (wt, r + 1, mut),\n for ddgi in rddgs[mut]:\n print \"%.2f\" % ddgi,\n print \"--- taking the average value of %.2f.\" % ddg\n sys.stdout.flush()\n rddgs[mut] = ddg\n ddgslist.append(ddg)\n print \"Read a total of %d different ddG values from %s. 
The mean value is %.2f, the maximum value is %.2f, and the minimum value is %.2f.\" % (nddgs, ddgdatafile, pips.stats.Mean(ddgslist), max(ddgslist), min(ddgslist))\n\n # Read the aligned sequences (into sequences), give short names for phylip\n sequences = pips.fasta.Read(alignment_file)\n nsequences = len(sequences)\n sequences = [(\"SEQ%d\" % (i + 1), sequences[i][1]) for i in range(nsequences)] # rename \n pips.fasta.Write(sequences, 'renamed_alignment.fasta')\n sequences = pips.align.StripGapsToFirstSequence(sequences) \n print \"\\nThere are %d sequences in the alignment.\" % nsequences\n\n # Construct the phylogenetic tree\n if os.path.isfile(treefile):\n print \"A phylogenetic tree has already been constructed for these sequences, and is being read from %s.\" % treefile\n newick_tree = open(treefile).read()\n else:\n print \"Constructing a phylogenetic tree for these sequences...\"\n sys.stdout.flush()\n pips.phylip.WritePhylipSequenceFile(sequences, phylipsequencefile)\n open(phylipdistancefile, 'w').write(pips.phylip.Protdist(phylipsequencefile, phylip_path))\n newick_tree = pips.phylip.DistanceTree(phylipdistancefile, phylip_path, molecular_clock=True, neighbor_joining=True)\n print \"Finished constructing the phylogenetic tree, writing it to %s.\" % treefile\n sys.stdout.flush()\n open(treefile, 'w').write(newick_tree)\n\n # Perform the pips analysis\n sequences = pips.fasta.UnknownsToGaps(sequences) # replace unknown amino acids with gaps\n random.seed(1) # seed the random number generator to make output predictable\n (datetime, pips_ddgs) = pips.ddg_inference.ReadDDGs(pipsddgsfile)\n\n # Read things in with the new pips\n tree = pips.tree.Tree(newick_tree, tipnames_sequences=sequences) # phylogenetic tree data\n ddgset = pips.ddg_inference.DDGSet(seq, tree, ('TRANSITION_TRANSVERSION_RATIO', 0.5), ('SPECIFIED', pips_ddgs, 0, 0), ('BETA', 3, ('KYTE_DOOLITTLE_HYDROPHOBICITY', 1, 0)), 5.0, underflow=5, runtestcode=False)\n ddgset.MaximizePosterior(nrandomstarts=1, printprogress=True)\n new_pips_ddgs = ddgset.DDGDict()\n pips.ddg_inference.WriteDDGs(new_pips_ddgs, 'new_pips_ddgs.txt', time.asctime())\n\n # Get the consensus ddG\n consensus_ddgs = pips.ddg_inference.ConsensusDDGs(seq, sequences, pseudocounts=1)\n\n sys.exit()\n\n # Perform analysis of correlations, and make pylab plots\n print \"\\nAnalysis of correlations to experimental ddG values...\"\n ddgtypes = ['actual', 'CUPSAT', 'consensus', '\\\\begin{tabular}{c} PIPS with \\\\\\\\ informative prior \\end{tabular}', '\\\\begin{tabular}{c} PIPS with \\\\\\\\ regularizing prior \\end{tabular}', '\\\\begin{tabular}{c} PIPS with \\\\\\\\ hydrophobicity prior \\end{tabular}']\n zippedlists = pips.ddg_inference.ZippedDDGLists(ddg_data, cupsat_ddgs, consensus_ddgs, pips_ddgs, pips_ddgs_regularizing, pips_ddgs_hydrophobicity)\n mutations = zippedlists[0]\n nmutations = len(mutations)\n ddgs = dict([(ddgtypes[i], zippedlists[i + 1]) for i in range(len(ddgtypes))])\n pylab.rc('text', usetex=True)\n nplots = len(ddgtypes) - 1 # number of different plots\n invnplots = 1.0 / nplots\n (xscale, yscale) = (2.8, 2.5) # each plot covers a rectangle of this size, in inches\n bottom = 1.06\n (tmargin, bmargin, lmargin, rmargin) = (0.03, 0, 0.22, 0.03)\n fig = pylab.figure(figsize=(xscale * (1 + lmargin + rmargin), 3 * yscale * (1 + tmargin + bmargin) * bottom))\n figaxes = pylab.axes([0, 0, 1, 1])\n figaxes.axison = False\n iplot = 0\n maxticks = 5\n (xmin, xmax) = (int(round(min(ddgs['actual'])) - 1), int(round(max(ddgs['actual'])) + 1))\n 
xtick = 1\n while (xmax - xmin) / float(xtick) > maxticks:\n xtick += 1\n nxticks = int(math.ceil((xmax - xmin) / float(xtick)))\n xticks = [x for x in range(xmin, xmin + nxticks * xtick + 1, xtick)]\n xticklocator = matplotlib.ticker.FixedLocator(xticks)\n xtickformatter = matplotlib.ticker.FixedFormatter([\"%d\" % x for x in xticks])\n for ddgtype in ddgtypes[1 : ]:\n if ddgtype == ddgtypes[-1]:\n xlabel = 'experimental $\\Delta\\Delta G$ values'\n else:\n xlabel = ''\n (r, p, npoints) = pips.stats.PearsonCorrelation(ddgs['actual'], ddgs[ddgtype])\n axes = pylab.axes([lmargin, 1.0 - invnplots * (1 + iplot + bmargin) / bottom, 1.0 - rmargin - lmargin, invnplots * (1.0 - tmargin - bmargin) / bottom], xlabel=xlabel, ylabel=ddgtype)\n nolabels = matplotlib.ticker.NullFormatter()\n (ymin, ymax) = (int(round(min(ddgs[ddgtype])) - 1), int(round(max(ddgs[ddgtype])) + 1))\n ytick = 1\n while (ymax - ymin) / float(ytick) > maxticks:\n ytick += 1\n nyticks = int(math.ceil((ymax - ymin) / float(ytick)))\n yticks = [y for y in range(ymin, ymin + nyticks * ytick + 1, ytick)]\n yticklocator = matplotlib.ticker.FixedLocator(yticks)\n ytickformatter = matplotlib.ticker.FixedFormatter([\"%d\" % y for y in yticks])\n axes.xaxis.set_major_locator(xticklocator)\n axes.yaxis.set_major_locator(yticklocator)\n axes.yaxis.set_major_formatter(ytickformatter)\n if ddgtype != ddgtypes[-1]:\n axes.xaxis.set_major_formatter(nolabels)\n else:\n axes.xaxis.set_major_formatter(xtickformatter)\n iplot += 1\n pylab.text(0.64, 0.14, '$R^2 = %.2f$' % r**2, transform=axes.transAxes, ha='left', va='top', size=14)\n pylab.scatter(ddgs['actual'], ddgs[ddgtype], figure=fig, axes=axes)\n pylab.savefig(\"%s_vertical_plot.eps\" % protein_w_underscores)\n\n pylab.show()", "def TrinityAnnotation(Trinityfa,fileBlast6,GIfa,output):\n from Bio import SeqIO\n myTrinity = list(SeqIO.parse(Trinityfa,\"fasta\"))\n mygislist = list(SeqIO.parse(GIfa,\"fasta\"))\n mygis = {}\n for giseq in mygislist:\n mygis[giseq.id.split(\"|\")[1]] = giseq\n mylist = open(fileBlast6,\"r\").readlines()\n fout = open(output,\"w\")\n sqdic = {}\n queryset = set()\n for ele in mylist:\n subject = ele.split()[1] +\"|\" + ele.split()[3]+\" Identity: \" + ele.split()[2]\n query = ele.split()[0]\n if query not in queryset:\n sqdic[query]=subject\n queryset.add(query)\n for ele in myTrinity:\n if ele.id not in queryset:\n fout.write(ele.id +\"\\t\"+\"NA\\n\")\n else:\n fout.write(ele.id +\"\\t\"+mygis[sqdic[ele.id].split(\"|\")[1]].description+\" QL:\"+\\\n str(len(ele.seq)) +\" SL: \"+str(len(mygis[sqdic[ele.id].split(\"|\")[1]].seq)) +\\\n \" ML: \"+ sqdic[ele.id].split(\"|\")[-1] +\"\\n\")\n fout.close()", "def draw_tre_and_aln(aln_region, tre_str, og, target_sp):\n #site, aa, score=ps_site\n\n # get leaves from tree string\n tstax = getleaf(tre_str)\n\n ts = TreeStyle()\n ts.margin_left = 5\n ts.margin_right = 30\n ts.margin_top = 20\n ts.tree_width = 50\n\n aln_region_str = aln_region.format(\"fasta\")\n\n t = PhyloTree(tre_str, alignment=aln_region_str, alg_format=\"fasta\")\n\n # interfact\n def _set_style(t):\n # input an t with alignment, add label to it\n \"\"\"\n info = TextFace(\"{}\\nCodon:{}\\nScore:{}\".format(og,site,score), fsize=8, fgcolor='black', ftype='Arial')\n info.margin_top = 10\n info.margin_right = 20\n info.margin_left = 5\n t.add_face(info, column=0, position=\"branch-bottom\")\n #t.add_face(TextFace(\"Codon:{}\".format(site)),column=0,position=\"branch-bottom\")\n \"\"\"\n ## label the longbranch\n nstyle = NodeStyle()\n # red line\n 
#nstyle[\"bgcolor\"] = \"DarkSeaGreen\"\n #nstyle[\"bgcolor\"] = \"LightSalmon\"\n nstyle[\"hz_line_type\"] = 0\n #nstyle[\"hz_line_color\"] = \"#ff0000\"\n for tst in tstax:\n tsnode = t.get_leaves_by_name(name=tst)[0]\n tsnode.set_style(nstyle)\n # add #1 to target species\n if target_sp in tsnode.name:\n tsnode.name = tsnode.name + \"_#1\"\n return t\n t = _set_style(t)\n ## add AA alignment\n def _trans_aln(aln_region):\n # input an coding aln\n aa_aln_str = \"\"\n for seq_obj in aln_region:\n dna = Seq(str(seq_obj.seq))\n aa_s = dna.translate(gap=\"-\")\n aa_aln_str += '>{}\\n{}\\n'.format(seq_obj.id, aa_s)\n #print(aa_aln_str)\n return aa_aln_str\n\n aa_aln_region_str = _trans_aln(aln_region)\n t_aa = PhyloTree(tre_str, alignment=aa_aln_region_str, alg_format=\"fasta\")\n t_aa = _set_style(t_aa)\n\n return t, t_aa, ts", "def parse(self, resprotein, ligands, bio=False, model=None, chain='all'):\n\n #ligands can be a list of ligands chosen by the user. Put all ligand atoms into a dict\n #where the keys are the (chain, ID) tuples then sort out after residue info has been loaded into a ResidueList\n\n with open(self.pdb) as fin:\n lines = fin.readlines()\n id_ = 0\n residues = residuenetwork.residues.ResidueList()\n ligand_dict = {key:[] for key in ligands}\n\n for line in lines:\n #load in residues\n if line[0:4] == 'ATOM' and line[13:15] == 'CA' and (line[21], line[22:27].strip()) not in ligands: #check this, CA could appear in preamble...\n name = line[12:16].strip()\n PDBnum = int(line[6:11])\n chain = line[21]\n res_num = line[22:27].strip()\n res_name = line[17:20].strip() + line[26].strip()\n \n try:\n bfactor = float(line[60:66])\n except ValueError:\n bfactor = None\n\n coordinates = np.array([float(line[30:38]),\n float(line[38:46]),\n float(line[46:54])])\n\n residues.append(residuenetwork.residues.Residue(id_, name, PDBnum, res_name, chain, res_num, coordinates, bfactor))\n id_ = id_ + 1\n\n #ligand atoms need to be loaded individually then turned into a single bead\n elif line.startswith('ATOM') or line.startswith('HETATM'):\n if (line[21], line[22:27].strip()) in ligands:\n name = line[12:16].strip()\n PDBnum = int(line[6:11])\n chain = line[21]\n res_num = line[22:27].strip()\n res_name = line[17:20].strip() + line[26].strip()\n\n coordinates = np.array([float(line[30:38]),\n float(line[38:46]),\n float(line[46:54])])\n\n ligand_dict[(chain, res_num)].append((coordinates, res_name))\n \n\n for ligand in ligands:\n chain = ligand[0]\n res_num = ligand[1]\n\n res_name = list(set([entry[1] for entry in ligand_dict[ligand]]))[0]\n coordinates = [entry[0] for entry in ligand_dict[ligand]]\n\n centre_of_mass = np.array(coordinates).sum(axis=0)/len(coordinates)\n\n residues.append(residuenetwork.residues.Residue(id_, name, PDBnum, res_name, chain, res_num, centre_of_mass, None))\n \n id_ = id_ + 1\n\n\n\n\n \n\n\n\n resprotein.residues = residues", "def read_in_file():\n\t# Declare variables\n\treads = []\n\n\t# Get command line arguments\n\targuments = sys.argv\n\targuments_length = len(arguments)\n\n\t# Read file is the first argument\n\tread_file_name = arguments[1]\n\n\t# Process read file \n\tread_file = open(read_file_name, 'r')\n\tfor line in read_file:\n\t\tread_info = line.split()\n\t\tread_string = read_info[2].replace('\\'', '')\n\t\tnew_read = GenerativeRead(read_string, [], read_info[5], read_info[3], None, [], read_info[0], read_info[1], read_info[4]) \n\t\treads.append(new_read)\n\tread_file.close()\n\n\t# Repeat regions file in the second 
argument\n\trepeat_file_name = arguments[2]\n\n\t# Process repeat file\n\trepeat_file = open(repeat_file_name, 'r')\n\talignments = [[]]\n\talignment_index = -1\n\tprevious_line = ''\n\n\n\tfor line in repeat_file:\n\t\talignment_info = line.split()\n\n\t\t# This consists of a tuple of alignment string, alignment start position and alignment chromosome\n\t\t#new_align = alignment_info[2], alignment_info[4], alignment_info[3]\n\n\t\tnew_align = Alignment(alignment_info[2], None, alignment_info[4], alignment_info[3])\n\n\t\tif previous_line != alignment_info[0]:\n\t\t\t# It is not a repeat\n\t\t\talignment_index = alignment_index + 1\n\t\t\talignments.append([])\n\t\t\tprevious_line = alignment_info[0]\n\n\t\talignments[alignment_index].append(new_align)\n\n\trepeat_file.close()\n\n\t# Associate each read with the other alignments\n\tfor read in reads:\n\t\t# Find the other alignments\n\t\tpos = read.get_position()\n\t\tfound = False\n\t\tfound_index = -1\n\n\t\tfor a_index, alignment_lists in enumerate(alignments):\n\t\t\t# find matching alignments\n\t\t\t# TODO: Don't add alignment already have\n\t\t\t# TODO: Make functional with filter\n\t\t\tfor align in alignment_lists:\n\t\t\t\tif align.get_position() == pos:\n\t\t\t\t\tfound = True\n\t\t\t\t\tfound_index = a_index\n\t\t\t\t\tbreak\n\n\t\t\tif found is True:\n\t\t\t\tbreak\n\n\t\tif found is True:\n\t\t\tfor new_align in alignments[found_index]:\n\t\t\t\tread.add_alignment(new_align)\n\t\t\t\n\n\n\t# SNP files are the remaining ones\n\tsnp_file_names = [arguments[file_id] for file_id in range(3, arguments_length) ]\n\n\t# Process SNP files\n\tfor file_name in snp_file_names:\n\t\tsnp_file = open(file_name, 'r')\n\n\t\tfor line in snp_file:\n\t\t\tsnp_info = line.split()\n\t\t\tsnps = snp_info[3].split('/')\n\t\t\tsnp_pos = int(float(snp_info[2]))\n\n\t\t\t# Ignore alleles that are longer than one base\n\n\t\t\t\n\t\t\tif all(len(x) < 2 for x in snps):\n\n\t\t\t\t# Iterate through reads and determine whether or not it contains this SNP\n\t\t\t\tpos_low = snp_pos - 49\n\t\t\t\n\n\t\t\t\tfor read in reads:\n\t\t\t\t\tpositions = read.get_alignment_positions()\n\n\t\t\t\t\tfor p_index, p in enumerate(positions):\n\t\t\t\t\t\tp = int(float(p))\n\t\t\t\t\t\tif p >= pos_low and p <= snp_pos:\n\t\t\t\t\t\t\t# Get index of snp\n\t\t\t\t\t\t\toffset = snp_pos - p\n\t\t\t\t\t\t\tcalls = [0, 0, 0, 0]\n\t\t\t\t\t\t\tfor snp in snps:\n\t\t\t\t\t\t\t\tcall_index = get_base_num(snp)\n\t\t\t\t\t\t\t\tcalls[call_index] = 1\n\n\t\t\t\t\t\t\t# Add the SNP to the read\n\t\t\t\t\t\t\tread.add_snp(p_index, offset, calls)\n\t\t\t\t\t\t\t\n\t\tsnp_file.close()\n\treturn reads", "def __init__(self,\n seq,\n aligned_index,\n unaligned_index):\n \n self.seq=seq\n self.aligned_index=aligned_index\n self.unaligned_index=unaligned_index\n self.numeric_seq=convert_to_numeric(self.seq)\n self.upstream_regions=[]\n self.downstream_regions=[]\n self.labels=[]\n self.match_count=0\n self.percent_match=0\n self.non_specific_hits=0\n self.non_specific_percent=0\n \n self.std_index = False\n self.f_std_index = None\n self.r_std_index = None", "def make_codon_pos_align(aln):\n\n def func(seq):\n dct = {-1: \"-\",\n 0: \"0\",\n 1: \"1\",\n 2: \"2\"}\n return \"\".join(util.mget(dct, mark_codon_pos(seq)))\n return mapalign(aln, valfunc=func)", "def map_RE(self, index):\n if index is None:\n self.logger.error(\"The bowtie genome index must be specified to \"\n \"map restriction enzyme sites\")\n return None\n self.logger.info(\"Mapping restriction enyzme recognition sites\")\n 
# Start bowtie as a subprocess\n mapping = subprocess.Popen(\n self.arguments + [index, '-'], stdin=subprocess.PIPE,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n # Send the raw sequence of the DpnII recognition site\n mapping.stdin.write(b'GATC')\n mapping.stdin.close()\n bed = {}\n total = 0\n # Retrieve the alignments from bowtie\n with mapping.stdout as f:\n for line in f:\n line = line.decode('UTF-8').split('\\t')\n chrom, start = line[2], int(line[3])\n stop = start + 4\n if chrom not in bed:\n bed[chrom] = []\n bed[chrom].append((start, stop))\n total += 1\n # Log mapping results\n with mapping.stderr as f:\n for line in f:\n if line[0] == '#':\n continue\n self.logger.debug(line.decode('UTF-8').rstrip('\\n'))\n # Sort chromosome list by name/number\n chroms = numpy.array(list(bed))\n chrints = []\n for i in range(chroms.shape[0]):\n try:\n chrints.append((\n str(int(chroms[i].lstrip('chr'))).rjust(2, '0'),\n chroms[i]))\n except ValueError:\n chrints.append((chroms[i], chroms[i]))\n chrints.sort()\n chroms = []\n for i in range(len(chrints)):\n chroms.append(chrints[i][1])\n self.chroms = numpy.array(chroms)\n self.chr_indices = numpy.zeros(self.chroms.shape[0] + 1,\n dtype=numpy.int32)\n if self.focus is None:\n self.logger.info(\"Defaulting to a fragment-focused analysis\")\n self.focus = 'fragments'\n if self.focus == 'fragments':\n N = total - self.chroms.shape[0]\n else:\n N = total\n # Arrange data into single array with indexed chromosomes\n self.data = numpy.zeros(N, dtype=numpy.dtype([\n ('chr', numpy.int32), ('coords', numpy.int32, (2,)),\n ('treatment', numpy.int32), ('control', numpy.int32),\n ('score', numpy.float64), ('alignable', numpy.bool)]))\n self.data['alignable'].fill(True)\n for i in range(self.chroms.shape[0]):\n chrom = self.chroms[i]\n bed[chrom] = numpy.array(bed[chrom])\n bed[chrom] = bed[chrom][numpy.argsort(bed[chrom][:, 0]), :]\n start = self.chr_indices[i]\n if self.focus == 'fragments':\n self.chr_indices[i + 1] = start + bed[chrom].shape[0] - 1\n stop = self.chr_indices[i + 1]\n self.data['coords'][start:stop, 0] = bed[chrom][:-1, 1]\n self.data['coords'][start:stop, 1] = bed[chrom][1:, 0]\n else:\n self.chr_indices[i + 1] = start + bed[chrom].shape[0]\n stop = self.chr_indices[i + 1]\n self.data['coords'][start:stop, :] = bed[chrom]\n self.data['chr'][start:stop] = i", "def readMappedData(options,phase):\n whole_mapped_data={}\n mapped_data_per_size_per_register={}\n alignment_filename=options.output_directory+\"/\"+options.input_filename+\"_bowtie1.bwt\"\n fhr=open(alignment_filename,\"r\")\n for line in fhr:\n try:\n read_id, strand, chromosome, coordinate, sequence, quality, mapped_times = line.strip().split()\n except ValueError:\n print(line)\n continue\n try:\n coordinate=int(coordinate)\n mapped_times=int(mapped_times)+1\n length=len(sequence)\n except ValueError:\n print(line)\n continue\n if strand==\"-\":\n coordinate+=2\n if chromosome not in whole_mapped_data:\n whole_mapped_data[chromosome]={}\n if coordinate not in whole_mapped_data[chromosome]: \n whole_mapped_data[chromosome][coordinate]=0\n whole_mapped_data[chromosome][coordinate]+=1\n \n if phase!=length:\n continue\n if chromosome not in mapped_data_per_size_per_register:\n mapped_data_per_size_per_register[chromosome]={}\n register=coordinate % length\n if register not in mapped_data_per_size_per_register[chromosome]:\n mapped_data_per_size_per_register[chromosome][register]={}\n if coordinate not in mapped_data_per_size_per_register[chromosome][register]:\n 
mapped_data_per_size_per_register[chromosome][register][coordinate]=0\n mapped_data_per_size_per_register[chromosome][register][coordinate]+=1\n if mapped_data_per_size_per_register[chromosome][register][coordinate]>2:\n print(\"Trouble with alignments\",length,chromosome,register,coordinate)\n \n return whole_mapped_data,mapped_data_per_size_per_register", "def parse_interactions_imex(interactions_base_df, protein_df, gene_df):\n interactions_base_df.dropna(how='any', subset=['A', 'B'], inplace=True)\n\n custom_interactions = pd.DataFrame()\n\n custom_interactions['a_raw_data'] = interactions_base_df['A']\n custom_interactions['b_raw_data'] = interactions_base_df['B']\n custom_interactions['a_raw_ensembl'] = interactions_base_df['altA']\n custom_interactions['b_raw_ensembl'] = interactions_base_df['altB']\n\n custom_interactions['protein_1'] = interactions_base_df[\n interactions_base_df['A'].apply(lambda value: value.split(':')[0] == 'uniprotkb')]['A'].apply(\n lambda value: value.split(':')[1].split('-')[0])\n\n custom_interactions['protein_2'] = interactions_base_df[\n interactions_base_df['B'].apply(lambda value: value.split(':')[0] == 'uniprotkb')]['B'].apply(\n lambda value: value.split(':')[1].split('-')[0])\n\n custom_interactions['source'] = interactions_base_df['provider']\n\n custom_interactions['raw_score'] = interactions_base_df['confidenceScore'] # .apply(extract_score)\n\n # Extract ensembl for a_raw_ensembl data. Only if value is not null and has ensembl: prefix\n custom_interactions['ensembl_1'] = custom_interactions.dropna(subset=['a_raw_ensembl'])[\n custom_interactions.dropna(subset=['a_raw_ensembl'])['a_raw_ensembl'].apply(\n lambda value: value.split(':')[0] == 'ensembl')][\n 'a_raw_ensembl'].apply(\n lambda value: value.split(':')[1])\n\n custom_interactions['ensembl_2'] = custom_interactions.dropna(subset=['b_raw_ensembl'])[\n custom_interactions.dropna(subset=['b_raw_ensembl'])['b_raw_ensembl'].apply(\n lambda value: value.split(':')[0] == 'ensembl')][\n 'b_raw_ensembl'].apply(\n lambda value: value.split(':')[1])\n\n custom_interactions = pd.merge(custom_interactions, gene_df, left_on='ensembl_1', right_on='ensembl', how='outer',\n indicator='_merge_1')\n\n custom_interactions.drop(['ensembl'], inplace=True, axis=1)\n custom_interactions = pd.merge(custom_interactions, gene_df, left_on='ensembl_2', right_on='ensembl', how='outer',\n indicator='_merge_2', suffixes=['_1', '_2'])\n\n def get_protein(row, protein_number):\n protein_x = row['protein_%s' % protein_number]\n if isinstance(protein_x, float) and math.isnan(protein_x):\n return row['uniprot_%s' % protein_number]\n\n return row['protein_%s' % protein_number]\n\n custom_interactions['protein_1'] = custom_interactions.apply(lambda row: get_protein(row, 1), axis=1)\n custom_interactions['protein_2'] = custom_interactions.apply(lambda row: get_protein(row, 2), axis=1)\n\n custom_interactions.dropna(how='any', subset=['protein_1', 'protein_2'], inplace=True)\n\n custom_interactions = custom_interactions[['protein_1', 'protein_2', 'raw_score', 'source']]\n custom_interactions = _only_uniprots_in_df(protein_df, custom_interactions)\n\n def get_score(row):\n intact_miscore = row['raw_score'].split('intact-miscore:')\n default_score = 0\n default_innatedb_score_2 = 1\n\n row['has_intacted'] = False\n if len(intact_miscore) < 2:\n row['score_1'] = default_score\n row['score_2'] = default_score\n if row['source'] == 'InnateDB-All' or row['source'] == 'InnateDB':\n row['score_2'] = default_innatedb_score_2\n\n else:\n 
row['score_1'] = float(intact_miscore[1])\n row['score_2'] = float(intact_miscore[1])\n row['has_intacted'] = True\n\n return row\n\n custom_interactions = custom_interactions.apply(get_score, axis=1)\n\n def set_score_duplicates(interaction):\n \"\"\"\n Returns the interaction with max score_1. Instact-miscore predominates over default score values\n :type interaction: pd.Series()\n :rtype: pd.Series()\n \"\"\"\n same_interactions = custom_interactions[(custom_interactions['protein_1'] == interaction['protein_1']) & (\n custom_interactions['protein_2'] == interaction['protein_2'])]\n\n interactions_intacted = same_interactions[same_interactions['has_intacted'] == True]\n if not interactions_intacted.empty:\n index_max = interactions_intacted['score_1'].argmax()\n return interactions_intacted.loc[index_max]\n\n index_max = same_interactions['score_1'].argmax()\n return same_interactions.loc[index_max]\n\n custom_interactions = normalize_interactions(custom_interactions)\n\n custom_interactions_unique = custom_interactions.drop_duplicates(['protein_1', 'protein_2'], keep='first')\n\n custom_interactions_unique = custom_interactions_unique.apply(set_score_duplicates, axis=1)\n\n custom_interactions_unique = custom_interactions_unique[['protein_1', 'protein_2', 'score_1', 'score_2', 'source']]\n\n _validate_sources(custom_interactions_unique['source'].tolist(), interactions_base_df['provider'].tolist())\n\n custom_interactions_unique.rename(index=str, columns={'protein_1': 'uniprot_1', 'protein_2': 'uniprot_2'},\n inplace=True)\n\n return custom_interactions_unique", "def append_primer_hit(primer, \n label,\n hit_index,\n region_slice,\n overall_length,\n unaligned_seq,\n primer_len):\n \n \n primer.match_count+=1\n primer.labels.append(label.split()[0])\n # Fill in 'N' for incomplete sequences\n # Set primer_index to 0 in case slicing left end of sequence\n primer_index=hit_index-region_slice\n if primer_index<0:\n primer_index=0\n unknown_bases=overall_length-len(unaligned_seq[primer_index:hit_index+\n primer_len])\n if unknown_bases>0:\n filler=\"-\"*unknown_bases\n else:\n filler=\"\"\n upstream_region=filler+unaligned_seq[primer_index:hit_index+primer_len]\n primer.upstream_regions.append(upstream_region)\n unknown_bases=overall_length-len(unaligned_seq[hit_index:hit_index+\n primer_len+region_slice])\n if unknown_bases>0:\n filler=\"-\"*unknown_bases\n else:\n filler=\"\"\n downstream_region=unaligned_seq[hit_index:hit_index +\n primer_len+region_slice]+filler\n primer.downstream_regions.append(downstream_region)\n return", "def main (fastq):\n\t\n\t\n\t\n\tfor record in SeqIO.parse(fastq, \"fastq\"):\n\t\t\n\t\tQ = record.letter_annotations[\"phred_quality\"]\n\n\t\tif record.id[-2:]==\"_1\":\n\t\t\n\t\t\tupperseq = SeqRecord( record.seq.reverse_complement(), id = record.id, description = \"\" )\n\t\t\tupperseq.letter_annotations[\"phred_quality\"] = Q[::-1]\n\t\t\tprint upperseq.format(\"fastq\"),\n\t\t\n\t\telse:\n\t\t\tupperseq = SeqRecord( record.seq, id = record.id, description = \"\" )\n\t\t\tupperseq.letter_annotations[\"phred_quality\"] = Q\t\t\t\n\t\t\tprint upperseq.format(\"fastq\"),", "def proteinTranslation(seq, geneticCode = STANDARD_GENETIC_CODE):\n\n seq = seq.replace('T','U') # Make sure we have RNA sequence\n proteinSeq = []\n \n i = 0\n while i+2 < len(seq):\n \n codon = seq[i:i+3]\n aminoAcid = geneticCode[codon]\n \n if aminoAcid is None: # Found stop codon\n break\n\n proteinSeq.append(aminoAcid)\n i += 3\n\n return proteinSeq", "def bb8_transform(ex_rois, 
gt_bb8_coordinates, bb8_variance, granularity, im_info):\n assert ex_rois.shape[0] == gt_bb8_coordinates.shape[0], 'inconsistent rois number'\n\n ex_widths = ex_rois[:, 2] - ex_rois[:, 0] + 1.0\n ex_heights = ex_rois[:, 3] - ex_rois[:, 1] + 1.0\n ex_ctr_x = ex_rois[:, 0] + 0.5 * (ex_widths - 1.0)\n ex_ctr_y = ex_rois[:, 1] + 0.5 * (ex_heights - 1.0)\n\n # fine grained anchor centers\n ex_FGA_ctr_x = np.zeros(shape=(ex_rois.shape[0], 1, granularity[1]))\n ex_FGA_ctr_y = np.zeros(shape=(ex_rois.shape[0], granularity[0], 1))\n for i in range(granularity[1]):\n ex_FGA_ctr_x[:, 0, i] = ex_ctr_x + (i - int((granularity[1] - 1) / 2)) * ex_widths / granularity[1]\n ex_FGA_ctr_x = np.repeat(ex_FGA_ctr_x, repeats=granularity[0], axis=1)\n for i in range(granularity[0]):\n ex_FGA_ctr_y[:, i, 0] = ex_ctr_y + (i - int((granularity[0] - 1) / 2)) * ex_heights / granularity[0]\n ex_FGA_ctr_y = np.repeat(ex_FGA_ctr_y, repeats=granularity[1], axis=2)\n\n\n # gt_widths = gt_rois[:, 2] - gt_rois[:, 0] + 1.0\n # gt_heights = gt_rois[:, 3] - gt_rois[:, 1] + 1.0\n gt_bb8_coordinates = gt_bb8_coordinates.reshape((gt_bb8_coordinates.shape[0], 8, 2))\n gt_bb8_coordinates_x = gt_bb8_coordinates[:, :, 0] * im_info[1]\n gt_bb8_coordinates_y = gt_bb8_coordinates[:, :, 1] * im_info[0]\n\n distance_x = gt_bb8_coordinates_x[:, :, np.newaxis, np.newaxis] - ex_FGA_ctr_x[:, np.newaxis]\n distance_y = gt_bb8_coordinates_y[:, :, np.newaxis, np.newaxis] - ex_FGA_ctr_y[:, np.newaxis]\n distance = np.sqrt(np.square(distance_x) + np.square(distance_y))\n\n FGA_cls_targets = np.zeros_like(distance)\n FGA_cls_targets = FGA_cls_targets.reshape((FGA_cls_targets.shape[0], FGA_cls_targets.shape[1], -1))\n # min_distances = np.min(distance.reshape((distance.shape[0], distance.shape[1], -1)), axis=2)\n index = np.argmin(distance.reshape((distance.shape[0], distance.shape[1], -1)), axis=2)\n for i in range(FGA_cls_targets.shape[0]):\n for j in range(FGA_cls_targets.shape[1]):\n FGA_cls_targets[i, j, index[i, j]] = 1\n FGA_cls_targets = FGA_cls_targets.reshape(distance.shape)\n\n # shape (N, 8)\n FGA_reg_targets_dx = distance_x[FGA_cls_targets > 0].reshape((distance_x.shape[0], -1))\n FGA_reg_targets_dx = FGA_reg_targets_dx / (ex_widths[:, np.newaxis] / granularity[1] + 1e-14) / bb8_variance[0]\n FGA_reg_targets_dy = distance_y[FGA_cls_targets > 0].reshape((distance_y.shape[0], -1))\n FGA_reg_targets_dy = FGA_reg_targets_dy / (ex_heights[:, np.newaxis] / granularity[0] + 1e-14) / bb8_variance[1]\n\n # shape (N, 16) xyxy\n FGA_reg_targets = np.stack((FGA_reg_targets_dx, FGA_reg_targets_dy), axis=-1).reshape((FGA_cls_targets.shape[0], -1))\n FGA_reg_weights = np.ones_like(FGA_reg_targets)\n\n # for softmax target (N, 8)\n FGA_cls_targets = np.argmax(FGA_cls_targets.reshape(FGA_cls_targets.shape[0], FGA_cls_targets.shape[1], -1), axis=2)\n\n return FGA_cls_targets, FGA_reg_targets, FGA_reg_weights", "def setFromAlignment(self, aligned, pseudo_count = 0.0):\n self.cols = -1\n self.nsites = len(aligned)\n seqs = []\n # Below we create a list of Sequence from the alignment,\n # while doing some error checking, and figure out the number of columns\n for s in aligned:\n # probably a text string, so we make a nameless sequence from it\n if not type(s) is Sequence:\n s=Sequence(s, Motif.getAlphabet(self))\n else:\n # it was a sequence, so we check that the alphabet in\n # this motif will be able to process it\n if not Motif.isAlphabet(self, s):\n raise RuntimeError(\"Motif alphabet is not valid for sequence \" + s.getName())\n if self.cols == -1:\n 
self.cols = s.getLen()\n elif self.cols != s.getLen():\n raise RuntimeError(\"Sequences in alignment are not of equal length\")\n seqs.append(s)\n # The line below initializes the list of Distrib (one for each column of the alignment)\n self.counts = [Distrib(Motif.getAlphabet(self), pseudo_count) for _ in range(self.cols)]\n # Next, we do the counting, column by column\n for c in range( self.cols ): # iterate through columns\n for s in seqs: # iterate through rows\n # determine the index of the symbol we find at this position (row, column c)\n self.counts[c].count(s.getSite(c))\n # Update the length\n self.len = self.cols", "def stereo_score(alignment):\n #dictionary with properties for each residue\n dic_prop = {'I': [1, 0, 0, 0, 0, 1, 0, 0, 0, 0],\n 'L': [1, 0, 0, 0, 0, 1, 0, 0, 0, 0],\n 'V': [1, 0, 1, 0, 0, 1, 0, 0, 0, 0],\n 'C': [1, 0, 1, 0, 0, 0, 0, 0, 0, 0],\n 'A': [1, 0, 1, 0, 1, 0, 0, 0, 0, 0],\n 'G': [1, 0, 1, 0, 1, 0, 0, 0, 0, 0],\n 'M': [1, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n 'F': [1, 0, 0, 0, 0, 0, 1, 0, 0, 0],\n 'Y': [1, 1, 0, 0, 0, 0, 1, 0, 0, 0],\n 'W': [1, 1, 0, 0, 0, 0, 1, 0, 0, 0],\n 'H': [1, 1, 0, 0, 0, 0, 1, 1, 0, 1],\n 'K': [1, 1, 0, 0, 0, 0, 0, 1, 0, 1],\n 'R': [0, 1, 0, 0, 0, 0, 0, 1, 0, 1],\n 'E': [0, 1, 0, 0, 0, 0, 0, 0, 1, 1],\n 'Q': [0, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n 'D': [0, 1, 1, 0, 0, 0, 0, 0, 1, 1],\n 'N': [0, 1, 1, 0, 0, 0, 0, 0, 0, 0],\n 'S': [0, 1, 1, 0, 1, 0, 0, 0, 0, 0],\n 'T': [1, 1, 1, 0, 0, 0, 0, 0, 0, 0],\n 'P': [0, 0, 1, 1, 0, 0, 0, 0, 0, 0],\n 'B': [0, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n 'Z': [0, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n 'X': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],\n '-': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]}\n score_list = []\n for i in range(0, alignment.get_alignment_length()):\n #extract the unique residues in the alignment\n column = ''.join(set(alignment[:, i]))\n stereo_list = []\n #loop through each residue\n for res in range(0, len(column)):\n #replace the residue with list of properties\n residue = column[res]\n #append the properties list to a\n stereo_prop = dic_prop.get(residue)\n stereo_list.append(stereo_prop)\n #number of common properties\n count_stereo = sum(len(set(i)) == 1 for i in zip(*stereo_list))\n #add the number of properties to a list\n score_list.append(count_stereo)\n score_list_final = [float(i*0.1) for i in score_list]\n return score_list_final", "def get_transcript_biotype_map(db_path, table=Annotation.__tablename__, index_col='TranscriptId'):\n df = read_attrs(db_path, table, index_col)\n return dict(list(zip(df.index, df.TranscriptBiotype)))", "def transform_SEQ(self, seq_in, cigar_list, original_pos, read_pos, original_end):\n # Transform the sequence according to the cigar so its end can be computed.\n seq_middle = self.modify_SEQ(seq_in, cigar_list)\n # Compute the read end once the sequence has been processed. \n read_end = read_pos + len(seq_middle) - 1\n # Obtain the sequence for the pileup. 
\n seq_out = self.process_SEQ(seq_middle, original_pos, read_pos, original_end, read_end)\n return seq_out", "def main():\n\n\tparser = OptionParser()\n\tparser.add_option(\"-p\", dest=\"pdbfile\", help=\"pdbfile\")\n\tparser.add_option(\"-P\", dest=\"pdblist\", help=\"pdblist\")\n\tparser.add_option(\"-t\", dest=\"transpose\", help=\"transpose\", action=\"store_true\")\n\tparser.add_option(\"-n\", dest=\"number\", help=\"number\", action=\"store_true\")\n\tparser.add_option(\"-r\", dest=\"range\", help=\"range\")\n\tparser.add_option(\"-s\", dest=\"selection\", help=\"selection\")\n\tparser.set_description(main.__doc__)\n\t(options,args) = parser.parse_args()\n\n\tpdbfiles = []\n\tif options.pdblist:\n\t\tpdbfiles = files_from_list(options.pdblist)\n\telif options.pdbfile:\n\t\tpdbfiles.append(options.pdbfile)\t\n\telse:\n\t\tparser.print_help()\n\t\tsys.exit()\n\n\t\n\tif options.selection:\n\t\tsele = Selection()\n\t\tsele.makeSelection(options.selection)\n\n\tseq_min = 1\n\tseq_max = 1\n\tif options.range:\n\t\t(min,max) = string.split(arg, \"-\")\n\t\tseq_min = int(min)\n\t\tseq_max = int(max)\n\n\tprotein = Molecule()\n\tSeq = \"\"\n\tfor pdb in pdbfiles:\n\t\tprotein.readPDB(pdb)\n\t\tif options.selection:\n\t\t\tnewmol = sele.apply_selection(protein)\n\t\t\tSeq = newmol.sequence()\n\t\telse:\n\t\t\tSeq = protein.sequence()\n\n\t\tif options.range:\n\t\t\tSeq = Seq[seq_min:seq_max]\n\n\t\tif options.transpose:\n\t\t\tfor i in range(len(Seq)):\n\t\t\t\tprint Seq[i]\n\t\telse:\n\t\t\tprint Seq\n\n\t\tprotein.clear()", "def _getPerBaseInfo( self, readGroup ):\r\n\r\n if 'AlignmentArray' not in readGroup:\r\n return None\r\n\r\n alignmentArrayDS = readGroup['AlignmentArray']\r\n dataSize = len(alignmentArrayDS)\r\n \r\n # fetch all to memory for speeding up, it \r\n # requires explicitly slicing coordinate to copy the data \r\n alignmentArray = alignmentArrayDS[0:dataSize] \r\n \r\n ### these are done in numpy, fast,.,\r\n binRBases = (alignmentArray & 0xf0) >> 4; \r\n binTBases = (alignmentArray & 0x0f) ;\r\n rSeqAll = \"\".join(Basemap[binRBases])\r\n tSeqAll = \"\".join(Basemap[binTBases])\r\n\r\n return { \"tSeq\":tSeqAll, \"rSeq\":rSeqAll }", "def _interpret_kinase(self, protein: ProteinSystem):\n import pandas as pd\n\n from ..core.sequences import AminoAcidSequence\n from ..utils import LocalFileStorage\n\n klifs_structures = pd.read_csv(LocalFileStorage.klifs_structure_db(self.cache_dir))\n klifs_kinases = pd.read_csv(LocalFileStorage.klifs_kinase_db(self.cache_dir))\n\n # identify kinase of interest and get KLIFS kinase ID and UniProt ID if not provided\n if any([\n hasattr(protein, \"klifs_kinase_id\"),\n hasattr(protein, \"uniprot_id\"),\n hasattr(protein, \"pdb_id\")\n ]):\n # add chain_id and alternate_location attributes if not present\n if not hasattr(protein, \"chain_id\"):\n protein.chain_id = None\n if not hasattr(protein, \"alternate_location\"):\n protein.alternate_location = None\n if protein.alternate_location == \"-\":\n protein.alternate_location = None\n # if pdb id is given, query KLIFS by pdb\n if hasattr(protein, \"pdb_id\"):\n structures = klifs_structures[\n klifs_structures[\"structure.pdb_id\"] == protein.pdb_id\n ]\n if protein.alternate_location:\n structures = structures[\n structures[\"structure.alternate_model\"] == protein.alternate_location\n ]\n if protein.chain_id:\n structures = structures[\n structures[\"structure.chain\"] == protein.chain_id\n ]\n protein.klifs_kinase_id = structures[\"kinase.klifs_id\"].iloc[0]\n # if KLIFS kinase ID is not 
given, query by UniProt ID\n if not hasattr(protein, \"klifs_kinase_id\"):\n logging.debug(\"Converting UniProt ID to KLIFS kinase ID ...\")\n protein.klifs_kinase_id = klifs_kinases[\n klifs_kinases[\"kinase.uniprot\"] == protein.uniprot_id\n ][\"kinase.klifs_id\"].iloc[0]\n # if UniProt ID is not given, query by KLIFS kinase ID\n if not hasattr(protein, \"uniprot_id\"):\n logging.debug(\"Converting KLIFS kinase ID to UniProt ID ...\")\n protein.uniprot_id = klifs_kinases[\n klifs_kinases[\"kinase.klifs_id\"] == protein.klifs_kinase_id\n ][\"kinase.uniprot\"].iloc[0]\n else:\n text = (\n f\"{self.__class__.__name__} requires a system with a protein having a \"\n \"'klifs_kinase_id', 'uniprot_id' or 'pdb_id' attribute.\")\n logging.debug(\"Exception: \" + text)\n raise NotImplementedError(text)\n\n # identify DFG conformation of interest\n if not hasattr(protein, \"dfg\"):\n protein.dfg = None\n else:\n if protein.dfg not in [\"in\", \"out\", \"out-like\"]:\n text = (\n f\"{self.__class__.__name__} requires a system with a protein having either no \"\n \"'dfg' attribute or a 'dfg' attribute with a KLIFS specific DFG conformation \"\n \"('in', 'out' or 'out-like').\"\n )\n logging.debug(\"Exception: \" + text)\n raise NotImplementedError(text)\n\n # identify aC helix conformation of interest\n if not hasattr(protein, \"ac_helix\"):\n protein.ac_helix = None\n else:\n if protein.ac_helix not in [\"in\", \"out\", \"out-like\"]:\n text = (\n f\"{self.__class__.__name__} requires a system with a protein having either no \"\n \"'ac_helix' attribute or an 'ac_helix' attribute with a KLIFS specific alpha C\"\n \" helix conformation ('in', 'out' or 'out-like').\"\n )\n logging.debug(\"Exception: \" + text)\n raise NotImplementedError(text)\n\n # identify amino acid sequence of interest\n if not hasattr(protein, \"sequence\"):\n logging.debug(\n f\"Retrieving kinase sequence details for UniProt entry {protein.uniprot_id} ...\")\n protein.sequence = AminoAcidSequence.from_uniprot(protein.uniprot_id)\n\n return # TODO: What to do if kinase not in KLIFS?", "def main():\n filename = sys.argv[1]\n aligner = Aligner(cv2.imread(filename))\n aligned_image = aligner.get_aligned_image()\n\n return aligned_image", "def enhancer2gene(\n self,\n peak_pr,\n up=100_000,\n down=100_000,\n alpha=1e4,\n promoter=2000,\n full_weight_region=5000,\n ):\n genes = region_gene_overlap(peak_pr, self.gene_bed)\n\n # Get the distance from center of enhancer to TSS\n # Correct for extension\n genes[\"dist\"] = (\n (genes[\"Start_b\"] + genes[\"End_b\"]) / 2 - genes[\"Start\"]\n ).astype(int)\n genes.loc[genes[\"Strand\"] == \"+\", \"dist\"] -= up\n genes.loc[genes[\"Strand\"] == \"-\", \"dist\"] -= down\n genes[\"dist\"] = np.abs(genes[\"dist\"])\n\n # Create region in chr:start:end format\n genes[\"loc\"] = (\n genes[\"Chromosome\"].astype(str)\n + \":\"\n + genes[\"Start_b\"].astype(str)\n + \"-\"\n + genes[\"End_b\"].astype(str)\n )\n\n # Keep the gene-enhancer combination with the smallest distance\n genes = genes.sort_values(\"dist\").drop_duplicates(\n subset=[\"loc\", \"Name\"], keep=\"first\"\n )\n\n # Return the right stuff\n genes = genes.set_index(\"loc\")[[\"Name\", \"dist\"]].rename(\n columns={\"Name\": \"gene\"}\n )\n\n # Get distance-based wight\n weight = self.distance_weight(\n include_promoter=self.include_promoter,\n include_enhancer=self.include_enhancer,\n alpha=alpha,\n promoter_region=promoter,\n full_weight_region=full_weight_region,\n ).set_index(\"dist\")\n genes = genes.join(weight, on=\"dist\")\n\n 
return genes", "def subalign(aln, cols):\n\n return mapalign(aln, valfunc=lambda x: \"\".join(util.mget(x, cols)))", "def motif_from_align(align):\n width = len(align[0])\n nucs = {\"A\":0,\"C\":1,\"G\":2,\"T\":3}\n pfm = [[0 for _ in range(4)] for _ in range(width)]\n for row in align:\n for i in range(len(row)):\n pfm[i][nucs[row[i]]] += 1\n m = Motif(pfm)\n m.align = align[:]\n return m", "def writeProteinRelations( self ):\n\n self.logger.info( 'writeProteinRelations: START' )\n\n self.logger.info( 'writeProteinRelations: keggreader.getAllProteinMaps() : START' )\n\n # Get all protein maps relations.\n # Notice that proteins without any map wont exist in the result below. That's important to save memory (no other reason at all).\n proteinMaps = self.reader.getAllProteinMaps()\n\n self.logger.info( 'writeProteinRelations: keggreader.getAllProteinMaps() : DONE' )\n\n\n self.logger.info( 'writeProteinRelations: proteinEcFile is: proteinEcsInsert.psql' )\n\n # Open protein_ecs insert file.\n proteinEcFile = self.openInsertFile( 'proteinEcsInsert.psql' )\n\n\n self.logger.info( 'writeProteinRelations: proteinMapFile is: proteinMapsInsert.psql' )\n\n # Open protein_maps insert file.\n proteinMapFile = self.openInsertFile( 'proteinMapsInsert.psql' )\n\n\n self.logger.info( 'writeProteinRelations: iterating through all the proteins: START' )\n\n # Keep a counter to know how long it's taking.\n counter = 0\n\n # Now we have to write protein_ecs table.\n # That means get the proteins ids and its related ecs ids.\n # Those ids comes from dictionary variables generated by the 'write' methods for each table.\n # So, we run through proteins ids and get ec from KeggReader 'getEcNumberByGene' method and make the correct relation.\n for protein,relationalDatabaseId in self.proteinsInserted.iteritems():\n\n # Only log how long it's taking to run.\n # By thousands.\n counter += 1\n if ( counter % 100000 ) == 0:\n self.logger.info( 'writeProteinRelations: step: ' + str(counter) + '.')\n # END log step.\n\n self.logger.info( 'writeProteinRelations: keggreader.getEcNumbersByGene(): START' )\n\n # We get all EC numbers related to the specific protein.\n ecs = self.reader.getEcNumberByGene( protein ) \n\n self.logger.info( 'writeProteinRelations: keggreader.getEcNumbersByGene(): DONE' )\n\n # If there's EC number (almost of proteins doesn't has a related EC number - which means they're no enzymes).\n if ecs:\n\n self.logger.info( 'writeProteinRelations: FOUND EC Numbers for the protein: ' + str(protein) + '.' )\n self.logger.info( 'writeProteinRelations: ' + str(protein) + ' : Total of EC Numbers FOUND: ' + str(len(ecs)) + '.' )\n\n # Iterate through the ECs found for that specific protein.\n for ec in ecs:\n # Get the relational database EC id for that EC number being iterated \n ecId = self.importerEc.ecsInserted[ str(ec) ] \n proteinId = relationalDatabaseId\n\n # Actual write protein_ecs file.\n #self.writeProteinEcsFile( proteinEcFile, proteinId, ecId )\n self.writeFile( proteinEcFile, 'protein_ecs', [ str(proteinId), str(ecId) ] )\n else:\n self.logger.info( 'writeProteinRelations: NOT FOUND EC Numbers for the protein: ' + str(protein) + '.' )\n\n\n # Maps to specific protein.\n if protein in proteinMaps:\n maps = proteinMaps[ protein ]\n\n if maps:\n self.logger.info( 'writeProteinRelations: FOUND MAP Numbers for the protein: ' + str(protein) + '.' )\n self.logger.info( 'writeProteinRelations: ' + str(protein) + ' : Total of MAP Numbers FOUND: ' + str(len(maps)) + '.' 
)\n\n for proteinMap in maps:\n\n # Some maps aren't metabolic pathways but simple pathways for other molecular mechanisms.\n # And we're interested only in metabolic maps at this moment.\n if proteinMap in self.importerPathway.pathwayMapsInserted:\n mapId = self.importerPathway.pathwayMapsInserted[ proteinMap ]\n proteinId = relationalDatabaseId\n\n #self.writeProteinMapsFile( proteinMapFile, proteinId, mapId )\n self.writeFile( proteinMapFile, 'protein_maps', [ str(proteinId), str(mapId) ] )\n else:\n self.logger.info( 'writeProteinRelations: NOT FOUND MAP Numbers for the protein: ' + str(protein) + '.' )\n\n\n self.logger.info( 'writeProteinRelations: iterating through all the proteins: DONE' )\n self.logger.info( 'writeProteinRelations: DONE' )", "def main():\n\n args = get_args()\n seq = args.seq.upper()\n codon_to_aa = {\n 'AAA': 'K',\n 'AAC': 'N',\n 'AAG': 'K',\n 'AAU': 'N',\n 'ACA': 'T',\n 'ACC': 'T',\n 'ACG': 'T',\n 'ACU': 'T',\n 'AGA': 'R',\n 'AGC': 'S',\n 'AGG': 'R',\n 'AGU': 'S',\n 'AUA': 'I',\n 'AUC': 'I',\n 'AUG': 'M',\n 'AUU': 'I',\n 'CAA': 'Q',\n 'CAC': 'H',\n 'CAG': 'Q',\n 'CAU': 'H',\n 'CCA': 'P',\n 'CCC': 'P',\n 'CCG': 'P',\n 'CCU': 'P',\n 'CGA': 'R',\n 'CGC': 'R',\n 'CGG': 'R',\n 'CGU': 'R',\n 'CUA': 'L',\n 'CUC': 'L',\n 'CUG': 'L',\n 'CUU': 'L',\n 'GAA': 'E',\n 'GAC': 'D',\n 'GAG': 'E',\n 'GAU': 'D',\n 'GCA': 'A',\n 'GCC': 'A',\n 'GCG': 'A',\n 'GCU': 'A',\n 'GGA': 'G',\n 'GGC': 'G',\n 'GGG': 'G',\n 'GGU': 'G',\n 'GUA': 'V',\n 'GUC': 'V',\n 'GUG': 'V',\n 'GUU': 'V',\n 'UAA': 'Stop',\n 'UAC': 'Y',\n 'UAG': 'Stop',\n 'UAU': 'Y',\n 'UCA': 'S',\n 'UCC': 'S',\n 'UCG': 'S',\n 'UCU': 'S',\n 'UGA': 'Stop',\n 'UGC': 'C',\n 'UGG': 'W',\n 'UGU': 'C',\n 'UUA': 'L',\n 'UUC': 'F',\n 'UUG': 'L',\n 'UUU': 'F',\n }\n\n k = 3\n\n # 1: for loop\n # protein = ''\n # for codon in [seq[i:i + k] for i in range(0, len(seq), k)]:\n # aa = codon_to_aa.get(codon, '-')\n # if aa == 'Stop':\n # break\n # protein += aa\n\n # 2: list comprehension, slice to remove Stop\n # codons = [seq[i:i + k] for i in range(0, len(seq), k)]\n # aa = [codon_to_aa.get(codon, '-') for codon in codons]\n # if 'Stop' in aa:\n # aa = aa[:aa.index('Stop')]\n # print(''.join(aa))\n\n # 3: L.C. 
-> map(), slice -> takewhile\n # codons = map(lambda i: seq[i:i + k], range(0, len(seq), k))\n # aa = map(lambda codon: codon_to_aa.get(codon, '-'), codons)\n # print(''.join(takewhile(lambda c: c != 'Stop', aa)))\n\n # 4: combine map()\n # aa = map(lambda c: codon_to_aa.get(c, '-'),\n # map(lambda i: seq[i:i + k], range(0, len(seq), k)))\n # print(''.join(takewhile(lambda c: c != 'Stop', aa)))\n\n # 5: combine all\n # print(''.join(\n # takewhile(\n # lambda c: c != 'Stop',\n # map(lambda c: codon_to_aa.get(c, '-'),\n # map(lambda i: seq[i:i + k], range(0, len(seq), k))))))\n\n # 6: Seq\n print(str(Seq(args.seq).translate()).replace('*', ''))", "def preprocess(self):", "def map_to_mgi(adata, copy = False):\n from pybiomart import Server\n # connest to the biomart server\n server = Server(host='http://www.ensembl.org')\n\n # retrieve the mouse data set we need\n dataset = (server.marts['ENSEMBL_MART_ENSEMBL']\n .datasets['mmusculus_gene_ensembl'])\n\n # recieve the mapping from ensembl to MGI\n conv_table = dataset.query(attributes=['ensembl_gene_id', 'external_gene_name'])\n\n # we first drop duplicates in the first column\n conv_table = conv_table.drop_duplicates(conv_table.columns.values[0])\n\n # convert the gene names from the adata object to a data frame\n adata_table = pd.DataFrame(adata.var_names)\n\n # give the first column a name\n adata_table.columns = ['Gene stable ID']\n\n # change the gene table so that the ensembl names are now the index\n conv_table = conv_table.set_index('Gene stable ID')\n\n # project the names from the conversion table on the corr. names in the\n # adata var names table\n mapping = adata_table.join(conv_table, on='Gene stable ID')\n\n # how many could we not map\n not_found_mgi = sum(pd.isnull(mapping).iloc[:,1])\n\n # how many ensg symbols did we map several times?\n rep_ensg = len(mapping.iloc[:, 0]) - len(set(mapping.iloc[:, 0]))\n\n # how many mgi symbols did we map several times?\n rep_mgi = len(mapping.iloc[:, 1]) - len(set(mapping.iloc[:, 1]))\n\n # print this information\n print('Genes where no MGI annotations where found: {}\\nENSG repetition: {}\\nMGI repetition: {}'.\\\n format(not_found_mgi, rep_ensg, rep_mgi))\n\n # fill nans in mgi column with corresponding ensembl annotations\n mapping['Gene name'].fillna(mapping['Gene stable ID'], inplace = True)\n\n # add the new gene names to the adata object\n adata.var['mgi_symbols'] = mapping['Gene name'].tolist()", "def main():\r\n\r\n # contents = ['ATGGCCATGGCCCCCAGAACTGAGATCAATAGTACCCGTATTAACGGGTGA', 'MA'] # sample input\r\n contents = []\r\n for line in sys.stdin:\r\n contents.append(line.strip())\r\n myPeptide = GenomeEncoding(contents[0], contents[1])\r\n myPeptide.getCodonSeqs()\r\n myPeptide.getRevCodonSeqs()\r\n myPeptide.printEncodePep()", "def call_bt2_align(read_seqs,\n index_fp,\n output_fp,\n bowtie_args):\n subprocess.call([\" \".join([\"bowtie2\", bowtie_args, index_fp, \"-c\",\n read_seqs, \"-S\", output_fp])],\n shell=True)", "def prepocessImg(self, method, size, img, bb,offset=0.3,gray=True,\n boundry=False, outputDebug=False,outputprefix=None):\n if method == 'crop':\n crop_img = crop_only(img,bb.left(),bb.top(),bb.width(),bb.height(),offset,size)\n elif method == 'affine':\n img = Image.fromarray(img)\n if self.predictor == None:\n raise Exception(\"Error: method affine should initial with an facepredictor.\")\n alignPoints = self.align(img, bb)\n (xs, ys) = zip(*alignPoints)\n (l, r, t, b) = (min(xs), max(xs), min(ys), max(ys))\n w,h = img.size\n if boundry and (l < 0 or r > w 
or t < 0 or b > h):\n raise AliError('face out of boundry')\n \n left_eye_l = alignPoints[36]\n left_eye_r = alignPoints[39]\n left_eye = (np.array(left_eye_l)+np.array(left_eye_r))/2\n right_eye_l = alignPoints[42]\n right_eye_r = alignPoints[45]\n right_eye = (np.array(right_eye_l)+np.array(right_eye_r))/2\n crop_img = crop_simi(img,left_eye,right_eye,(offset,offset),(size,size))\n im_buffer = cStringIO.StringIO()\n crop_img.save(im_buffer, format=\"JPEG\")\n im_str = base64.b64encode(im_buffer.getvalue())\n else:\n raise Exception(\"undefined crop method\")\n if gray:\n crop_img = crop_img.convert('L')\n if outputDebug:\n dirname = './aligndebug'\n if not os.path.exists(os.path.abspath(dirname)):\n os.mkdir(dirname)\n drawbox(img,(bb.left(),bb.right(),bb.top(),bb.bottom()))\n if method == 'affine':\n drawpoint(img,left_eye)\n drawpoint(img,right_eye)\n img.save('{}/{}_annotated.jpg'.format(dirname,outputprefix))\n crop_img.save('{}/{}_crop.jpg'.format(dirname,outputprefix))\n crop_img = np.array(crop_img,dtype=np.float32) #look carefully on data format\n if crop_img.ndim == 3: #data shape for caffe\n return crop_img,score\n elif crop_img.ndim == 2:\n bbox = [bb.left(),bb.top(),bb.right(),bb.bottom()]\n return crop_img[:,:,np.newaxis], bbox\n else:\n raise Exception(\"wrong dimension\")", "def pdb2pka_sugelm(self):\n import Protool\n P=Protool.structureIO()\n P.readpdb(self.pdbfile)\n P.RemoveALT()\n #import Protool.mutate\n #MUT=Protool.mutate.Mutate(P)\n #\n # Construct arrays\n #\n import pKD_dict\n self.data=pKD_dict.pKD_dict()\n self.atom_data=pKD_dict.pKD_dict()\n #\n # Create dir for mutant PDB files\n #\n import os\n mutdir=os.path.join(self.topdir,self.pdbfile+'.pdbs')\n if not os.path.isdir(mutdir):\n os.mkdir(mutdir)\n #\n # Loop over all residues\n #\n residues=P.residues.keys()\n residues.sort()\n for residue in residues:\n orgres=P.resname(residue)\n print 'Calculating for %s %s' %(residue,P.resname(residue))\n #\n # If neutral mutate to Asp, Glu, Lys, Arg, His\n #\n targets=[]\n for res in ['ARG','LYS','HIS','ASP','GLU']:\n if P.resname(residue)!=res:\n targets.append(res)\n #if orgres=='GLU':\n # targets.append('GLN')\n #elif orgres=='ASP':\n # targets.append('ASN')\n #elif orgres=='HIS':\n # targets.append('PHE')\n #elif orgres=='ARG' or P.resname(residue)=='LYS':\n # targets.append('MET')\n #\n # Target identified. 
Now model each\n #\n for target in targets:\n import pKD_tools\n resid=pKD_tools.get_resid_from_res(residue)\n orgres=P.resname(residue)\n filename=os.path.join(mutdir,'%s:%s:%s.pdb' %(residue,orgres,target))\n mutation='%s:%s:%s' %(residue,orgres,target)\n if not os.path.isfile(filename):\n import Design_pKa_help\n Design_pKa_help.make_mutation(self.pdbfile,mutation)\n NP=Protool.structureIO()\n NP.readpdb(filename)\n NP.writepdb(filename,TER=None)\n #\n # Calculate the interaction energies\n #\n protein,routines,forcefield,apbs_setup,lig_titgrps = pdb2pka.pre_init(pdbfilename=filename,\n ff='parse',\n ligand=None,\n verbose=1)\n mypkaRoutines = pdb2pka.pKaRoutines(protein, routines, forcefield,apbs_setup)\n #\n # Find our group\n #\n sp=residue.split(':')\n chainid=sp[0]\n resnum=int(sp[1])\n mypkaRoutines.findTitratableGroups()\n this_pKa=None\n for pKa in mypkaRoutines.pKas:\n print pKa.residue.resSeq,resnum\n print pKa.residue.chainID,chainid\n print pKa.residue.name,target\n print pKa.pKaGroup.name,target\n print '--------------'\n print 'ChainID',pKa.residue.chainID\n if pKa.residue.resSeq==resnum and pKa.residue.chainID==chainid and pKa.residue.name==target and pKa.pKaGroup.name==target:\n #print 'Found group',pKa.residue.resSeq,pKa.pKaGroup.name\n this_pKa=pKa\n break\n if not this_pKa:\n raise Exception,'Could not find inserted titratable group'\n mypkaRoutines.get_interaction_energies_setup(this_pKa,mode='pKD')\n matrix=mypkaRoutines.matrix\n #\n # Dig the interaction energies out of the pdb2pka array\n #\n for titration1 in matrix[this_pKa].keys():\n for state1 in matrix[this_pKa][titration1].keys():\n grp_sub=matrix[this_pKa][titration1][state1]\n if mypkaRoutines.is_charged(this_pKa,titration1,state1):\n for pKa2 in grp_sub.keys(): \n import string\n chainID2=pKa.residue.chainID\n resid2='%s:%s' %(chainID2,string.zfill(pKa2.residue.resSeq,4))\n for titration2 in grp_sub[pKa2].keys():\n for state2 in grp_sub[pKa2][titration2].keys():\n if mypkaRoutines.is_charged(pKa2,titration2,state2):\n #\n # Both states are charged, so now we can pull the\n # interaction energies out\n #\n if not self.data.has_key(mutation):\n self.data[mutation]={}\n self.data[mutation][resid2]=grp_sub[pKa2][titration2][state2]\n #\n # Get the potentials at all atoms too\n #\n all_pots=mypkaRoutines.all_potentials[this_pKa][titration1][state1]\n sub_all_pots=all_pots[pKa2][titration2][state2]\n for atom in sub_all_pots.keys():\n resid=mutation\n import pKD_tools\n resid2=pKD_tools.get_resid_from_res(atom)\n atomname=atom.split(':')[-1] #atom.name\n if atomname[0]=='H' or atomname in ['N','C','O']:\n continue # Skip all H atoms and all non-CA backbone atoms to save memory\n if not self.atom_data.has_key(resid):\n self.atom_data[resid]={}\n if not self.atom_data[resid].has_key(resid2):\n self.atom_data[resid][resid2]={}\n self.atom_data[resid][resid2][atomname]=abs(sub_all_pots[atom])\n return self.data,self.atom_data", "def pose_structure(self, pose):\n\n display_residues = range(1, pose.total_residue() + 1)\n # store the pose's number of residues, example Python syntax\n nres = pose.total_residue()\n # 1. obtain the pose's sequence\n sequence = pose.sequence()\n \n # 2. obtain a list of PDB numbering and icode as a single string\n pdb_info = pose.pdb_info()\n PDB_nums = [(str( pdb_info.number(i)) + pdb_info.icode(i)).strip()\n for i in range(1, nres + 1)]\n # 3. obtains a list of the chains organized by residue\n chains = [pdb_info.chain(i) for i in range(1, nres + 1)]\n # 4. 
extracts a list of the unique chain IDs\n unique_chains = []\n for c in chains:\n if c not in unique_chains:\n unique_chains.append(c)\n # start outputting information to screen\n print('\\n' + '='*80)\n #print('Loaded from' , pdb_info.name())\n print(nres , 'residues')\n print(len(unique_chains), 'chain(s) ('+ str(unique_chains)[1:-1] + ')')\n print('Sequence:\\n' + sequence)\n \n # this object is contained in PyRosetta v2.0 and above\n # 5. obtain the pose's secondary structure as predicted by PyRosetta's\n # built-in DSSP algorithm\n DSSP = protocols.moves.DsspMover()\n DSSP.apply(pose) # populates the pose's Pose.secstruct\n ss = pose.secstruct()\n print( 'Secondary Structure:\\n' + ss )\n print( '\\t' + str(100. * ss.count('H') / len(ss))[:4] + '% Helical' )\n print( '\\t' + str(100. * ss.count('E') / len(ss))[:4] + '% Sheet' )\n print( '\\t' + str(100. * ss.count('L') / len(ss))[:4] + '% Loop' )\n \n # 6. obtain the phi, psi, and omega torsion angles\n phis = [pose.phi(i) for i in range(1, nres + 1)]\n psis = [pose.psi(i) for i in range(1, nres + 1)]\n omegas = [pose.omega(i) for i in range(1, nres + 1)]\n \n # this object is contained in PyRosetta v2.0 and above\n # create a PyMOLMover for exporting structures directly to PyMOL\n #pymover = PyMOLMover()\n #pymover.apply(pose) # export the structure to PyMOL (optional)\n self.display_structure(pose, sequence, chains, phis, psis, omegas, ss)", "def join(args):\n from jcvi.formats.agp import OO, Phases, build\n from jcvi.formats.sizes import Sizes\n\n p = OptionParser(join.__doc__)\n p.add_option(\"--newid\", default=None, help=\"New sequence ID\")\n p.add_option(\n \"--gapsize\",\n default=100,\n type=\"int\",\n help=\"Number of N's in between the sequences\",\n )\n p.add_option(\"--gaptype\", default=\"contig\", help=\"Gap type to use in the AGP file\")\n p.add_option(\n \"--evidence\", default=\"\", help=\"Linkage evidence to report in the AGP file\"\n )\n p.add_option(\"--oo\", help=\"Use .oo file generated by bambus\")\n opts, args = p.parse_args(args)\n\n nargs = len(args)\n if nargs not in (1, 2):\n sys.exit(not p.print_help())\n\n if nargs == 2:\n fastafile, phasefile = args\n phases = DictFile(phasefile)\n phases = dict((a, Phases[int(b)]) for a, b in phases.items())\n else:\n (fastafile,) = args\n phases = {}\n\n sizes = Sizes(fastafile)\n prefix = fastafile.rsplit(\".\", 1)[0]\n agpfile = prefix + \".agp\"\n newid = opts.newid\n oo = opts.oo\n\n o = OO(oo, sizes.mapping)\n\n if oo:\n seen = o.contigs\n # The leftover contigs not in the oo file\n logging.debug(\n \"A total of {0} contigs ({1} in `{2}`)\".format(len(sizes), len(seen), oo)\n )\n\n for ctg, size in sizes.iter_sizes():\n if ctg in seen:\n continue\n o.add(ctg, ctg, size)\n\n else:\n if newid:\n for ctg, size in sizes.iter_sizes():\n o.add(newid, ctg, size)\n else:\n for scaffold_number, (ctg, size) in enumerate(sizes.iter_sizes()):\n object_id = \"scaffold{0:03d}\".format(scaffold_number + 1)\n o.add(object_id, ctg, size)\n\n fw = open(agpfile, \"w\")\n o.write_AGP(\n fw,\n gapsize=opts.gapsize,\n gaptype=opts.gaptype,\n evidence=opts.evidence,\n phases=phases,\n )\n fw.close()\n\n joinedfastafile = prefix + \".joined.fasta\"\n build([agpfile, fastafile, joinedfastafile])\n\n return joinedfastafile", "def isoformAln(aln, o):\n\n logger = logging.getLogger(\"main.alignment\")\n logger.info(\"Clustering isoforms.\")\n\n dRem={} #for remaining sequences\n dId2Seq={} #for remaining sequences\n laln=0 #alignement length\n for fasta in SeqIO.parse(open(aln),'fasta'):\n 
post=fasta.id.find(\"_\")\n if post!=-1: #regular format\n sp=fasta.id[:post]\n tag=fasta.id[post+1:]\n if not sp in dId2Seq:\n dId2Seq[sp]={}\n dId2Seq[sp][tag]=str(fasta.seq)\n if laln==0:\n laln=len(fasta.seq)\n else:\n dRem[fasta.id]=str(fasta.seq)\n\n \n outCov = o+aln.split(\"/\")[-1].split(\".\")[0]+\"_clustiso.fasta\"\n clustok=False #flag to check if a cluster has occured\n for sp,dtagseq in dId2Seq.items():\n lclust=[list(dtagseq)] #list of clusters of tags to be split\n for pos in range(laln):\n lclust2=[]\n for clust in lclust:\n dlet={tag:dtagseq[tag][pos] for tag in clust}\n llet=set([x for x in dlet.values() if x!=\"-\"])\n if len(llet)<=1: #one letter at most, keep all\n lclust2.append(clust)\n continue\n else:\n for x in llet:\n lclust2.append([tag for tag in clust if dlet[tag]==x])\n lind=[tag for tag in clust if dlet[tag]==\"-\"] #conservative, do not know wether to merge, may be improved\n if len(lind)!=0:\n lclust2.append(lind)\n lclust=lclust2\n \n #now merge sequences in each cluster\n for clust in lclust:\n if len(clust)==1:\n dRem[sp+\"_\"+clust[0]]=dtagseq[clust[0]]\n else:\n clustok=True\n ntag=clust[-1]+\"_clust\"\n logger.info(\"Clustered sequences \" + sp+\"_\" + (\", %s_\"%(sp)).join(clust) + \" into %s_\"%(sp)+ntag)\n nseq=\"\".join([max([dtagseq[tag][pos] for tag in clust]) for pos in range(laln)])\n dRem[sp+\"_\"+ntag]=nseq\n\n if clustok:\n with open(outCov, \"w\") as outC:\n \t outC.write(FastaResFunc.dict2fasta(dRem))\n \t outC.close()\n\t\n return(outCov)\n else:\n return(aln)", "def guess_align(aln):\n \n if \"pep\" in [guess_seq(seq) for seq in aln.itervalues()]:\n return \"pep\"\n else:\n return \"dna\"" ]
[ "0.5886286", "0.578243", "0.5694541", "0.55110466", "0.54499483", "0.5421526", "0.5384452", "0.5378168", "0.53772914", "0.5373772", "0.5367927", "0.5356481", "0.52636886", "0.5250715", "0.52294004", "0.521091", "0.5200446", "0.5192469", "0.5176645", "0.5132264", "0.5124896", "0.50737834", "0.50724614", "0.505876", "0.50254524", "0.50022477", "0.4995939", "0.49936405", "0.49844715", "0.495108", "0.4947819", "0.49441752", "0.49234837", "0.49232113", "0.4917254", "0.4905946", "0.49030998", "0.49016494", "0.48900136", "0.48777887", "0.4871814", "0.48691583", "0.4867484", "0.4862672", "0.48611054", "0.4845308", "0.48396888", "0.48315746", "0.48291337", "0.48264936", "0.48223895", "0.48167163", "0.4808226", "0.4806604", "0.48020634", "0.47980705", "0.47970706", "0.47888076", "0.47860363", "0.47842515", "0.47754675", "0.47660312", "0.47659966", "0.47594863", "0.4757521", "0.47500083", "0.4749612", "0.47488374", "0.47431046", "0.47415888", "0.47380617", "0.47367078", "0.47350624", "0.473248", "0.4730632", "0.4726945", "0.47264072", "0.47244585", "0.47212023", "0.4721127", "0.47154325", "0.47113284", "0.47075307", "0.47064805", "0.47054085", "0.4705176", "0.47042108", "0.4702419", "0.46975216", "0.46970984", "0.4696202", "0.46952766", "0.46923324", "0.46906248", "0.4687748", "0.46871313", "0.4684764", "0.46844527", "0.46828517", "0.46820894" ]
0.52686924
12
Instantiate an evaluator class.
def build_evaluator(cfg: CfgNode) -> EvaluatorBase:
    name = cfg["name"]
    evaluator = simple_build(name, cfg, EVALUATORS)
    return evaluator
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def evaluator(self, evaluator):\n self.__evaluator = evaluator", "def _create_evaluators(self):\n pass", "def build_evaluator(cls, cfg, dataset_name, output_folder=None):\n if output_folder is None:\n output_folder = os.path.join(cfg.OUTPUT_DIR, \"inference\")\n evaluator_list = []\n evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type\n if evaluator_type in [\"sem_seg\", \"coco_panoptic_seg\"]:\n evaluator_list.append(\n SemSegEvaluator(\n dataset_name,\n distributed=True,\n num_classes=cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,\n ignore_label=cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,\n output_dir=output_folder,\n )\n )\n if evaluator_type in [\"coco\", \"coco_panoptic_seg\"]:\n evaluator_list.append(COCOEvaluator(dataset_name, cfg, True, output_folder))\n if evaluator_type == \"coco_panoptic_seg\":\n evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder))\n if evaluator_type == \"cityscapes_instance\":\n assert (\n torch.cuda.device_count() >= comm.get_rank()\n ), \"CityscapesEvaluator currently do not work with multiple machines.\"\n return CityscapesInstanceEvaluator(dataset_name)\n if evaluator_type == \"cityscapes_sem_seg\":\n assert (\n torch.cuda.device_count() >= comm.get_rank()\n ), \"CityscapesEvaluator currently do not work with multiple machines.\"\n return CityscapesSemSegEvaluator(dataset_name)\n elif evaluator_type == \"pascal_voc\":\n return PascalVOCDetectionEvaluator(dataset_name)\n elif evaluator_type == \"lvis\":\n return LVISEvaluator(dataset_name, cfg, True, output_folder)\n if len(evaluator_list) == 0:\n raise NotImplementedError(\n \"no Evaluator for the dataset {} with the type {}\".format(\n dataset_name, evaluator_type\n )\n )\n elif len(evaluator_list) == 1:\n return evaluator_list[0]\n return DatasetEvaluators(evaluator_list)", "def build_evaluator(cls, cfg, dataset_name, output_folder=None):\n if output_folder is None:\n output_folder = os.path.join(cfg.OUTPUT_DIR, \"inference\")\n evaluator_list = []\n evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type\n if evaluator_type in [\"sem_seg\", \"coco_panoptic_seg\"]:\n evaluator_list.append(\n SemSegEvaluator(\n dataset_name,\n distributed=True,\n num_classes=cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,\n ignore_label=cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,\n output_dir=output_folder,\n )\n )\n if evaluator_type in [\"coco\", \"coco_panoptic_seg\"]:\n evaluator_list.append(COCOEvaluator(dataset_name, cfg, True, output_folder))\n if evaluator_type == \"coco_panoptic_seg\":\n evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder))\n if evaluator_type == \"cityscapes_instance\":\n assert (\n torch.cuda.device_count() >= comm.get_rank()\n ), \"CityscapesEvaluator currently do not work with multiple machines.\"\n return CityscapesInstanceEvaluator(dataset_name)\n if evaluator_type == \"cityscapes_sem_seg\":\n assert (\n torch.cuda.device_count() >= comm.get_rank()\n ), \"CityscapesEvaluator currently do not work with multiple machines.\"\n return CityscapesSemSegEvaluator(dataset_name)\n elif evaluator_type == \"pascal_voc\":\n return PascalVOCDetectionEvaluator(dataset_name)\n elif evaluator_type == \"lvis\":\n return LVISEvaluator(dataset_name, cfg, True, output_folder)\n if len(evaluator_list) == 0:\n raise NotImplementedError(\n \"no Evaluator for the dataset {} with the type {}\".format(\n dataset_name, evaluator_type\n )\n )\n elif len(evaluator_list) == 1:\n return evaluator_list[0]\n return DatasetEvaluators(evaluator_list)", "def __init__(\r\n self,\r\n 
generator,\r\n mode,\r\n tensorboard=None,\r\n verbose=1,\r\n **kwargs\r\n ):\r\n self.generator = generator\r\n\r\n if mode == 'recall':\r\n self.evaluate = eval_recall\r\n elif mode == 'accuracy':\r\n self.evaluate = eval_accuracy\r\n elif mode == 'mAP':\r\n self.evaluate = eval_mAP\r\n else:\r\n raise ValueError('unsupported evaluation callback mode')\r\n self.mode = mode\r\n\r\n self.tensorboard = tensorboard\r\n self.verbose = verbose\r\n self.kwargs = kwargs\r\n\r\n super(Evaluate, self).__init__()", "def sub_evaluator(self, ast: lark.Tree) -> 'Evaluator':\n return Evaluator(ast, activation=self.activation, functions=self.functions)", "def __new__(cls,\n input_fn,\n steps=100,\n name=None,\n hooks=None,\n exporters=None,\n delay_secs=120,\n throttle_secs=600):\n # Validate input_fn.\n _validate_input_fn(input_fn)\n\n # Validate steps.\n if steps is not None and steps <= 0:\n raise ValueError('Must specify steps > 0, given: {}'.format(steps))\n\n # Validate name.\n if name is not None and not isinstance(name, six.string_types):\n raise TypeError('`name` must be string, given: {}'.format(name))\n\n # Validate hooks.\n hooks = _validate_hooks(hooks)\n\n # Validate exporters.\n exporters = _validate_exporters(exporters)\n\n # Validate delay_secs.\n if delay_secs < 0:\n raise ValueError(\n 'Must specify delay_secs >= 0, given: {}'.format(delay_secs))\n\n # Validate throttle_secs.\n if throttle_secs < 0:\n raise ValueError(\n 'Must specify throttle_secs >= 0, given: {}'.format(throttle_secs))\n\n return super(EvalSpec, cls).__new__(\n cls,\n input_fn=input_fn,\n steps=steps,\n name=name,\n hooks=hooks,\n exporters=exporters,\n delay_secs=delay_secs,\n throttle_secs=throttle_secs)", "def build_evaluator(cls, cfg, dataset_name, output_folder=None):\n if output_folder is None:\n output_folder = os.path.join(cfg.OUTPUT_DIR, \"inference\")\n evaluator_list = []\n evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type\n if evaluator_type == \"sem_seg\":\n return SemSegEvaluator(\n dataset_name,\n distributed=True,\n output_dir=output_folder,\n num_classes=4,\n ignore_label=255\n )\n if evaluator_type == \"cityscapes_sem_seg\":\n assert (\n torch.cuda.device_count() >= comm.get_rank()\n ), \"CityscapesEvaluator currently do not work with multiple machines.\"\n return CityscapesSemSegEvaluator(dataset_name)\n if len(evaluator_list) == 0:\n raise NotImplementedError(\n \"no Evaluator for the dataset {} with the type {}\".format(\n dataset_name, evaluator_type\n )\n )\n if len(evaluator_list) == 1:\n return evaluator_list[0]\n return DatasetEvaluators(evaluator_list)", "def build_evaluator(cls, cfg, dataset_name, output_folder=None):\n if output_folder is None:\n output_folder = os.path.join(cfg.OUTPUT_DIR, \"inference\")\n evaluator_list = []\n evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type\n if evaluator_type in [\"coco\", \"coco_panoptic_seg\"]:\n evaluator_list.append(COCOEvaluator(dataset_name, cfg, True, output_folder))\n if len(evaluator_list) == 0:\n raise NotImplementedError(\n \"no Evaluator for the dataset {} with the type {}\".format(\n dataset_name, evaluator_type\n )\n )\n if len(evaluator_list) == 1:\n return evaluator_list[0]\n return DatasetEvaluators(evaluator_list)", "def create_eval(self):\n self.ev_id = \"ev-\" + base64.b32encode(os.urandom(10)).decode(\"ascii\")\n self.ev_name = \"Evaluation: \" + self.ml_name\n self._ml.create_evaluation(\n EvaluationId=self.ev_id,\n EvaluationName=self.ev_name,\n MLModelId=self.ml_id,\n 
EvaluationDataSourceId=self.fold.eval_ds_id\n )\n logger.info(\"Created Evaluation \" + self.ev_id)", "def evaluator(self):\n return self.__evaluator", "def setup_evaluation(evalfile, solufile, tolerance, evalstring=False):\n if evalstring:\n evaluation = IPETEvaluation.fromXML(evalfile)\n else:\n evaluation = IPETEvaluation.fromXMLFile(evalfile[\"path\"])\n\n evaluation.set_grouptags(True)\n evaluation.set_validate(solufile)\n evaluation.set_feastol(tolerance)\n return evaluation", "def __init__(self, generators: List[Generator] = None, evaluators: List[Evaluator] = None): # noqa: E501\n self.swagger_types = {\n 'generators': List[Generator],\n 'evaluators': List[Evaluator]\n }\n\n self.attribute_map = {\n 'generators': 'generators',\n 'evaluators': 'evaluators'\n }\n self._generators = generators\n self._evaluators = evaluators", "def __init__(self, grid_points, metrics_eval_func=None):\n self.grid_points = grid_points\n self.metrics_eval_func = metrics_eval_func or self._create_default_metrics_eval_func(grid_points)", "def eval(self):\n raise NotImplementedError('Must define eval function to use this base class')", "def instantiate(cls):\n default_xml = '<condition class=\"{0}\" plugin=\"run-condition@1.2\"/>'\n default_xml = default_xml.format(cls.get_jenkins_plugin_name())\n root_node = ElementTree.fromstring(default_xml)\n\n return cls(root_node)", "def clf_eval():\n y_true = np.random.randint(2, size=10000)\n y_pred = np.clip(np.random.normal(0.25, 0.3, size=y_true.shape) + y_true * 0.5, 0, 1)\n\n model_eval = ClassificationEvaluation(\n y_true=y_true,\n y_pred=y_pred,\n class_names=['a', 'b'],\n model_name='foo',\n )\n return model_eval", "def __init__(self, array: Tuple[int, ...]) -> None:\n self.evaluate: Callable[[str], int] = \\\n lambda program: FitnessEvaluator._evaluate(array, program)", "def __init__(self, expr: typing.Callable[[], typing.Any]):\n\n self.expr = expr", "def evaluator(evaluate):\r\n @functools.wraps(evaluate)\r\n def ecspy_evaluator(candidates, args):\r\n fitness = []\r\n for candidate in candidates:\r\n fitness.append(evaluate(candidate, args))\r\n return fitness\r\n ecspy_evaluator.single_evaluation = evaluate\r\n return ecspy_evaluator", "def __init__(self, model_name_or_path, max_length=1024, device='cuda:0', cache_dir=None):\n self.scorer = UniEvaluator(\n model_name_or_path='MingZhong/unieval-fact' if model_name_or_path == \"\" else model_name_or_path,\n max_length=max_length,\n device=device,\n cache_dir=cache_dir)\n self.task = 'fact'\n self.dim = 'consistency'", "def __init__(\n self,\n eval_fn: Callable[[Posting], Union[str, None]] = lambda p: None\n ):\n self.eval_fn = eval_fn", "def eval(cls, *args):\n raise NotImplementedError(\"subclasses need to override this method\")", "def build_evaluate_helper(cfg: CfgNode) -> EvaluateHelper:\n evaluator = build_evaluator(cfg.evaluator)\n helper = EvaluateHelper(evaluator)\n return helper", "def __init__(self, plant, orderList, simulator, evaluator):\n\t\tassert plant != None\n\t\tassert orderList != None\n\t\t\n\t\tself.plant = plant\n\t\tself.orderList = orderList\n\t\tself.simulator = simulator\n\t\tself.evaluator = evaluator\n\t\t\n\t\t# used for benchmarking\n\t\tself.simulatorTime = 0\n\t\t\n\t\t# enable/disable console output\n\t\tself.printing = True\n\t\t\n\t\t# parameters for the evolution strategy algorithm\n\t\tself.populationSize = 0\n\t\tself.indivMutationRate = 0\n\t\tself.selectionRate = 0\n\t\tself.mutationRange = 0\n\t\tself.iterations = 0", "def __init__(self, md, ev=None, var=None, 
out=None):\n self.model = md\n\n ## Construct default evaluator\n if ev is None:\n\n def _ev(md, df):\n df_res = md.evaluate_df(df)\n return df_res[md.out]\n\n self.ev = _ev\n self.var = self.model.var\n self.out = self.model.out\n\n ## Use given evaluator\n else:\n self.ev = ev\n self.var = var\n self.out = out\n\n ## Copy model data\n self.runtime = md.runtime(1)\n self.name = copy.copy(md.name)", "def _instantiate(cls, **kwargs):\n return cls(**kwargs)", "def _evaluation():\n return {\n 'type' : 'class',\n 'name' : 'evaluation',\n 'base' : None,\n 'is_abstract' : False,\n 'doc' : None,\n 'properties' : [\n ('date', 'datetime', '0.1', None),\n ('description', 'str', '0.1', None),\n ('did_pass', 'bool', '0.1', None),\n ('explanation', 'str', '0.1', None),\n ('specification', 'str', '0.1', None),\n ('specification_hyperlink', 'str', '0.1', None),\n ('type', 'str', '0.1', None),\n ('type_hyperlink', 'str', '0.1', None),\n ('title', 'str', '0.1', None),\n ],\n 'decodings' : [\n ('date', 'child::gmd:result/gmd:DQ_ConformanceResult/gmd:specification/gmd:CI_Citation/gmd:date/gmd:CI_Date/gmd:date/gco:Date'),\n ('description', 'gmd:evaluationMethodDescription/gco:CharacterString'),\n ('did_pass', 'child::gmd:result/gmd:DQ_ConformanceResult/gmd:pass/gco:Boolean'),\n ('explanation', 'child::gmd:result/gmd:DQ_ConformanceResult/gmd:explanation/gco:CharacterString'),\n ('type', 'child::gmd:result/@xlink:title'),\n ('type_hyperlink', 'child::gmd:result/@xlink:href'),\n ('specification', 'child::gmd:result/gmd:DQ_ConformanceResult/gmd:specification/@xlink:title'),\n ('specification_hyperlink', 'child::gmd:result/gmd:DQ_ConformanceResult/gmd:specification/@xlink:href'),\n ('title', 'child::gmd:result/gmd:DQ_ConformanceResult/gmd:specification/gmd:CI_Citation/gmd:title/gco:CharacterString'),\n ]\n }", "def evaluator(self, candidates, args):\n\t\traise NotImplementedError", "def evaluator(self, candidates, args):\r\n raise NotImplementedError", "def evaluator(test_config: TestConfig, criterion: nn.Module, model: nn.Module,\n device: torch.device) -> Engine:\n metrics, eval_metric, *_ = test_config\n metrics['loss'] = Loss(criterion,\n output_transform=lambda data: (data[0], data[1]))\n val_evaluator = create_supervised_evaluator(model, metrics, device,\n prepare_batch=prepare_batch)\n return val_evaluator", "def instantiate_callable_class(builder: IRBuilder, fn_info: FuncInfo) -> Value:\n fitem = fn_info.fitem\n func_reg = builder.add(Call(fn_info.callable_class.ir.ctor, [], fitem.line))\n\n # Set the environment attribute of the callable class to point at\n # the environment class defined in the callable class' immediate\n # outer scope. Note that there are three possible environment\n # class registers we may use. 
This depends on what the encapsulating\n # (parent) function is:\n #\n # - A nested function: the callable class is instantiated\n # from the current callable class' '__call__' function, and hence\n # the callable class' environment register is used.\n # - A generator function: the callable class is instantiated\n # from the '__next__' method of the generator class, and hence the\n # environment of the generator class is used.\n # - Regular function: we use the environment of the original function.\n curr_env_reg = None\n if builder.fn_info.is_generator:\n curr_env_reg = builder.fn_info.generator_class.curr_env_reg\n elif builder.fn_info.is_nested:\n curr_env_reg = builder.fn_info.callable_class.curr_env_reg\n elif builder.fn_info.contains_nested:\n curr_env_reg = builder.fn_info.curr_env_reg\n if curr_env_reg:\n builder.add(SetAttr(func_reg, ENV_ATTR_NAME, curr_env_reg, fitem.line))\n return func_reg", "def __init__(self, self_evaluation: str=None, mentor_evaluation: str=None): # noqa: E501\n self.swagger_types = {\n 'self_evaluation': str,\n 'mentor_evaluation': str\n }\n\n self.attribute_map = {\n 'self_evaluation': 'selfEvaluation',\n 'mentor_evaluation': 'mentorEvaluation'\n }\n\n self._self_evaluation = self_evaluation\n self._mentor_evaluation = mentor_evaluation", "def __init__(self, expression, result, is_singleton=False):\n\n self.expr = expression\n self.result = result\n self.is_singleton = is_singleton", "def custom_build_evaluator(cls, cfg, dataset_name, dataset, output_folder=None):\n dump_train = cfg.GLOBAL.DUMP_TRAIN\n return build_evaluator(cfg, dataset_name, dataset, output_folder, dump=dump_train)", "def __init__(self, str_exp=None, kind=None, scanner=None):\n self.kind = None\n self.name = 'undef'\n self.attr = None\n self.child = None\n self.left = None\n self.right = None\n self.code = None\n\n if str_exp is not None:\n logging.debug('========== EXP in init(NODE): SEXP = [' + str_exp + ']')\n scanner = lex.Scanner(rules)\n scanner.setString(str_exp)\n\n if kind is not None: # create an empty node\n self.kind = kind\n return\n\n if scanner is None:\n raise Exception('Fatal Error: scanner not defined')\n\n while scanner.curToken().type in FIRST:\n\n if scanner.curToken().type == LITERAL:\n self.name = scanner.curToken().name\n self.code = LITERAL\n self.kind = ATOM\n scanner.move()\n\n elif scanner.curToken().type == LPAREN:\n scanner.move() # skip the parentheses\n\n tmp = Exp(scanner=scanner) # tree of the expression between parentheses\n self.kind = tmp.kind\n self.attr = tmp.attr\n self.name = tmp.name\n self.left = tmp.left\n self.right = tmp.right\n self.child = tmp.child\n\n if scanner.curToken().type != RPAREN:\n raise ParserException(\"')' expected\")\n scanner.move()\n\n elif isUnitary(scanner.curToken().type):\n self.kind = UNARY\n self.name = scanner.curToken().name\n self.code = scanner.curToken().type\n\n # if token_type == ATTRIB # this is for existence and foreach\n\n scanner.move()\n self.child = Exp(scanner=scanner)\n\n # the scanner has been moved to a successive token\n if scanner.curToken().type == NULLTOKEN:\n break\n\n # check for infix operators\n if isBinary(scanner.curToken().type):\n operator_name = scanner.curToken().name\n operator_type = scanner.curToken().type\n scanner.move()\n\n # move the current node to the left of the tree\n lnode = Exp(kind=self.kind)\n lnode.name = self.name\n lnode.attr = self.attr\n lnode.child = self.child\n lnode.left = self.left\n lnode.right = self.right\n lnode.code = self.code\n\n # this node became the 
handler aka the binary operator\n self.code = operator_type\n self.name = operator_name\n self.kind = BINARY\n self.left = lnode\n # lookup the second child of the operator\n self.right = Exp(scanner=scanner)", "def runner_decrator(cls):\n\n def custom_build_evaluator(cls, cfg, dataset_name, dataset, output_folder=None):\n \"\"\"\n Create evaluator(s) for a given dataset.\n This uses the special metadata \"evaluator_type\" associated with each builtin dataset.\n For your own dataset, you can simply create an evaluator manually in your\n script and do not have to worry about the hacky if-else logic here.\n \"\"\"\n dump_train = cfg.GLOBAL.DUMP_TRAIN\n return build_evaluator(cfg, dataset_name, dataset, output_folder, dump=dump_train)\n\n def custom_test_with_TTA(cls, cfg, model):\n # In the end of training, run an evaluation with TTA\n # Only support some R-CNN models.\n logger.info(\"Running inference with test-time augmentation ...\")\n model = GeneralizedRCNNWithTTA(cfg, model)\n res = cls.test(cfg, model, output_folder=os.path.join(cfg.OUTPUT_DIR, \"inference_TTA\"))\n res = OrderedDict({k + \"_TTA\": v for k, v in res.items()})\n return res\n\n cls.build_evaluator = classmethod(custom_build_evaluator)\n cls.test_with_TTA = classmethod(custom_test_with_TTA)\n\n return cls", "def eval(self):\n return self.with_transforms(\"eval\")", "def __init__(self, out_dir, analysis_out_dir, evaluation_run_name,\n evaluation_name):\n if out_dir is None:\n out_dir = os.getcwd()\n self.out_dir = out_dir\n\n # Copy the analysis results to the report output directory, so that the HTML\n # report can be correctly rendered even if we move the csv files, plots,\n # etc.\n if out_dir != analysis_out_dir:\n analysis_file_dirs = evaluator.load_directory_tree(\n out_dir=analysis_out_dir,\n run_name=evaluation_run_name,\n evaluation_name=evaluation_name)\n shutil.copytree(\n analysis_file_dirs[evaluator.KEY_RUN_DIR],\n os.path.join(out_dir, evaluation_run_name))\n\n self.analysis_results = analyzer.get_analysis_results(\n out_dir, evaluation_run_name, evaluation_name)\n\n self.analysis_results[KEY_NUM_ESTIMABLE_SETS_STATS_DF] = (\n ReportGenerator.add_parsed_sketch_estimator_name_cols(\n self.analysis_results[KEY_NUM_ESTIMABLE_SETS_STATS_DF],\n analyzer.SKETCH_ESTIMATOR_NAME))\n\n self.analysis_type = None", "def __init__(\n self,\n name,\n expectedValue,\n extractedValue,\n weight=1.0,\n meritValue=None,\n evaluatorName=None,\n ):\n self.name = name\n \"\"\"Record Name\"\"\"\n\n self.expectedValue = expectedValue\n \"\"\"Value expected for this evaluator record\"\"\"\n\n self.extractedValue = extractedValue\n \"\"\"Actual value extracted for this evaluator record\"\"\"\n\n self.weight = weight\n \"\"\"Weight to be given to evaluator record\"\"\"\n\n self.evaluatorName = evaluatorName\n \"\"\"Name of evaluator that created this record\"\"\"\n\n self.meritValue = meritValue\n \"\"\"Value to be used in calculating global merit value\"\"\"\n\n self.errorFlag = False\n \"\"\"Flag indicating if error was experienced when extracting value\"\"\"", "def eval(expr):\n global simulator\n\n if simulator is None:\n print \"program is not running\"\n return\n return simulator.eval (expr)", "def eval(self):\n raise NotImplementedError", "def create_multi_node_evaluator(actual_evaluator, communicator):\n\n actual_evaluator._mn_original_evaluate = actual_evaluator.evaluate\n actual_evaluator._mn_communicator = communicator\n\n def new_evaluate(self):\n local_mean_dict = self._mn_original_evaluate()\n global_mean_dict = {\n 
name:\n self._mn_communicator.allreduce_obj(\n value) / self._mn_communicator.size\n for name, value in sorted(local_mean_dict.items())\n }\n return global_mean_dict\n\n actual_evaluator.evaluate = six.create_bound_method(\n new_evaluate, actual_evaluator)\n return actual_evaluator", "def specific_evaluator(self, evaluator: Path, bundle: Bundle):\n pass", "def eval(self, *args, **kwargs):\n raise NotImplementedError", "def getFactoryEvaluateExpressionOnly(self):\n # factory function for evaluateExpressionOnly\n def evaluateExpressionOnly_factory(expression):\n return self.evaluateExpressionOnly(expression)\n\n return evaluateExpressionOnly_factory", "def evaluate(self, evaluator: DialogEvaluator, output_path: str = None):\r\n if output_path is not None:\r\n os.makedirs(output_path, exist_ok=True)\r\n return evaluator(self, output_path)", "def evaluation(store, evaluation_obj):\n evaluation_obj['institute'] = store.institute(evaluation_obj['institute_id'])\n evaluation_obj['case'] = store.case(evaluation_obj['case_id'])\n evaluation_obj['variant'] = store.variant(evaluation_obj['variant_specific'])\n evaluation_obj['criteria'] = {criterion['term']: criterion for criterion in\n evaluation_obj['criteria']}\n evaluation_obj['classification'] = ACMG_COMPLETE_MAP[evaluation_obj['classification']]\n return evaluation_obj", "def __init__(self, num_workers, eval_function, timeout=None, maxtasksperchild=None):\n self.eval_function = eval_function\n self.timeout = timeout\n self.pool = Pool(processes=num_workers, maxtasksperchild=maxtasksperchild)", "def classFactory(iface): # pylint: disable=invalid-name\n #\n from .eco_valuator import EcoValuatorPlugin\n return EcoValuatorPlugin()", "def __init__(self, model_name_or_path, max_length=1024, device='cuda:0', cache_dir=None):\n self.scorer = UniEvaluator(\n model_name_or_path='MingZhong/unieval-sum' if model_name_or_path == \"\" else model_name_or_path,\n max_length=max_length,\n device=device,\n cache_dir=cache_dir)\n self.task = 'summarization'\n self.dimensions = ['coherence', 'consistency', 'fluency', 'relevance']", "def __init__(self, name=''):\n self.domain = (0, np.pi)\n self._evaluator_overrides = None\n self._name = name\n self._user_data = dict()\n self.__loaded_from = None", "def eval(self):\n pass", "def eval(self):\n pass", "def eval(self):\n pass", "def config_evaluation_instance(self):\n with tf.device(self.main_eval_device):\n self.inputs_eval = self.model.input\n self.labels_eval = tf.placeholder(tf.float32,\n [None, self.num_classes])\n self.logits_eval = self.model(self.inputs_eval)\n self.probs_eval = tf.nn.softmax(self.logits_eval)\n self.correct_pred_eval = tf.equal(tf.argmax(self.logits_eval, 1),\n tf.argmax(self.labels_eval, 1))\n self.accuracy_eval = tf.reduce_mean(\n tf.cast(self.correct_pred_eval, tf.float32))\n # Created for checking the MorphNet statistics only\n _, _, self.exporter_eval, self.cost_eval = self.embed_morphnet(\n input_boundary=[self.inputs_eval.op],\n output_boundary=[self.logits_eval.op],\n morphnet_regularization_strength=self.\n morphnet_regularization_strength_placeholder,\n morphnet_cost_thresholds=self.morphnet_target_cost_thresholds)", "def instantiate_from_string(class_name):\n class_name = convert_underscore_to_camel_case(class_name)\n return globals()[class_name]()", "def __init__(self,func ,domain_space, max_evals = 10):\n self.func = func\n # optimizing for FLOAT values\n #self.space = hp.uniform('x', 36, 200)\n # optimizing for Integer values\n self.space = domain_space\n self.algorithm = 
tpe.suggest # creating algorithm\n self.trials = Trials() # to check records\n self.max_evals = max_evals", "def __init__(self, expr, samples=100, domain=None, expr_domain=None,\n rtol=_rtol, atol=1e-15, name='inverse'):\n if expr_domain is None:\n expr_domain = expr.domain\n super().__init__(domain=domain, name=name)\n ## Expression or function to invert.\n if hasattr(expr, \"evaluator\"):\n self.e = expr\n else:\n # This is not picklable\n def _mp_version(x): # pylint: disable=unused-argument\n raise NotImplementedError(\"mpmath version not implemented\")\n self.e = SimpleExpression(\n domain=expr_domain,\n fp_terms=[expr],\n mp_terms=[_mp_version],\n desc=\"callable\", name=\"callable\",\n )\n ## Domain of function to invert (i.e. range of this inverse).\n self.expr_domain = expr_domain\n ## Number of samples for estimated inverse.\n self.samples = samples\n ## Relative tolerance for the computed inverse.\n self.rtol = rtol\n ## Absolute tolerance for the computed inverse.\n self.atol = atol", "def _make_executor(self, expr=None):\n raise NotImplementedError()", "def __init__(self, exp=None):\n\n # invalid input is now handled in ScanRE\n self.DefineRE(exp)\n\n # These functions are moved to DefineRE(), so they can be triggered\n # after defining new regular expressions\n # self.REtoNFA()\n # self.NFAtoDFA()\n # self.MinimizeDFA()", "def get_default_evaluator(self) -> EvaluatorConfig:\n raise NotImplementedError()", "def __init__(self, exprs):\n self.exprs = exprs", "def _generate_evaluaters(self):\n evaluators = []\n for para_key in self.parameter[1]:\n for value in self.parameter[1][para_key]:\n evaluators.append(evaluaterSearch.evaluaterSearch(self.parameter[2], [para_key, value]))\n self.evaluators = evaluators", "def _instantiate_benchmark_class(self, output_dir):\n module_import_path, class_name = self.config.test_class_str.rsplit('.', 1)\n module = importlib.import_module(module_import_path)\n class_ = getattr(module, class_name)\n\n instance = class_(output_dir=output_dir)\n instance.oss_report_object = benchmark_result.BenchmarkResult()\n return instance", "def __init__(\n self,\n model=None,\n parameterSpace=None,\n evalFunction=None,\n filename=None,\n saveAllModelOutputs=False,\n ncores=None,\n ):\n self.model = model\n if evalFunction is None and model is not None:\n self.evalFunction = self._runModel\n elif evalFunction is not None:\n self.evalFunction = evalFunction\n\n assert (evalFunction is not None) or (\n model is not None\n ), \"Either a model has to be specified or an evalFunction.\"\n\n assert parameterSpace is not None, \"No parameters to explore.\"\n\n if parameterSpace.kind == \"sequence\":\n assert model is not None, \"Model must be defined for sequential explore\"\n\n self.parameterSpace = parameterSpace\n self.exploreParameters = parameterSpace.dict()\n\n # TODO: use random ICs for every explored point or rather reuse the ones that are generated at model\n # initialization\n self.useRandomICs = False\n\n filename = filename or \"exploration.hdf\"\n self.filename = filename\n\n self.saveAllModelOutputs = saveAllModelOutputs\n\n # number of cores\n if ncores is None:\n ncores = multiprocessing.cpu_count()\n self.ncores = ncores\n logging.info(\"Number of processes: {}\".format(self.ncores))\n\n # bool to check whether pypet was initialized properly\n self.initialized = False\n self._initializeExploration(self.filename)\n\n self.results = None", "def setUpClass(cls):\n celltype_analyse = \"Adipocyte - breast\"\n data_type = \"promoters\"\n sample_type = 
\"primary cells\"\n parsed = False\n files_path = \"test\"\n cls.element_list = ('chr10:100027943..100027958,-', 'chr10:100174900..100174956,-',\n 'chr10:100204220..100204230,-', 'chr10:100206642..100206717,-')\n expression_obj = iext.CheckElementExpression(inputs=cv.test_promoter_file_name,\n element_list=cls.element_list,\n cell_type=celltype_analyse,\n data_type=data_type, sample_type=sample_type,\n parsed=parsed, files_path=files_path)\n cls.expression = expression_obj.export_expression_data(method=\"return\")", "def make_eval(s, filename=\"unknown\"):\n return _Eval(s.strip(), filename)", "def __init__(self, math_expr, case_sensitive=False):\r\n self.case_sensitive = case_sensitive\r\n self.math_expr = math_expr\r\n self.tree = None\r\n self.variables_used = set()\r\n self.functions_used = set()\r\n\r\n def vpa(tokens):\r\n \"\"\"\r\n When a variable is recognized, store it in `variables_used`.\r\n \"\"\"\r\n varname = tokens[0][0]\r\n self.variables_used.add(varname)\r\n\r\n def fpa(tokens):\r\n \"\"\"\r\n When a function is recognized, store it in `functions_used`.\r\n \"\"\"\r\n varname = tokens[0][0]\r\n self.functions_used.add(varname)\r\n\r\n self.variable_parse_action = vpa\r\n self.function_parse_action = fpa", "def __init__(self, model, tb_logger):\n\t\tsuper(Evaluator, self).__init__(self.predict_on_batch)\n\t\tself.model = model\n\t\t# FROC\n\t\tavg_fps = list(range(1, 26))\n\t\tavg_fps.append(0.5)\n\t\tavg_fps.sort()\n\t\ttags = ['froc_{}fp'.format(fp) for fp in avg_fps]\n\t\tfor avg_fp, tag in zip(avg_fps, tags):\n\t\t\tFROC([avg_fp], iou_threshold=0.5).attach(self, tag)\n\t\t# tqdm\n\t\tProgressBar(persist=True).attach(self)\n\t\t# Tensorboard logging\n\t\ttb_logger.attach(self,\n\t\t log_handler=OutputHandler(tag='validation',\n\t\t metric_names=tags,\n\t\t global_step_transform=lambda engine, name: engine.state.epoch),\n\t\t event_name=Events.EPOCH_COMPLETED)", "def build_evaluation(self, predictions, examples, **kwargs):\n return {}", "def make_local_evaluator(self,\n env_creator,\n policy_graph,\n extra_config=None):\n\n return self._make_evaluator(\n CustomEnvPolicyEvaluator,\n env_creator,\n policy_graph,\n 0,\n merge_dicts(\n # important: allow local tf to use more CPUs for optimization\n merge_dicts(\n self.config, {\n \"tf_session_args\": self.\n config[\"local_evaluator_tf_session_args\"]\n }),\n extra_config or {}))", "def eval(self):\n dataset = self.config.dataset\n class_config = dataset.class_config\n # it might make sense to make excluded_groups a field in an EvalConfig\n # in the future\n excluded_groups = ['train_scenes']\n\n scene_id_to_cfg = {s.id: s for s in dataset.all_scenes}\n\n @lru_cache(maxsize=len(dataset.all_scenes))\n def build_scene(scene_id: str) -> Scene:\n cfg = scene_id_to_cfg[scene_id]\n scene = cfg.build(\n class_config, self.tmp_dir, use_transformers=True)\n return scene\n\n # build and run each EvaluatorConfig for each scene group\n for e in self.config.evaluators:\n for group_name, group_ids in dataset.scene_groups.items():\n if group_name in excluded_groups:\n continue\n if len(group_ids) == 0:\n log.info(f'Skipping scene group \"{group_name}\". 
'\n 'Empty scene group.')\n continue\n group_scenes = (build_scene(id) for id in group_ids)\n evaluator = e.build(\n class_config, scene_group=(group_name, group_scenes))\n\n log.info(f'Running {type(evaluator).__name__} on '\n f'scene group \"{group_name}\"...')\n try:\n evaluator.process(group_scenes, self.tmp_dir)\n except FileNotFoundError:\n log.warn(f'Skipping scene group \"{group_name}\". '\n 'Either labels or predictions are missing for '\n 'some scene.')", "def _prepare_evaluate(self):\n labels = list()\n labels += ['num_procs', 'num_periods', 'is_debug', 'seed_emax', 'seed_sim']\n labels += ['num_draws_emax', 'num_agents_sim', 'num_types', 'edu_spec', 'version']\n labels += ['num_draws_prob', 'seed_prob']\n num_procs, num_periods, is_debug, seed_emax, seed_sim, num_draws_emax, num_agents_sim, \\\n num_types, edu_spec, version, num_draws_prob, seed_prob = \\\n dist_class_attributes(self.respy_base, *labels)\n\n periods_draws_emax = create_draws(num_periods, num_draws_emax, seed_emax, is_debug)\n periods_draws_sims = create_draws(num_periods, num_agents_sim, seed_sim, is_debug)\n\n disturbances = (periods_draws_emax, periods_draws_sims)\n\n # We want to maintain a pure PYTHON version for testing purposes.\n args = list()\n args += [num_periods, num_types, edu_spec['start'], edu_spec['max'], edu_spec['max'] + 1]\n state_space_info = respy_f2py.wrapper_create_state_space(*args)\n if self.mpi_setup == MISSING_INT:\n slavecomm = self.mpi_setup\n else:\n slavecomm = self.mpi_setup.py2f()\n self.set_up_baseline(periods_draws_emax, None)\n\n initial_conditions = get_initial_conditions(self.respy_base)\n\n args = (smm_sample_f2py, state_space_info, initial_conditions, disturbances, slavecomm)\n self.simulate_sample = partial(*args)", "def fromXml(xmlDoc, plant, orderList, simulator, evaluator):\n\t\toptimizer = Optimizer(plant, orderList, simulator, evaluator)\n\t\telement = xmlDoc.getElementsByTagName(\"optimizer\")\n\t\t\n\t\t# there should only be 1 optimizer node in the XML tree!\n\t\tassert len(element) == 1\n\t\telement = element[0]\n\t\t\n\t\t# load the different attributes\n\t\toptimizer.populationSize = \\\n\t\t\tint(element.getAttribute(\"populationSize\"))\n\t\toptimizer.mutationRange = \\\n\t\t\tint(element.getAttribute(\"mutationRange\"))\n\t\toptimizer.iterations = \\\n\t\t\tint(element.getAttribute(\"iterations\"))\n\t\toptimizer.indivMutationRate = \\\n\t\t\tfloat(element.getAttribute(\"indivMutationRate\"))\n\t\toptimizer.selectionRate = \\\n\t\t\tfloat(element.getAttribute(\"selectionRate\"))\n\t\t\n\t\treturn optimizer", "def launch_evaluations(self):\n self.report('Launching pending evaluations.')\n with self.optimizer() as opt:\n evals = {}\n evaluate_process = load_object(self.inputs.evaluate_process.value)\n for idx, inputs in opt.create_inputs().items():\n self.report('Launching evaluation {}'.format(idx))\n inputs_merged = ChainMap(inputs, self.inputs.get('evaluate', {}))\n if is_process_function(evaluate_process):\n _, node = run_get_node(evaluate_process, **inputs_merged)\n else:\n node = self.submit(evaluate_process, **inputs_merged)\n evals[self.eval_key(idx)] = node\n self.indices_to_retrieve.append(idx)\n return self.to_context(**evals)", "def __init__(self,literal,bindings,facts):\n\n self.literal = literal\n self.bindings = bindings\n self.facts = []\n for fact in facts:\n lit_pred = self.literal.split('(')[0].strip()\n fact_pred = fact.split('(')[0].strip()\n lit_args = self.literal.split('(')[1][:-1].split(',')\n fact_args = 
fact.split('(')[1][:-1].split(',')\n n = len(lit_args)\n m = len(fact_args)\n if lit_pred == fact_pred and n == m:\n self.facts.append(fact)", "def _setup_engine(class_definition, params):\n\n cls = load_from_module(class_definition)\n return cls(params)", "def __new__(cls, name):\n if not name:\n raise PydmrsValueError('a GPred must have non-empty name')\n return super().__new__(cls, name)", "def evaluator(*args, clusters: bool=True, configuration: Union[AnyStr, List[AnyStr], bool]=\"\",\n enable: bool=True, info: bool=True, name: Union[AnyStr, bool]=\"\", nodeType:\n Union[AnyStr, List[AnyStr], bool]=\"\", nodeTypeChildren: bool=True, priority:\n Union[int, bool]=0, valueName: Union[AnyStr, bool]=\"\", q=True, query=True,\n **kwargs)->Union[List[AnyStr], Any]:\n pass", "def _get_evaluators(self):\n if self._evaluator_overrides is not None:\n return self._evaluator_overrides\n return self._create_evaluators()", "def _make_callable(func):\n try:\n return func.evaluator()\n except AttributeError:\n return func", "def eval(self, expr, locals):\r\n sav = self.locals_ptr\r\n self.locals_ptr = locals\r\n x = eval(self.compile(expr), {\"__builtins__\":self.eval_allowed_globals}, locals)\r\n self.locals_ptr = sav\r\n return x", "def __init__(self, x=None, f=np.inf, evals=None):\r\n self.x = x\r\n self.x_geno = None\r\n self.f = f if f is not None and f is not np.nan else np.inf\r\n self.evals = evals\r\n self.evalsall = evals\r\n self.last = BlancClass()\r\n self.last.x = x\r\n self.last.f = f", "def __init__(self, criterion: Callable):\n self.criterion = criterion", "def _get_expression_evaluator(pipeline_builder):\n expression_evaluator = pipeline_builder.add_stage('Expression Evaluator')\n expression_evaluator.header_attribute_expressions = [\n {'attributeToSet': 'title', 'headerAttributeExpression': '${pipeline:title()}'},\n {'attributeToSet': 'name', 'headerAttributeExpression': '${pipeline:name()}'},\n {'attributeToSet': 'version', 'headerAttributeExpression': '${pipeline:version()}'},\n {'attributeToSet': 'id', 'headerAttributeExpression': '${pipeline:id()}'},\n ]\n return expression_evaluator, pipeline_builder", "def __init__(self, expected, test_func):\n self._f = test_func\n self._exp = expected", "def evaluator(model, config, test_dir=None):\n shottype = config.shottype\n dataset = config.data_set\n seed = config.seed\n if test_dir is None:\n test_data_gen_dir, _, _ = _generator_dir(\n config=config, target_gen=\"test\", data_dir=None\n )\n if test_dir is not None:\n print(\"Evaluating directory: '{}'.\".format(test_dir))\n test_data_gen_dir, _, _ = _generator_dir(\n config=config, target_gen=\"test\", data_dir=test_dir\n )\n score = model.evaluate_generator(test_data_gen_dir)\n print(\n \"Test metrics: \"\n \"Loss: {:.4f}, \"\n \"Accuracy: {:.4f}, \"\n \"Top 3 accuracy: {:.4f}\".format(score[0], score[1], score[2])\n )\n return score", "def __init__(self, *args):\n this = _CompuCell.new_Simulator(*args)\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self):\n self.expvalue = np.zeros(10)\n self.iter = np.zeros(10)\n self.epsilon = 0.1", "def _instantiate_sampler_from_reporter(cls, reporter):\n # Retrieve options and create new simulation.\n options = reporter.read_dict('options')\n options['mcmc_moves'] = reporter.read_mcmc_moves()\n sampler = cls(**options)\n\n # Display papers to be cited.\n sampler._display_citations()\n return sampler", "def __init__(self, name, operator, values):\n self.name = name\n self.operator = operator\n 
self.values = values", "def __init__(self, predictor, scaler):\n\n # Check arguments\n if predictor is None:\n raise ValueError('Cannot load genotyper predictor `None`')\n\n if scaler is None:\n raise ValueError('Cannot load feature scaler `None`')\n\n if isinstance(predictor, str):\n predictor = joblib.load(predictor)\n\n if isinstance(scaler, str):\n scaler = joblib.load(scaler)\n\n if not isinstance(predictor, SVC):\n raise ValueError('Predictor must be class sklearn.svm.SVC: Found \"{}\"'.format(type(predictor)))\n\n if not isinstance(scaler, StandardScaler):\n raise ValueError(\n 'Scaler must be class sklearn.preprocessing.StandardScaler: Found \"{}\"'.format(type(scaler))\n )\n\n # Set fields\n self.predictor = predictor\n self.scaler = scaler", "def eval(*args, **kwargs):\n\n pass", "def __init__(self, truth: FunctionType=None):\n if truth:\n self.truths = {truth}", "def __init__(self, linearExpression, indexingExpression, numericExpression = None):\n \n LinearExpression.__init__(self)\n\n self.linearExpression = linearExpression\n self.indexingExpression = indexingExpression\n self.numericExpression = numericExpression", "def _new(cls, rep, shape, domain):\n cls._check(rep, shape, domain)\n obj = object.__new__(cls)\n obj.rep = rep\n obj.shape = obj.rows, obj.cols = shape\n obj.domain = domain\n return obj", "def __init__(self, config, mode=None):\n super(FacenetEvaluation, self).__init__(config, mode)\n\n print('Create {}'.format(self.evaluation_name))\n\n # Preprocess Configurations and check legal\n self._must_have_config = [\n eval_fields.distance_measure,\n eval_fields.sampling\n ]\n\n self._default_values = {\n eval_fields.distance_measure: {\n eval_fields.threshold: {\n eval_fields.start: 0.01,\n eval_fields.end: 0.7,\n eval_fields.step: 0.01\n }\n },\n eval_fields.sampling: {\n facenet_fields.sample_ratio: 0.2,\n facenet_fields.class_sample_method: facenet_fields.random_sample\n }\n }\n # metrics with condition\n self._metric_with_threshold = [\n metric_fields.accuracy,\n metric_fields.validation_rate,\n metric_fields.false_accept_rate,\n metric_fields.true_positive_rate,\n metric_fields.false_positive_rate,\n ]\n # metrics without condition\n self._metric_without_threshold = [\n metric_fields.mean_accuracy,\n metric_fields.mean_validation_rate,\n metric_fields.area_under_curve,\n ]\n\n # Set default values for must-have configs\n for _config in self._must_have_config:\n if _config not in self.metrics:\n if _config in self._default_values:\n pass\n else:\n print('WARNING: {} should be assigned'.format(_config))\n else:\n print('Use assigned {}: {}'.format(_config, self.metrics[_config]))\n\n # Set distance thresholds by config\n distance_config = self.distance_measure\n distance_thres = distance_config[eval_fields.threshold]\n dist_start = distance_thres[eval_fields.start]\n dist_end = distance_thres[eval_fields.end]\n dist_step = distance_thres[eval_fields.step]\n # TODO @kv: Do we need sanity check for start < end?\n if dist_start > dist_end:\n raise ValueError('FaceEvaluation: distance threshold start > end')\n self._distance_thresholds = np.arange(dist_start, dist_end, dist_step)\n\n # Attributes\n if len(self.attributes) == 0:\n self._has_attribute = False\n elif len(self.attributes) == 1:\n if attribute_fields.all_classes in self.attributes:\n self._has_attribute = False\n elif attribute_fields.all_attributes in self.attributes:\n self._has_attribute = True\n else:\n self._has_attribute = True\n self.show_configs()", "def __init__(self, function, max_eval_concurrency, 
assert_omp=True,\n base_model=None):\n self.base_model = base_model\n self.set_max_eval_concurrency(max_eval_concurrency)\n self.num_evaluations = 0\n self.assert_omp = assert_omp\n self.pool_function = function", "def create_eval(model, metrics, device):\n metrics = metrics or {}\n\n if device:\n model.to(device)\n\n def _inference(engine, batch):\n model.eval()\n with torch.no_grad():\n data, label = batch\n num_channels = 1 if len(data.shape) == 2 else data.shape[1]\n data = data.view(-1, 1, data.shape[-1])\n data = data.to(device)\n label = label.to(device)\n label = label.float()\n\n output = model(data)\n output = output.view(-1, num_channels, output.shape[-1])\n\n return output, label\n\n engine = Engine(_inference)\n\n for name, metric in metrics.items():\n metric.attach(engine, name)\n\n return engine" ]
[ "0.66867024", "0.6603871", "0.6451372", "0.644928", "0.6330437", "0.6274948", "0.61652106", "0.6148524", "0.6130907", "0.5967345", "0.58819443", "0.5654034", "0.56327844", "0.56159383", "0.55843073", "0.5571741", "0.5563956", "0.5515962", "0.5513957", "0.5500064", "0.5477984", "0.5454834", "0.54501665", "0.54188055", "0.5409975", "0.53916436", "0.5371526", "0.53121454", "0.5309488", "0.5308969", "0.52973783", "0.5296217", "0.52847296", "0.52481335", "0.52415156", "0.5222058", "0.52054036", "0.51997924", "0.5169152", "0.51577455", "0.5126853", "0.5121245", "0.51034063", "0.51032287", "0.50858665", "0.5063112", "0.50587845", "0.50551814", "0.5046103", "0.50456464", "0.5035133", "0.5034383", "0.50315696", "0.50315696", "0.50315696", "0.50233734", "0.50229776", "0.5021465", "0.50171906", "0.5015297", "0.5013832", "0.49840182", "0.4983826", "0.4981403", "0.49706268", "0.49607515", "0.49569932", "0.4955113", "0.49458918", "0.49395576", "0.49377736", "0.49331415", "0.49253058", "0.49082974", "0.49019983", "0.49002063", "0.48882547", "0.48864552", "0.48797214", "0.48788264", "0.4840541", "0.4838321", "0.48373574", "0.4834416", "0.48107767", "0.48028934", "0.48012847", "0.47966513", "0.4795219", "0.47942725", "0.47938085", "0.4793423", "0.4784739", "0.47846958", "0.47839597", "0.47828245", "0.47644782", "0.47628057", "0.47598502", "0.47582507" ]
0.68942183
0
Instantiate an evaluate helper class.
def build_evaluate_helper(cfg: CfgNode) -> EvaluateHelper:
    evaluator = build_evaluator(cfg.evaluator)
    helper = EvaluateHelper(evaluator)
    return helper
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_evaluators(self):\n pass", "def evaluator(evaluate):\r\n @functools.wraps(evaluate)\r\n def ecspy_evaluator(candidates, args):\r\n fitness = []\r\n for candidate in candidates:\r\n fitness.append(evaluate(candidate, args))\r\n return fitness\r\n ecspy_evaluator.single_evaluation = evaluate\r\n return ecspy_evaluator", "def build_evaluator(cfg: CfgNode) -> EvaluatorBase:\n name = cfg[\"name\"]\n evaluator = simple_build(name, cfg, EVALUATORS)\n return evaluator", "def eval(self):\n raise NotImplementedError('Must define eval function to use this base class')", "def __new__(cls,\n input_fn,\n steps=100,\n name=None,\n hooks=None,\n exporters=None,\n delay_secs=120,\n throttle_secs=600):\n # Validate input_fn.\n _validate_input_fn(input_fn)\n\n # Validate steps.\n if steps is not None and steps <= 0:\n raise ValueError('Must specify steps > 0, given: {}'.format(steps))\n\n # Validate name.\n if name is not None and not isinstance(name, six.string_types):\n raise TypeError('`name` must be string, given: {}'.format(name))\n\n # Validate hooks.\n hooks = _validate_hooks(hooks)\n\n # Validate exporters.\n exporters = _validate_exporters(exporters)\n\n # Validate delay_secs.\n if delay_secs < 0:\n raise ValueError(\n 'Must specify delay_secs >= 0, given: {}'.format(delay_secs))\n\n # Validate throttle_secs.\n if throttle_secs < 0:\n raise ValueError(\n 'Must specify throttle_secs >= 0, given: {}'.format(throttle_secs))\n\n return super(EvalSpec, cls).__new__(\n cls,\n input_fn=input_fn,\n steps=steps,\n name=name,\n hooks=hooks,\n exporters=exporters,\n delay_secs=delay_secs,\n throttle_secs=throttle_secs)", "def eval(cls, *args):\n raise NotImplementedError(\"subclasses need to override this method\")", "def __init__(\r\n self,\r\n generator,\r\n mode,\r\n tensorboard=None,\r\n verbose=1,\r\n **kwargs\r\n ):\r\n self.generator = generator\r\n\r\n if mode == 'recall':\r\n self.evaluate = eval_recall\r\n elif mode == 'accuracy':\r\n self.evaluate = eval_accuracy\r\n elif mode == 'mAP':\r\n self.evaluate = eval_mAP\r\n else:\r\n raise ValueError('unsupported evaluation callback mode')\r\n self.mode = mode\r\n\r\n self.tensorboard = tensorboard\r\n self.verbose = verbose\r\n self.kwargs = kwargs\r\n\r\n super(Evaluate, self).__init__()", "def build_evaluator(cls, cfg, dataset_name, output_folder=None):\n if output_folder is None:\n output_folder = os.path.join(cfg.OUTPUT_DIR, \"inference\")\n evaluator_list = []\n evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type\n if evaluator_type in [\"sem_seg\", \"coco_panoptic_seg\"]:\n evaluator_list.append(\n SemSegEvaluator(\n dataset_name,\n distributed=True,\n num_classes=cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,\n ignore_label=cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,\n output_dir=output_folder,\n )\n )\n if evaluator_type in [\"coco\", \"coco_panoptic_seg\"]:\n evaluator_list.append(COCOEvaluator(dataset_name, cfg, True, output_folder))\n if evaluator_type == \"coco_panoptic_seg\":\n evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder))\n if evaluator_type == \"cityscapes_instance\":\n assert (\n torch.cuda.device_count() >= comm.get_rank()\n ), \"CityscapesEvaluator currently do not work with multiple machines.\"\n return CityscapesInstanceEvaluator(dataset_name)\n if evaluator_type == \"cityscapes_sem_seg\":\n assert (\n torch.cuda.device_count() >= comm.get_rank()\n ), \"CityscapesEvaluator currently do not work with multiple machines.\"\n return CityscapesSemSegEvaluator(dataset_name)\n elif evaluator_type == 
\"pascal_voc\":\n return PascalVOCDetectionEvaluator(dataset_name)\n elif evaluator_type == \"lvis\":\n return LVISEvaluator(dataset_name, cfg, True, output_folder)\n if len(evaluator_list) == 0:\n raise NotImplementedError(\n \"no Evaluator for the dataset {} with the type {}\".format(\n dataset_name, evaluator_type\n )\n )\n elif len(evaluator_list) == 1:\n return evaluator_list[0]\n return DatasetEvaluators(evaluator_list)", "def build_evaluator(cls, cfg, dataset_name, output_folder=None):\n if output_folder is None:\n output_folder = os.path.join(cfg.OUTPUT_DIR, \"inference\")\n evaluator_list = []\n evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type\n if evaluator_type in [\"sem_seg\", \"coco_panoptic_seg\"]:\n evaluator_list.append(\n SemSegEvaluator(\n dataset_name,\n distributed=True,\n num_classes=cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,\n ignore_label=cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,\n output_dir=output_folder,\n )\n )\n if evaluator_type in [\"coco\", \"coco_panoptic_seg\"]:\n evaluator_list.append(COCOEvaluator(dataset_name, cfg, True, output_folder))\n if evaluator_type == \"coco_panoptic_seg\":\n evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder))\n if evaluator_type == \"cityscapes_instance\":\n assert (\n torch.cuda.device_count() >= comm.get_rank()\n ), \"CityscapesEvaluator currently do not work with multiple machines.\"\n return CityscapesInstanceEvaluator(dataset_name)\n if evaluator_type == \"cityscapes_sem_seg\":\n assert (\n torch.cuda.device_count() >= comm.get_rank()\n ), \"CityscapesEvaluator currently do not work with multiple machines.\"\n return CityscapesSemSegEvaluator(dataset_name)\n elif evaluator_type == \"pascal_voc\":\n return PascalVOCDetectionEvaluator(dataset_name)\n elif evaluator_type == \"lvis\":\n return LVISEvaluator(dataset_name, cfg, True, output_folder)\n if len(evaluator_list) == 0:\n raise NotImplementedError(\n \"no Evaluator for the dataset {} with the type {}\".format(\n dataset_name, evaluator_type\n )\n )\n elif len(evaluator_list) == 1:\n return evaluator_list[0]\n return DatasetEvaluators(evaluator_list)", "def evaluate(self):\n pass", "def evaluate(self):\n pass", "def eval(self, *args, **kwargs):\n raise NotImplementedError", "def evaluate(self):\n raise NotImplementedError()", "def sub_evaluator(self, ast: lark.Tree) -> 'Evaluator':\n return Evaluator(ast, activation=self.activation, functions=self.functions)", "def evaluator(self, candidates, args):\n\t\traise NotImplementedError", "def evaluator(self, candidates, args):\r\n raise NotImplementedError", "def evaluate(self) :\n pass", "def evaluator(self, evaluator):\n self.__evaluator = evaluator", "def eval(self):\n raise NotImplementedError", "def evaluate(self):\n raise NotImplementedError(\"Abstract method\")", "def _evaluation():\n return {\n 'type' : 'class',\n 'name' : 'evaluation',\n 'base' : None,\n 'is_abstract' : False,\n 'doc' : None,\n 'properties' : [\n ('date', 'datetime', '0.1', None),\n ('description', 'str', '0.1', None),\n ('did_pass', 'bool', '0.1', None),\n ('explanation', 'str', '0.1', None),\n ('specification', 'str', '0.1', None),\n ('specification_hyperlink', 'str', '0.1', None),\n ('type', 'str', '0.1', None),\n ('type_hyperlink', 'str', '0.1', None),\n ('title', 'str', '0.1', None),\n ],\n 'decodings' : [\n ('date', 'child::gmd:result/gmd:DQ_ConformanceResult/gmd:specification/gmd:CI_Citation/gmd:date/gmd:CI_Date/gmd:date/gco:Date'),\n ('description', 'gmd:evaluationMethodDescription/gco:CharacterString'),\n 
('did_pass', 'child::gmd:result/gmd:DQ_ConformanceResult/gmd:pass/gco:Boolean'),\n ('explanation', 'child::gmd:result/gmd:DQ_ConformanceResult/gmd:explanation/gco:CharacterString'),\n ('type', 'child::gmd:result/@xlink:title'),\n ('type_hyperlink', 'child::gmd:result/@xlink:href'),\n ('specification', 'child::gmd:result/gmd:DQ_ConformanceResult/gmd:specification/@xlink:title'),\n ('specification_hyperlink', 'child::gmd:result/gmd:DQ_ConformanceResult/gmd:specification/@xlink:href'),\n ('title', 'child::gmd:result/gmd:DQ_ConformanceResult/gmd:specification/gmd:CI_Citation/gmd:title/gco:CharacterString'),\n ]\n }", "def evaluator(self):\n return self.__evaluator", "def build_evaluator(cls, cfg, dataset_name, output_folder=None):\n if output_folder is None:\n output_folder = os.path.join(cfg.OUTPUT_DIR, \"inference\")\n evaluator_list = []\n evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type\n if evaluator_type == \"sem_seg\":\n return SemSegEvaluator(\n dataset_name,\n distributed=True,\n output_dir=output_folder,\n num_classes=4,\n ignore_label=255\n )\n if evaluator_type == \"cityscapes_sem_seg\":\n assert (\n torch.cuda.device_count() >= comm.get_rank()\n ), \"CityscapesEvaluator currently do not work with multiple machines.\"\n return CityscapesSemSegEvaluator(dataset_name)\n if len(evaluator_list) == 0:\n raise NotImplementedError(\n \"no Evaluator for the dataset {} with the type {}\".format(\n dataset_name, evaluator_type\n )\n )\n if len(evaluator_list) == 1:\n return evaluator_list[0]\n return DatasetEvaluators(evaluator_list)", "def eval(self):\n pass", "def eval(self):\n pass", "def eval(self):\n pass", "def custom_build_evaluator(cls, cfg, dataset_name, dataset, output_folder=None):\n dump_train = cfg.GLOBAL.DUMP_TRAIN\n return build_evaluator(cfg, dataset_name, dataset, output_folder, dump=dump_train)", "def clf_eval():\n y_true = np.random.randint(2, size=10000)\n y_pred = np.clip(np.random.normal(0.25, 0.3, size=y_true.shape) + y_true * 0.5, 0, 1)\n\n model_eval = ClassificationEvaluation(\n y_true=y_true,\n y_pred=y_pred,\n class_names=['a', 'b'],\n model_name='foo',\n )\n return model_eval", "def build_evaluator(cls, cfg, dataset_name, output_folder=None):\n if output_folder is None:\n output_folder = os.path.join(cfg.OUTPUT_DIR, \"inference\")\n evaluator_list = []\n evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type\n if evaluator_type in [\"coco\", \"coco_panoptic_seg\"]:\n evaluator_list.append(COCOEvaluator(dataset_name, cfg, True, output_folder))\n if len(evaluator_list) == 0:\n raise NotImplementedError(\n \"no Evaluator for the dataset {} with the type {}\".format(\n dataset_name, evaluator_type\n )\n )\n if len(evaluator_list) == 1:\n return evaluator_list[0]\n return DatasetEvaluators(evaluator_list)", "def eval_test(eval_fn, group1, group2, verbose = 0):\n # Only allow known-safe eval_fn's\n if eval_fn in [ 'my_classifier' ]:\n return evaluate(globals()[eval_fn], group1, group2, verbose)\n else:\n raise Exception(\"Error: Tester tried to use an invalid evaluation function: '%s'\" % eval_fn)", "def evaluate(self):\n raise Exception(\"Not implemented.\")", "def __init__(self, grid_points, metrics_eval_func=None):\n self.grid_points = grid_points\n self.metrics_eval_func = metrics_eval_func or self._create_default_metrics_eval_func(grid_points)", "def _prepare_evaluate(self):\n labels = list()\n labels += ['num_procs', 'num_periods', 'is_debug', 'seed_emax', 'seed_sim']\n labels += ['num_draws_emax', 'num_agents_sim', 'num_types', 
'edu_spec', 'version']\n labels += ['num_draws_prob', 'seed_prob']\n num_procs, num_periods, is_debug, seed_emax, seed_sim, num_draws_emax, num_agents_sim, \\\n num_types, edu_spec, version, num_draws_prob, seed_prob = \\\n dist_class_attributes(self.respy_base, *labels)\n\n periods_draws_emax = create_draws(num_periods, num_draws_emax, seed_emax, is_debug)\n periods_draws_sims = create_draws(num_periods, num_agents_sim, seed_sim, is_debug)\n\n disturbances = (periods_draws_emax, periods_draws_sims)\n\n # We want to maintain a pure PYTHON version for testing purposes.\n args = list()\n args += [num_periods, num_types, edu_spec['start'], edu_spec['max'], edu_spec['max'] + 1]\n state_space_info = respy_f2py.wrapper_create_state_space(*args)\n if self.mpi_setup == MISSING_INT:\n slavecomm = self.mpi_setup\n else:\n slavecomm = self.mpi_setup.py2f()\n self.set_up_baseline(periods_draws_emax, None)\n\n initial_conditions = get_initial_conditions(self.respy_base)\n\n args = (smm_sample_f2py, state_space_info, initial_conditions, disturbances, slavecomm)\n self.simulate_sample = partial(*args)", "def eval_test(eval_fn, group1, group2, verbose = 0):\n # Only allow known-safe eval_fn's\n if eval_fn in [ 'my_classifier' ]:\n return evaluate(globals()[eval_fn], group1, group2, verbose)\n else:\n raise Exception, \"Error: Tester tried to use an invalid evaluation function: '%s'\" % eval_fn", "def eval_test(eval_fn, group1, group2, verbose = 0):\n # Only allow known-safe eval_fn's\n if eval_fn in [ 'my_classifier' ]:\n return evaluate(globals()[eval_fn], group1, group2, verbose)\n else:\n raise Exception, \"Error: Tester tried to use an invalid evaluation function: '%s'\" % eval_fn", "def eval(self):\n return self.with_transforms(\"eval\")", "def __init__(self, expression, result, is_singleton=False):\n\n self.expr = expression\n self.result = result\n self.is_singleton = is_singleton", "def eval(*args, **kwargs):\n\n pass", "def getFactoryEvaluateExpressionOnly(self):\n # factory function for evaluateExpressionOnly\n def evaluateExpressionOnly_factory(expression):\n return self.evaluateExpressionOnly(expression)\n\n return evaluateExpressionOnly_factory", "def evaluate(self):\r\n raise Exception(\"Not implemented.\")", "def evaluate(*args, **kwargs):\n yield from _generate(*args, **kwargs)", "def _evaluate(self, x):\n raise NotImplementedError()", "def evaluate(compiled_expression):", "def __init__(self, array: Tuple[int, ...]) -> None:\n self.evaluate: Callable[[str], int] = \\\n lambda program: FitnessEvaluator._evaluate(array, program)", "def test_custom_eval(self):\n\n player1 = \"Player1\"\n player2 = \"Player2\"\n game = isolation.Board(player1, player2)\n\n heuristic = game_agent.CustomEval()\n\n self.assertIsInstance(heuristic.score(game, player1), float,\n \"The heuristic function should return a floating point\")", "def build_evaluation(self, predictions, examples, **kwargs):\n return {}", "def evaluation(store, evaluation_obj):\n evaluation_obj['institute'] = store.institute(evaluation_obj['institute_id'])\n evaluation_obj['case'] = store.case(evaluation_obj['case_id'])\n evaluation_obj['variant'] = store.variant(evaluation_obj['variant_specific'])\n evaluation_obj['criteria'] = {criterion['term']: criterion for criterion in\n evaluation_obj['criteria']}\n evaluation_obj['classification'] = ACMG_COMPLETE_MAP[evaluation_obj['classification']]\n return evaluation_obj", "def evaluate(self, session, *args, evaluate_data_iterator=None, **kwargs):\n\n raise NotImplementedError(\"Implement 
evaluate() method\")", "def evaluate(self):\n\t\t\t\tif not hasattr(evaluate, 'value'):\n\t\t\t\t\tevaluate.value = func(self)\n\t\t\t\treturn evaluate.value", "def __init__(\n self,\n eval_fn: Callable[[Posting], Union[str, None]] = lambda p: None\n ):\n self.eval_fn = eval_fn", "def setup_evaluation(evalfile, solufile, tolerance, evalstring=False):\n if evalstring:\n evaluation = IPETEvaluation.fromXML(evalfile)\n else:\n evaluation = IPETEvaluation.fromXMLFile(evalfile[\"path\"])\n\n evaluation.set_grouptags(True)\n evaluation.set_validate(solufile)\n evaluation.set_feastol(tolerance)\n return evaluation", "def runner_decrator(cls):\n\n def custom_build_evaluator(cls, cfg, dataset_name, dataset, output_folder=None):\n \"\"\"\n Create evaluator(s) for a given dataset.\n This uses the special metadata \"evaluator_type\" associated with each builtin dataset.\n For your own dataset, you can simply create an evaluator manually in your\n script and do not have to worry about the hacky if-else logic here.\n \"\"\"\n dump_train = cfg.GLOBAL.DUMP_TRAIN\n return build_evaluator(cfg, dataset_name, dataset, output_folder, dump=dump_train)\n\n def custom_test_with_TTA(cls, cfg, model):\n # In the end of training, run an evaluation with TTA\n # Only support some R-CNN models.\n logger.info(\"Running inference with test-time augmentation ...\")\n model = GeneralizedRCNNWithTTA(cfg, model)\n res = cls.test(cfg, model, output_folder=os.path.join(cfg.OUTPUT_DIR, \"inference_TTA\"))\n res = OrderedDict({k + \"_TTA\": v for k, v in res.items()})\n return res\n\n cls.build_evaluator = classmethod(custom_build_evaluator)\n cls.test_with_TTA = classmethod(custom_test_with_TTA)\n\n return cls", "def evaluate(self, X):\n\n\t\tpass", "def evaluate():\n\t\t\t\tif not hasattr(evaluate, 'value'):\n\t\t\t\t\tevaluate.value = func()\n\t\t\t\treturn evaluate.value", "def evaluate(self, eval_data, eval_labels, eval_input_fn=\"default\"):\n # Validations:\n # If it is of type str, make sure is a valid\n if isinstance(eval_input_fn, str):\n # We use a list in case we want to extend in the future.\n if eval_input_fn in [\"default\"]:\n if eval_input_fn == \"default\":\n # pylint: disable=no-member\n eval_input_fn = tf.estimator.inputs.numpy_input_fn(\n x={\"x\": eval_data},\n y=eval_labels,\n num_epochs=1,\n shuffle=False\n )\n\n eval_res = self.classifier.evaluate(input_fn=eval_input_fn)\n return eval_res", "def __init__(self, expr: typing.Callable[[], typing.Any]):\n\n self.expr = expr", "def evaluate(self, eval_data, eval_labels, eval_input_fn):\n raise NotImplementedError(\"Method must be implemented by subclass\")", "def eval(*args, **kwargs)->Any:\n pass", "def test(self):\n self.eval()", "def eval(self) -> typing.Any:\n return self.expr()", "def create_eval(self):\n self.ev_id = \"ev-\" + base64.b32encode(os.urandom(10)).decode(\"ascii\")\n self.ev_name = \"Evaluation: \" + self.ml_name\n self._ml.create_evaluation(\n EvaluationId=self.ev_id,\n EvaluationName=self.ev_name,\n MLModelId=self.ml_id,\n EvaluationDataSourceId=self.fold.eval_ds_id\n )\n logger.info(\"Created Evaluation \" + self.ev_id)", "def _make_callable(func):\n try:\n return func.evaluator()\n except AttributeError:\n return func", "def evaluate(parser):\n required_args = (\n 'train_tfrecord',\n 'valid_tfrecord',\n 'predicted_data',\n 'actual_data',\n )\n cli_args = add_all_args(parser, EVALUATION, *required_args)\n evaluator = Evaluator(\n input_shape=cli_args.input_shape,\n model_configuration=cli_args.model_cfg,\n 
train_tf_record=cli_args.train_tfrecord,\n valid_tf_record=cli_args.valid_tfrecord,\n classes_file=cli_args.classes,\n max_boxes=cli_args.max_boxes,\n iou_threshold=cli_args.iou_threshold,\n score_threshold=cli_args.score_threshold,\n )\n predicted = pd.read_csv(cli_args.predicted_data)\n actual = pd.read_csv(cli_args.actual_data)\n evaluator.calculate_map(\n prediction_data=predicted,\n actual_data=actual,\n min_overlaps=cli_args.min_overlaps,\n display_stats=cli_args.display_stats,\n save_figs=cli_args.save_figs,\n plot_results=cli_args.plot_stats,\n )", "def create_multi_node_evaluator(actual_evaluator, communicator):\n\n actual_evaluator._mn_original_evaluate = actual_evaluator.evaluate\n actual_evaluator._mn_communicator = communicator\n\n def new_evaluate(self):\n local_mean_dict = self._mn_original_evaluate()\n global_mean_dict = {\n name:\n self._mn_communicator.allreduce_obj(\n value) / self._mn_communicator.size\n for name, value in sorted(local_mean_dict.items())\n }\n return global_mean_dict\n\n actual_evaluator.evaluate = six.create_bound_method(\n new_evaluate, actual_evaluator)\n return actual_evaluator", "def build_and_evaluate(\n X, y, classifier=SGDClassifier,\n verbose=True, ngram_range=(1,1), test_size=0.2, max_features=None\n ):\n\n def build(classifier, X, y=None, ngram_range=(1,1), max_features=None):\n \"\"\"\n Inner build function that builds a single model.\n \"\"\"\n if isinstance(classifier, type):\n classifier = classifier()\n\n model = Pipeline([\n ('vectorizer', TfidfVectorizer(\n ngram_range=ngram_range,\n stop_words='english',\n max_features=max_features\n )),\n ('classifier', classifier),\n ])\n\n model.fit(X, y)\n return model\n\n # Label encode the targets\n labels = LabelEncoder()\n y = labels.fit_transform(y)\n\n # Begin evaluation\n if verbose: print(\"Building for evaluation\")\n X_train, X_test, y_train, y_test = tts(X, y, test_size=test_size)\n \n model = build(classifier, \n X_train, \n y_train, \n ngram_range=ngram_range, \n max_features=max_features\n )\n\n model.labels_ = labels\n\n if verbose: print(\"Classification Report:\\n\")\n\n y_pred = model.predict(X_test)\n print(clsr(y_test, y_pred, target_names=labels.classes_))\n print(confusion_matrix(y_test, y_pred))\n\n return model", "def evaluator(test_config: TestConfig, criterion: nn.Module, model: nn.Module,\n device: torch.device) -> Engine:\n metrics, eval_metric, *_ = test_config\n metrics['loss'] = Loss(criterion,\n output_transform=lambda data: (data[0], data[1]))\n val_evaluator = create_supervised_evaluator(model, metrics, device,\n prepare_batch=prepare_batch)\n return val_evaluator", "def __init__(self, md, ev=None, var=None, out=None):\n self.model = md\n\n ## Construct default evaluator\n if ev is None:\n\n def _ev(md, df):\n df_res = md.evaluate_df(df)\n return df_res[md.out]\n\n self.ev = _ev\n self.var = self.model.var\n self.out = self.model.out\n\n ## Use given evaluator\n else:\n self.ev = ev\n self.var = var\n self.out = out\n\n ## Copy model data\n self.runtime = md.runtime(1)\n self.name = copy.copy(md.name)", "def _run_evaluator(self, func, stats):\n host_stats = stats['host_stats']\n host_caps = stats['host_caps']\n extra_specs = stats['extra_specs']\n share_stats = stats['share_stats']\n\n result = evaluator.evaluate(\n func,\n extra=extra_specs,\n stats=host_stats,\n capabilities=host_caps,\n share=share_stats)\n\n return result", "def __init__(self, expected, test_func):\n self._f = test_func\n self._exp = expected", "def __init__(self, 
rawPredictionCol=\"rawPrediction\", labelCol=\"label\",\n metricName=\"areaUnderROC\", metricParams={\"recallValue\": 0.6}):\n super(BinaryClassificationEvaluatorWithPrecisionAtRecall.__mro__[1], self).__init__()\n if (metricName == \"areaUnderROC\") | (metricName == \"areaUnderPR\"):\n self._java_obj = self._new_java_obj(\n \"org.apache.spark.ml.evaluation.BinaryClassificationEvaluator\", self.uid)\n #: param for metric name in evaluation (areaUnderROC|areaUnderPR)\n self.metricName = Param(self, \"metricName\",\n \"metric name in evaluation (areaUnderROC|areaUnderPR)\")\n self._setDefault(rawPredictionCol=\"rawPrediction\", labelCol=\"label\",\n metricName=\"areaUnderROC\")\n kwargs = self.__init__._input_kwargs\n if \"metricParams\" in kwargs.keys():\n kwargs.pop(\"metricParams\")\n \n elif (metricName == \"precisionAtGivenRecall\"):\n self.metricParams = Param(\n self, \"metricParams\", \"additional parameters for calculating the metric, such as the recall value in getPrecisionAtOneRecall\")\n self.metricName = Param(self, \"metricName\",\n \"metric name in evaluation (areaUnderROC|areaUnderPR)\")\n self._setDefault(rawPredictionCol=\"rawPrediction\", labelCol=\"label\",\n metricName=\"areaUnderROC\", metricParams={\"recallValue\": 0.6})\n kwargs = self.__init__._input_kwargs\n \n else:\n raise ValueError(\"Invalid input metricName: {}\".format(self.metricName))\n \n self._set(**kwargs)\n \n # for the computing precision at given recall in PySpark (in case it's only requested in calling evaluate())\n self.initMetricParams = metricParams\n self.initMetricNameValue = metricName\n self.rawPredictionColValue = rawPredictionCol\n self.labelColValue = labelCol", "def evaluate_raw(self):\n raise NotImplementedError", "def _evaluate(self, x, y):\n raise NotImplementedError()", "def instantiate(cls):\n default_xml = '<condition class=\"{0}\" plugin=\"run-condition@1.2\"/>'\n default_xml = default_xml.format(cls.get_jenkins_plugin_name())\n root_node = ElementTree.fromstring(default_xml)\n\n return cls(root_node)", "def __evaluate(self, var_x):\n # pylint: disable=W0612,C0103\n x = var_x\n # pylint: enable=W0612,C0103\n return eval(self.expr)", "def eval(self):\n raise NotImplemented()", "def __init__(self, func, args, kwargs, res): # make test\n\n # Check if func is a callable function\n if callable(func):\n self.func = func\n else:\n self.func = lambda *args, **kwargs: func\n\n # Store parameters in class\n self.args = args or [] # If args == None, set self.args to an empty list\n self.kwargs = kwargs or {} # If kwargs == None, set self.kwargs to an empty dict\n self.res = res\n\n\n self._tolerance = 1.0e-10 # Variable used to check for equality in case of float", "def evaluate(\n self,\n test_data=None,\n print_report=True,\n save_path=\"ktrain_classification_report.csv\",\n class_names=[],\n ):\n return self.validate(\n val_data=test_data,\n print_report=print_report,\n save_path=save_path,\n class_names=class_names,\n )", "def __init__(\n self,\n name,\n expectedValue,\n extractedValue,\n weight=1.0,\n meritValue=None,\n evaluatorName=None,\n ):\n self.name = name\n \"\"\"Record Name\"\"\"\n\n self.expectedValue = expectedValue\n \"\"\"Value expected for this evaluator record\"\"\"\n\n self.extractedValue = extractedValue\n \"\"\"Actual value extracted for this evaluator record\"\"\"\n\n self.weight = weight\n \"\"\"Weight to be given to evaluator record\"\"\"\n\n self.evaluatorName = evaluatorName\n \"\"\"Name of evaluator that created this record\"\"\"\n\n self.meritValue = meritValue\n 
\"\"\"Value to be used in calculating global merit value\"\"\"\n\n self.errorFlag = False\n \"\"\"Flag indicating if error was experienced when extracting value\"\"\"", "def eval(self, X, Y):\n raise NotImplementedError()", "def test_evaluate():\n X_train, X_test, y_train, y_test = src.load()\n clf, score = src.train(X_train, y_train)\n test_score = src.evaluate(clf, X_test, y_test)\n assert isinstance(test_score, float)", "def __evaluateLocal__(self, featureVals):\n pass", "def _generate_evaluaters(self):\n evaluators = []\n for para_key in self.parameter[1]:\n for value in self.parameter[1][para_key]:\n evaluators.append(evaluaterSearch.evaluaterSearch(self.parameter[2], [para_key, value]))\n self.evaluators = evaluators", "def eval(self, expr, locals):\r\n sav = self.locals_ptr\r\n self.locals_ptr = locals\r\n x = eval(self.compile(expr), {\"__builtins__\":self.eval_allowed_globals}, locals)\r\n self.locals_ptr = sav\r\n return x", "def _make_executor(self, expr=None):\n raise NotImplementedError()", "def evaluate(self, w, X, y):\n pass # this is because it's a base class, it will be implemented below.", "def evaluate(self, X):\n\n raise NotImplementedError(\"not implemented!\")", "def _evaluate(model):\n _recompile(model)\n if isinstance(eval_dataset, tuple):\n eval_images, eval_labels = eval_dataset\n return model.evaluate(\n eval_images, eval_labels, verbose=verbose, return_dict=True)\n else:\n return model.evaluate(eval_dataset, verbose=verbose, return_dict=True)", "def _instantiate_benchmark_class(self, output_dir):\n module_import_path, class_name = self.config.test_class_str.rsplit('.', 1)\n module = importlib.import_module(module_import_path)\n class_ = getattr(module, class_name)\n\n instance = class_(output_dir=output_dir)\n instance.oss_report_object = benchmark_result.BenchmarkResult()\n return instance", "def usesEvaluationManager(self):\n \n pass", "def __init__(self, math_expr, case_sensitive=False):\r\n self.case_sensitive = case_sensitive\r\n self.math_expr = math_expr\r\n self.tree = None\r\n self.variables_used = set()\r\n self.functions_used = set()\r\n\r\n def vpa(tokens):\r\n \"\"\"\r\n When a variable is recognized, store it in `variables_used`.\r\n \"\"\"\r\n varname = tokens[0][0]\r\n self.variables_used.add(varname)\r\n\r\n def fpa(tokens):\r\n \"\"\"\r\n When a function is recognized, store it in `functions_used`.\r\n \"\"\"\r\n varname = tokens[0][0]\r\n self.functions_used.add(varname)\r\n\r\n self.variable_parse_action = vpa\r\n self.function_parse_action = fpa", "def __call__(self, *args):\n\n func_env = Environment(self.parent)\n self.define_args(func_env, *args)\n return evaluate(self.body, func_env)", "def evaluate(self, test_data, test_labels):\n raise NotImplementedError", "def __init__(self, exprs):\n self.exprs = exprs", "def evaluate(self, edict):\n pass", "def get_evaluate_fn(testset):\n x_test, y_test = testset\n\n # The `evaluate` function will be called after every round by the strategy\n def evaluate(\n server_round: int,\n parameters: fl.common.NDArrays,\n config: Dict[str, fl.common.Scalar],\n ):\n model = get_model() # Construct the model\n model.set_weights(parameters) # Update model with the latest parameters\n loss, accuracy = model.evaluate(x_test, y_test, verbose=VERBOSE)\n return loss, {\"accuracy\": accuracy}\n\n return evaluate", "def eval_obj(self):\n if self._eval_obj is not self.null:\n return self._eval_obj\n else:\n evaled_args = [getattr(i, \"eval_obj\", i) for i in self._tuple[1:]]\n arg_grps = toolz.groupby(lambda x: 
isinstance(x, KwdPair), evaled_args)\n evaled_args = arg_grps.get(False, [])\n evaled_kwargs = arg_grps.get(True, [])\n\n op = self._tuple[0]\n op = getattr(op, \"eval_obj\", op)\n\n try:\n op_sig = inspect.signature(op)\n except ValueError:\n # This handles some builtin function types\n _eval_obj = op(*(evaled_args + [kw.value for kw in evaled_kwargs]))\n else:\n op_args = op_sig.bind(*evaled_args, **{kw.arg: kw.value for kw in evaled_kwargs})\n op_args.apply_defaults()\n\n _eval_obj = op(*op_args.args, **op_args.kwargs)\n\n # assert not isinstance(_eval_obj, ExpressionTuple)\n\n self._eval_obj = _eval_obj\n return self._eval_obj", "def __init__(self,func ,domain_space, max_evals = 10):\n self.func = func\n # optimizing for FLOAT values\n #self.space = hp.uniform('x', 36, 200)\n # optimizing for Integer values\n self.space = domain_space\n self.algorithm = tpe.suggest # creating algorithm\n self.trials = Trials() # to check records\n self.max_evals = max_evals", "def geteval(self, key, default=None):\n r = self.get(key,default)\n if isinstance(r, classad.ExprTree):\n return r.eval()\n return r", "def evaluateValue(compiled_expression):", "def evaluate(self, X1, X2):\r\n raise NotImplementedError()" ]
[ "0.64568245", "0.6257798", "0.6237631", "0.6131028", "0.6065035", "0.60551", "0.60094994", "0.5996954", "0.59962183", "0.59601563", "0.59601563", "0.59439415", "0.5902795", "0.589313", "0.5882588", "0.58795404", "0.58464766", "0.5829064", "0.5801519", "0.5770252", "0.57448375", "0.5722661", "0.5693077", "0.5693014", "0.5693014", "0.5693014", "0.5674838", "0.56657946", "0.56647134", "0.5660649", "0.56550634", "0.5616177", "0.5591625", "0.5587484", "0.5587484", "0.5585046", "0.55817693", "0.5548264", "0.55437857", "0.5541169", "0.5535559", "0.5504974", "0.5464738", "0.5425915", "0.5423356", "0.54209524", "0.5418542", "0.5418241", "0.5417341", "0.5412784", "0.5407626", "0.5407309", "0.5401187", "0.5398831", "0.5375622", "0.5371533", "0.5370027", "0.5369903", "0.53676397", "0.5350803", "0.53474474", "0.53387326", "0.532872", "0.5328034", "0.5309288", "0.5280703", "0.52689284", "0.5265304", "0.5260567", "0.5251753", "0.52352047", "0.5233142", "0.52201694", "0.5203897", "0.52037466", "0.51794666", "0.5177003", "0.5176692", "0.51608205", "0.51503426", "0.5146793", "0.51420957", "0.51319045", "0.51266867", "0.51247996", "0.51236546", "0.5116609", "0.51069254", "0.5091309", "0.50820404", "0.50811565", "0.5081043", "0.5069084", "0.5054666", "0.5043619", "0.5041678", "0.5034276", "0.5033459", "0.5021824", "0.50188816" ]
0.70881444
0
Plot x and y axes of dfs in a common graph.
def plot(x, y, *dfs):
    ax = None
    for df in dfs:
        ax = df[[x, y]].set_index(x).plot(kind='line', ylim=(0, None), xlim=(0, None), ax=ax)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw_plot(ax, dfs, legend, x, y, xscale, yaxis_max):\n xticks = dfs_all_values(dfs, x)\n # loop over all pandas.DataFrame objects\n for df in dfs:\n # setting the x-column as an index is required to draw the y-column\n # as a function of x argument\n df = df.set_index(x)\n # plot line on the subplot\n df[y].plot.line(ax=ax, rot=45, marker='.')\n\n if xscale == \"linear\":\n ax.set_xscale(xscale)\n else:\n ax.set_xscale(xscale, base=2)\n ax.xaxis.set_major_formatter(ScalarFormatter())\n\n ax.set_xticks(xticks)\n ax.set_xlabel(get_label(x))\n ax.set_ylabel(get_label(y))\n ax.set_ylim(bottom=0)\n if yaxis_max is not None:\n ax.set_ylim(top=float(yaxis_max))\n ax.legend(legend, fontsize=6)\n ax.grid(True)", "def plot2D(*dfs, columns=None, figsize=(5, 5), plot_titles=False):\n fig, ax = plt.subplots(figsize=figsize)\n\n for df, color in zip(dfs, cycle(COLORS)):\n X, Y = (df[col] for col in columns)\n plt.scatter(X, Y, c=color, marker=MARKER)\n\n for axis, col in zip(['x', 'y'], columns):\n getattr(ax, f'set_{axis}label')(col)\n\n if plot_titles:\n for df in dfs:\n for i, j, text in zip(df.iloc[:, 0], df.iloc[:, 1], df.index):\n corr = 2\n ax.annotate(text, xy=(i + corr, j + corr))\n\n plt.show()", "def plot_graph(self) -> None:", "def plot_graph(self) -> None:\n\n nodes_on_graph = self.dw_graph.get_all_v()\n for k, v in nodes_on_graph.items():\n if v.position is None:\n x_rand = random.uniform(0.5, self.dw_graph.v_size())\n y_rand = random.uniform(0.5, self.dw_graph.v_size())\n v.position = (x_rand, y_rand)\n x_vals = []\n y_vals = []\n n = list(nodes_on_graph.keys())\n for k, v in nodes_on_graph.items(): # draw nodes\n x_vals.append(v.position[0])\n y_vals.append(v.position[1])\n\n fig, ax = plt.subplots()\n plt.plot(x_vals, y_vals, 'ro', markersize=5, data=\"d\")\n\n for p, txt in enumerate(n):\n ax.annotate(n[p], (x_vals[p]+0.00003, y_vals[p]), color='g')\n\n for n in nodes_on_graph:\n n1 = self.dw_graph.get_nodes(n)\n x = n1.position[0]\n y = n1.position[1]\n for r in self.dw_graph.all_out_edges_of_node(n):\n dx = self.dw_graph.get_nodes(r).position[0]\n dy = self.dw_graph.get_nodes(r).position[1]\n ax.quiver(x, y, dx-x, dy-y, angles='xy', scale_units='xy', scale=1)\n #plt.arrow(x, y, dx - x, dy - y, head_width=0.0009, width=0.00005, length_includes_head=True)\n\n\n plt.xlabel(\"x axis \")\n plt.ylabel(\"y axis \")\n plt.title(\"The title of the graph\")\n plt.show()", "def graph(df):\n df.plot()\n plt.show()", "def show_graphs ():\n plt.ylim = (0, 300)\n plt.xlim = (0, 300)\n #Set up lidar plot to figure 1\n lidar_plot = plt.figure (1)\n #Assign title\n plt.title ('Lidar data')\n #Assign data\n plt.imshow (lidar_clean)\n #Set up radar plot to figure 2\n radar_plot = plt.figure (2)\n #Assign title\n plt.title ('Radar data')\n #Assign data\n plt.imshow (radar_clean)\n #Show plots\n plt.show ()", "def show_graphs(self):\n self.frequency_plot_graph.show()\n self.resistance_graph.show()\n self.temperature_plot_graph.show()\n self.pressure_plot_graph.show()\n self.humidity_plot_graph.show()\n self.overview_graph.show()\n self.overview_graph.setXRange(-1000, 5000)", "def plot_dat_file(dat_paths: [str]):\n import pandas as pd\n import matplotlib.pyplot as plt\n\n fig, ax = plt.subplots(1, 3, sharey=\"all\", sharex=\"col\", figsize=(8, 6))\n for i, dat_path in enumerate(dat_paths):\n if i == i:\n skipfoot = 11 + 9\n else:\n skipfoot = 11\n dat_file = pd.read_csv(\n dat_path,\n skiprows=3,\n skipfooter=skipfoot,\n header=None,\n delim_whitespace=True,\n engine=\"python\",\n )\n depth = 
dat_file.values[:, 0]\n vp = dat_file.values[:, 1]\n vs = dat_file.values[:, 3]\n dens = dat_file.values[:, 5]\n\n ax[0].plot(vp, depth, label=f\"nr {i}\")\n\n ax[1].plot(vs, depth)\n ax[2].plot(dens, depth)\n ax[0].set_ylim(ax[0].get_ylim()[::-1])\n ax[0].legend()\n plt.show()", "def plot(self):\n\t\tplot_chain(self.database_path, self.temp_folder)\n\t\tplot_density(self.database_path, self.temp_folder, self.cal_params)", "def plot_2D(df):\n import matplotlib.pyplot as plt\n fig = plt.figure(figsize=(8,6))\n fig.clf()\n #Get the current Axes instance on the current figure matching the given \n #keyword args, or create one.\n ax = fig.gca()\n df.plot(kind = 'scatter', x = 'x', y = 'y', ax = ax, alpha = 0.5)\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n ax.set_title('X vs. Y')\n return 'Done'", "def setup_axes(self):\n fig = plt.figure(1)\n axs = fig.add_subplot(1, 1, 1)\n fig.clf()\n axs = plt.subplots(1, 2)\n ax1 : plt.axis = axs[0]\n ax2 : plt.axis = axs[1]\n fig.canvas.draw()\n \n line1_t, = ax1.plot([], label='train')\n line1_v, = ax1.plot([], label='val')\n\n ax1.set_title('Loss vs Iterations')\n ax1.set_xlabel('Iterations')\n ax1.set_ylabel('Loss')\n ax1.grid(True)\n ax1.autoscale()\n # ax1.legend()\n\n line2_t, = ax2.plot([], label='train')\n line2_v, = ax2.plot([], label='val')\n\n ax2.set_title('Accuracy vs Iterations')\n ax2.set_xlabel('Time')\n ax2.set_ylabel('Percent Accuracy')\n ax2.grid(True)\n ax2.autoscale()\n # ax2.legend()\n\n lines = [line1_t, line1_v, line2_t, line2_v]\n\n return fig, ax1, ax2, lines", "def plot_xdop_distribution(dRtk: dict, dfXDOP: pd.DataFrame, dfXDOPdisp: pd.DataFrame, logger: logging.Logger, showplot: bool = False):\n cFuncName = colored(os.path.basename(__file__), 'yellow') + ' - ' + colored(sys._getframe().f_code.co_name, 'green')\n\n logger.info('{func:s}: creating XDOP distribution plot'.format(func=cFuncName))\n\n # select colors for xDOP coordinate difference\n colors = ('blue', 'green', 'cyan', 'red')\n\n # set up the plot\n plt.style.use('ggplot')\n\n # subplots\n fig = plt.figure(figsize=(14.0, 9.0), tight_layout=False)\n fig.suptitle('{syst:s} - {posf:s} - {date:s}: XDOP'.format(posf=dRtk['info']['rtkPosFile'], syst=dRtk['syst'], date=dRtk['Time']['date']))\n\n # create a grid for lotting the XDOP line plots and 6 XDOP distribution plots\n gs = GridSpec(2, 4)\n\n # plot the XDOPs and #SVs on the first axis\n ax = fig.add_subplot(gs[0, :]) # first row, span all columns\n plot_xdop_svs(dfDops=dfXDOP, colors=colors, axis=ax, logger=logger)\n\n # add the xDOP distributions\n axisShare = None\n for col, xdop, color in zip((0, 1, 2, 3), dfXDOPdisp.columns[-4:], colors):\n # create exis for this figure\n if axisShare is None:\n ax = fig.add_subplot(gs[1, col])\n axisShare = ax\n else:\n ax = fig.add_subplot(gs[1, col], sharey=axisShare)\n # ax.get_yaxis().set_ticklabels([])\n ax.tick_params(labelleft=False)\n\n # plot distribution for a DOP value\n plot_xdop_histogram(dfDopsDist=dfXDOPdisp, xdop=xdop, color=color, axis=ax, logger=logger)\n\n # save the plot in subdir png of GNSSSystem\n amutils.mkdir_p(os.path.join(dRtk['info']['dir'], 'png'))\n pngName = os.path.join(dRtk['info']['dir'], 'png', os.path.splitext(dRtk['info']['rtkPosFile'])[0] + '-XDOP.png')\n fig.savefig(pngName, dpi=fig.dpi)\n\n if showplot:\n plt.show(block=True)\n else:\n plt.close(fig)", "def plot_datasets(datasets):\n\n\t# plt.grid(True)\n\n\tfor ds in datasets:\n\t\t(f, ax) = plt.subplots()\n\n\t\tax.grid(True)\n\n\t\tif 'xl' in ds:\n\t\t\tax.set_xlabel(ds['xl'])\n\t\tif 'yl' 
in ds:\n\t\t\tax.set_ylabel(ds['yl'])\n\n\t\tif 'xl' in ds and 'yl' in ds:\n\t\t\ttitle = \"%s from %s\" % (ds['yl'], ds['xl'])\n\t\t\tf.canvas.set_window_title(title)\n\n\t\tif 'x' in ds:\n\t\t\ttitle = \"%s from %s\" % (ds['yl'], ds['xl']) if 'title' not in ds else ds['title']\n\t\t\tf.canvas.set_window_title(title)\n\t\t\tmarker = 'y1m' in ds and ds['y1m'] or None\n\t\t\tax.plot(ds['x'], ds['y'], label=ds['yl'], marker=marker)\n\t\tif 'x2' in ds:\n\t\t\t# label = \"y2\" if 'y2l' not in ds else ds['y2l']\n\t\t\tlabel = 'y2l' in ds and ds['y2l'] or 'y2'\n\t\t\tmarker = 'y2m' in ds and ds['y2m'] or None\n\t\t\tax.plot(ds['x2'], ds['y2'], label=label, marker=marker)\n\t\t\tax.legend()\n\t\tif 'x3' in ds:\n\t\t\t# label = \"y3\" if 'y3l' not in ds else ds['y3l']\n\t\t\tlabel = 'y3l' in ds and ds['y3l'] or 'y3'\n\t\t\tmarker = 'y3m' in ds and ds['y3m'] or None\n\t\t\tax.plot(ds['x3'], ds['y3'], label=label, marker=marker)\n\t\t\tax.legend()\n\n\t\tif 'sub' in ds:\n\t\t\tfor sub in ds['sub']:\n\t\t\t\t# ax.set_ylabel(sub['yl'])\n\t\t\t\t# ax.set_xlabel(sub['xl'])\n\t\t\t\t# title = \"%s from %s\" % (sub['yl'], sub['xl']) if 'title' not in sub else sub['title']\n\t\t\t\t# f.canvas.set_window_title(title)\n\n\t\t\t\tlabel = 'yl' in sub and sub['yl']\n\t\t\t\tmarker = 'ym' in sub and sub['ym'] or None\n\t\t\t\tax.plot(sub['x'], sub['y'], label=label, marker=marker)\n\t\t\t\tax.legend()\n\n\t\tax.spines['left'].set_position('zero')\n\t\tax.spines['bottom'].set_position('zero')\n\t\tax.spines['left'].set_smart_bounds(True)\n\t\tax.spines['bottom'].set_smart_bounds(True)\n\n\tplt.show()", "def plot_all(self):\n self.plot_ramps()\n self.plot_groupdq()", "def plot_all_df_columns(df, col_nums, title='', xlabel=''):\n i = 1\n values = df.values\n for col in col_nums:\n plt.subplot(len(col_nums), 1, i)\n plt.plot(values[:, col])\n plt.title(title)\n plt.ylabel(dr_df.columns[col])\n plt.xlabel(xlabel)\n i += 1\n plt.tight_layout()\n plt.show()", "def plot(self):\n self.plotsite()\n self.plotbond()\n plt.show()", "def plot(self, data_frame):\n self.axes.plot(data_frame, 'o-')\n self.axes.set_ylim(0.0, 200.0)\n self.fig.autofmt_xdate()\n self.draw()", "def show(dfs):\n\n for df in dfs:\n print('{} -> {}'.format(df[0], df[1]))", "def showVs(df, feat1, feat2):\n colors = ['blue', 'red', 'green', 'coral']\n for u in range(len(cBouts)):\n plt.plot(f[f['clust_ind'] == u][feat1],\n f[f['clust_ind'] == u][feat2], 'o', color=colors[u],\n alpha=0.6, markeredgecolor='none')\n plt.xlabel(feat1)\n plt.ylabel(feat2)\n plt.show()\n return", "def plot_xdop_svs(dfDops: pd.DataFrame, colors: tuple, axis, logger: logging.Logger):\n cFuncName = colored(os.path.basename(__file__), 'yellow') + ' - ' + colored(sys._getframe().f_code.co_name, 'green')\n\n logger.info('{func:s}: creating XDOP / #SVs vs time plot'.format(func=cFuncName))\n\n axis.set_ylim([0, 24])\n axis.set_ylabel('#SVs [-]', fontsize='large', color='grey')\n # axis.set_xlabel('Time [sec]', fontsize='large')\n\n axis.fill_between(dfDops['DT'], 0, dfDops['#SVs'], alpha=0.5, linestyle='-', linewidth=3, color='grey', label='#SVs', interpolate=False)\n # plot PDOP on second y-axis\n axRight = axis.twinx()\n\n axRight.set_ylim([0, 15])\n axRight.set_ylabel('XDOP [-]', fontsize='large')\n\n # plot XDOPs (last 4 columns)\n for dop, color in zip(dfDops.columns[-4:], colors):\n axRight.plot(dfDops['DT'], dfDops[dop], linestyle='-', marker='.', markersize=1, color=color, label=dop)\n\n # add the legend to the plot\n axRight.legend(loc=\"upper right\")\n\n # set title\n 
axis.set_title('Visible satellites & XDOP', fontsize='x-large')\n\n # create the ticks for the time axis\n dtFormat = plot_utils.determine_datetime_ticks(startDT=dfDops['DT'].iloc[0], endDT=dfDops['DT'].iloc[-1])\n\n if dtFormat['minutes']:\n # axis.xaxis.set_major_locator(dates.MinuteLocator(byminute=range(10, 60, 10), interval=1))\n pass\n else:\n axis.xaxis.set_major_locator(dates.HourLocator(interval=dtFormat['hourInterval'])) # every 4 hours\n axis.xaxis.set_major_formatter(dates.DateFormatter('%H:%M')) # hours and minutes\n\n axis.xaxis.set_minor_locator(dates.DayLocator(interval=1)) # every day\n axis.xaxis.set_minor_formatter(dates.DateFormatter('\\n%d-%m-%Y'))\n\n axis.xaxis.set_tick_params(rotation=0)\n for tick in axis.xaxis.get_major_ticks():\n # tick.tick1line.set_markersize(0)\n # tick.tick2line.set_markersize(0)\n tick.label1.set_horizontalalignment('center')", "def plot_main(self):\n\n f, axes = plt.subplots(2, 3, figsize=(16, 8))\n self.data_plot(ax=axes[0, 0])\n self.model_plot(ax=axes[0, 1])\n self.normalized_residual_plot(ax=axes[0, 2], v_min=-6, v_max=6)\n self.source_plot(ax=axes[1, 0], convolution=False, deltaPix_source=0.01, numPix=100)\n self.convergence_plot(ax=axes[1, 1], v_max=1)\n self.magnification_plot(ax=axes[1, 2])\n f.tight_layout()\n f.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0., hspace=0.05)\n return f, axes", "def plot_graph():\n name = request.args.get('instance')\n name = str(name)\n distance = request.args.get('distance')\n path = request.args.get('path')\n if name == 'Custom':\n coords = request.args.get('coords')\n coords = str(coords)\n nodes = custom_nodes(coords)\n else:\n nodes = create_nodes(name)\n fig = Figure()\n axis = fig.add_subplot(1, 1, 1)\n\n axis.set_title(name + \" - Distance: \"+ str(distance))\n path = str(path).split(',')\n path = [int(i) for i in path]\n for i in range(len(path) - 1):\n\n start_node = nodes[path[i]]\n x1, y1 = start_node.x, start_node.y\n axis.scatter(x1, y1, c = 'b', label = str(path[i]))\n axis.text(x1,y1, str(path[i]))\n end_node = nodes[path[i+1]]\n x2, y2 = end_node.x, end_node.y\n axis.plot([x1,x2], [y1, y2])\n\n last_node = nodes[path[len(path)-1]]\n x1, y1 = last_node.x, last_node.y\n axis.text(x1,y1, str(path[len(path)-1]))\n\n begin_node = nodes[path[0]]\n x2, y2 = begin_node.x, begin_node.y\n axis.scatter(x1, y1, c = 'b', label = str(path[len(path)-1]))\n axis.plot([x1,x2], [y1, y2])\n\n output = io.BytesIO()\n FigureCanvas(fig).print_png(output)\n return Response(output.getvalue(), mimetype=\"image/png\")", "def multi_plot(data, fname=None):\n for entry in data['data']:\n plt.plot(entry['x'], entry['y'], label=entry['label'])\n\n plt.title(data['title'])\n plt.xlabel(data['x_label'])\n plt.ylabel(data['y_label'])\n\n #plt.legend(loc='best')\n\n Plotter.show(data['title'], fname=fname)", "def dyplot(self, x, y, name, dir):\n fig, ax1 = plt.subplots(figsize=(6, 4), dpi=500, facecolor='white')\n ax1.plot(x, '-b*', ms=2, linewidth=1)\n ax1.set_xlabel('Epoch', fontsize=9)\n ax1.set_ylabel('Discriminator Loss per Epoch', fontsize=9, color='b')\n ax1.tick_params('y', colors='b')\n\n ax2 = ax1.twinx()\n ax2.plot( y, '-r*', ms=2, linewidth=1)\n ax2.set_ylabel('Generator Loss per Epoch', fontsize=9, color='r')\n ax2.tick_params('y', colors='r')\n fig.tight_layout()\n plt.savefig('{}/{}.png'.format(dir, 'Loss-Adversarial-' + name))\n plt.close()", "def plotLines( self ):\n \n ## plot tree in dfs manner\n def plotLines( node_id ):\n\n node = self.mTree.node( node_id )\n\n left = 
self.mNodeWidthsStart[node_id]\n right = self.mNodeWidthsEnd[node_id]\n height = self.mNodeHeights[node_id] \n\n if right != left and node_id != self.mTree.root:\n self.addElements( self.mDecoratorHorizontalBranches.getElements(\n node_id,\n self.getHeaderWidth() + left,\n self.getHeaderWidth() + right,\n self.getHeaderHeight() + height ))\n \n\n for s in node.succ:\n\n new_height = self.mNodeHeights[s]\n self.addElements( self.mDecoratorVerticalBranches.getElements(\n node_id,\n self.getHeaderWidth() + right,\n self.getHeaderHeight() + height,\n self.getHeaderHeight() + new_height ))\n \n TreeTools.TreeDFS( self.mTree, self.mTree.root,\n pre_function = plotLines )", "def plot_graph(self):\n g = self.get_graph()\n plt.title(\"Our graph:\" + g.__str__())\n plt.xlabel(\"X\")\n plt.ylabel(\"-<\") # I should flip 'Y' letter so I decided to write it by a tricky way. :)\n for src, node in g.get_all_v().items():\n # Print the node point\n if node.location is None:\n pos = self.get_random_location() # get a elegant location\n node.location = GeoLocation(pos)\n plt.plot(node.location.x, node.location.y, marker='o', markerfacecolor='red', markersize=3, color='yellow')\n plt.text(node.location.x, node.location.y, str(node.key))\n # Print the edge line\n for dest in g.all_out_edges_of_node(src).keys():\n x1 = g.get_all_v()[src].location.x\n y1 = g.get_all_v()[src].location.y\n if g.get_all_v()[dest].location is None:\n pos = self.get_random_location()\n g.get_all_v()[dest].location = GeoLocation(pos)\n g.get_all_v()[dest].location = GeoLocation(pos)\n x2 = g.get_all_v()[dest].location.x\n y2 = g.get_all_v()[dest].location.y\n plt.arrow(x1, y1, x2 - x1, y2 - y1, width=0.00001, linewidth=0.05)\n plt.show()", "def plot_graphs(x_values, y_values, x_label, y_label, title, path, legend=None):\n # If `x_values.ndim` and `y_values.ndim` are equal to 2\n # and `x_values.shape[0]` is equal to `y_values.shape[1]`\n # for instance, `plot_graphs` does not crash and saves\n # a wrong plot. That is why `x_values.ndim` and `y_values.ndim`\n # are checked.\n if x_values.ndim != 1:\n raise ValueError('`x_values.ndim` is not equal to 1.')\n if y_values.ndim != 2:\n raise ValueError('`y_values.ndim` is not equal to 2.')\n \n # Matplotlib is forced to display only\n # whole numbers on the x-axis if the\n # x-axis values are integers. 
Matplotlib\n # is also forced to display only whole\n # numbers on the y-axis if the y-axis\n # values are integers.\n current_axis = plt.figure().gca()\n if numpy.issubdtype(x_values.dtype, numpy.integer):\n current_axis.xaxis.set_major_locator(matplotlib.ticker.MaxNLocator(integer=True))\n if numpy.issubdtype(y_values.dtype, numpy.integer):\n current_axis.yaxis.set_major_locator(matplotlib.ticker.MaxNLocator(integer=True))\n \n # For the x-axis or the y-axis, if the range\n # of the absolute values is outside [1.e-4, 1.e4],\n # scientific notation is used.\n plt.ticklabel_format(style='sci',\n axis='both',\n scilimits=(-4, 4))\n \n # `plt.plot` returns a list.\n handle = []\n for i in range(y_values.shape[0]):\n handle.append(plt.plot(x_values, y_values[i, :])[0])\n plt.title(title)\n plt.xlabel(x_label)\n plt.ylabel(y_label)\n if legend is not None:\n plt.legend(handle, legend)\n plt.savefig(path)\n plt.clf()", "def display_feds(list1, list2):\n if len(list1) != len(list2):\n print(\"In display_feds: lists must be of the same length\")\n return \n fig = plt.figure(dpi=128, figsize=(10, 6))\n fed_list_answer = fed_list(list1, list2)\n plt.plot(range(len(fed_list_answer)), fed_list_answer, c='red', alpha=0.5)\n \n plt.title(\"Feature edit distances between corresponding pairs\", fontsize = 24)\n plt.xlabel('', fontsize =16)\n #fig.autofmt_xdate()\n plt.ylabel(\"Distance\", fontsize =16)\n plt.tick_params(axis='both', which = 'major', labelsize=16)\n\n plt.show()", "def scree_plot(self, ev):\n plt.scatter(range(1,len(ev)+1), ev)\n plt.plot(range(1,len(ev)+1), ev)\n plt.title(\"Scree Plot\")\n plt.xlabel(\"Factors\")\n plt.ylabel(\"Eigenvalue\")\n plt.grid()\n plt.show()", "def plot(self):\n t = np.linspace(0, self.days, self.days + 1)\n fig, (ax1, ax2, ax3, ax4, ax5) = plt.subplots(nrows=5, sharex='all')\n ax1.plot(t, self.S, label=\"Susceptible\", color='r')\n ax1.set_ylabel(\"Number of Susceptible People\")\n ax1.set_title(\"Strong Infecitous Model SEIRV Simulation\")\n ax3.plot(t, self.I, label=\"Active Cases\", color='b')\n ax3.set_ylabel(\"Active Cases\")\n ax2.plot(t, self.E, label=\"Exposed\", color='c')\n ax2.set_ylabel(\"# of Exposed\")\n ax4.plot(t, self.R, label=\"Recovered\", color='m')\n ax5.set_xlabel(\"Days\")\n ax4.set_ylabel('Number of Recovered')\n ax5.plot(t, self.V, label=\"Vaccinated\")\n ax5.set_ylabel(\"# Vaccinated\")\n ax1.legend()\n ax2.legend()\n ax3.legend()\n ax4.legend()\n plt.show()\n return fig", "def plot(self):\n pass", "def show_plot(self):\n # Tight layout\n plt.tight_layout()\n # Remove whitespace between upper and lower plots\n plt.subplots_adjust(hspace=0, wspace=0.3) \n # Tick marks on all sides of each plot and show legend\n for j in range(2):\n axes=self.ax[j]\n axes.tick_params(axis='both', which='both', direction='in',\n top=True, right=True)\n legend=axes.legend(framealpha=0)\n # Save and show\n plt.savefig('CMB_lensing_potential_LCDM_MG.pdf', format='pdf')\n plt.show()", "def plot(self):\n fig, ax = plt.subplots()\n ticklabels = [item.strftime('%b %d') for item in self.series.index]\n ax.xaxis.set_major_formatter(ticker.FixedFormatter(ticklabels))\n\n plt.ylabel('#Cases')\n i = 0\n for y in self.countries:\n plt.plot(ticklabels, self.series[y], GRAPH_FORMATS[i], label=y)\n i += 1\n ax.set_xticklabels(ticklabels, rotation='vertical', fontsize=10)\n plt.legend()\n plt.grid()\n if self.log:\n plt.yscale(\"log\")\n plt.show()", "def diplayGraph(root, df, side, title, color):\n\n figure = plt.Figure(figsize=(5, 4), dpi=100)\n 
figure.patch.set_facecolor(\"black\")\n ax = figure.add_subplot(111)\n line = FigureCanvasTkAgg(figure, root)\n line.get_tk_widget().pack(side= side, fill=tk.BOTH)\n df.plot(kind=\"line\", legend=True, ax=ax, color=color, marker=\"o\", fontsize=10)\n ax.set_facecolor(\"black\")\n ax.xaxis.set_major_locator(mdates.AutoDateLocator())\n ax.grid(b=True, which=\"major\", color=\"#666666\", linestyle=\"-\")\n ax.spines[\"top\"].set_color(\"white\")\n ax.spines[\"bottom\"].set_color(\"white\")\n ax.spines[\"left\"].set_color(\"white\")\n ax.spines[\"right\"].set_color(\"white\")\n ax.tick_params(axis=\"x\", colors=\"white\")\n ax.tick_params(axis=\"y\", colors=\"white\")\n ax.set_title(title, color=\"white\")\n\n return figure, ax, line", "def plot(self,tickers = None,variable = \"close\"):\n data = self.get_dataframe(tickers,variable)\n fig = data.iplot(world_readable=False,asFigure=True)\n iplot(fig)", "def plot(self):\n\t\tself.plotOfTF().plot()", "def etio_subplot(df, ax, title, graph_color='skyblue'):\n\n post_dx_histo = histo_dx_includes(df)\n hist_df = pd.DataFrame({\"Dx\": post_dx_histo.index, \"Count\": post_dx_histo.data})\n #hist_df = hist_df.drop(1)\n print(hist_df)\n\n graph_range = range(1,len(hist_df.index)+1)\n ax.hlines(y=graph_range, xmin=0, xmax=hist_df['Count'], color=graph_color)\n ax.plot(hist_df['Count'], graph_range, \"D\", color=graph_color)\n ax.set_yticks(range(1, len(hist_df['Dx'])+1))\n ax.set_yticklabels(hist_df['Dx'], fontsize='10')\n\n ax.set_title(title, fontsize='10')\n return ax", "def plotGraph(self, dayArray, commentsArray, upvotesArray, retweetsArray, likesArray):\n self.canvas.get_tk_widget().place(relx=0.219, rely=0.519, relheight=0.389, relwidth=0.352)\n\n # Clears graph before plotting to prevent appending two graphs at once\n self.figure.clear()\n # self.figure.\n plt = self.figure.add_subplot(1, 1, 1)\n x = []\n max_log_size = 5000\n for i in dayArray:\n i = ''.join(i.split())\n i = i[:-5]\n x.append(i)\n\n # now there's 3 sets of points\n yCO = commentsArray\n yUV = upvotesArray\n yRT = retweetsArray\n yLK = likesArray\n\n if max(yCO)>=max_log_size or max(yUV)>=max_log_size or max(yRT)>=max_log_size or max(yLK)>=max_log_size:\n plt.set(yscale=\"log\")\n plt.plot(x, yCO, label='Comments', marker='o', color='red')\n plt.plot(x, yUV, label='Upvotes', marker='o', color='#fa93b0')\n plt.plot(x, yRT, label='Retweets', marker='o', color='#2374f7')\n plt.plot(x, yLK, label='Likes', marker='o', color='#accafa')\n\n plt.legend()\n self.figure.canvas.draw()", "def plot_xyz():\n plt.subplot(3,1,1) # for x axis\n plt.title('x value v.s. time')\n plt.grid(True)\n plt.ylabel('X')\n plt.xlabel('t')\n plt.plot(x, '-r')\n\n plt.subplot(3,1,2) # for y axis\n plt.title('y value v.s. time')\n plt.grid(True)\n plt.ylabel('Y')\n plt.xlabel('t')\n plt.plot(y, '-g')\n\n plt.subplot(3,1,3) # for z axis\n plt.title('z value v.s. 
time')\n plt.grid(True)\n plt.ylabel('Z')\n plt.xlabel('t')\n plt.plot(z, '-b')", "def plot(self):\n\n fig, ax = plt.subplots()\n\n for run in self.runs:\n # Load datasets\n data_measure = run.get_dataset(\"stats-collect_link_congestion-raw-*.csv\")\n data_sp = run.get_dataset(\"stats-collect_link_congestion-sp-*.csv\")\n\n # Extract link congestion information\n data_measure = data_measure['msgs']\n data_sp = data_sp['msgs']\n\n # Compute ECDF and plot it\n ecdf_measure = sm.distributions.ECDF(data_measure)\n ecdf_sp = sm.distributions.ECDF(data_sp)\n\n variable_label = \"\"\n size = run.orig.settings.get('size', None)\n if size is not None:\n variable_label = \" (n=%d)\" % size\n\n ax.plot(ecdf_measure.x, ecdf_measure.y, drawstyle='steps', linewidth=2,\n label=\"U-Sphere%s\" % variable_label)\n ax.plot(ecdf_sp.x, ecdf_sp.y, drawstyle='steps', linewidth=2,\n label=u\"Klasični usmerjevalni protokol%s\" % variable_label)\n\n ax.set_xlabel('Obremenjenost povezave')\n ax.set_ylabel('Kumulativna verjetnost')\n ax.grid()\n ax.axis((28, None, 0.99, 1.0005))\n self.convert_axes_to_bw(ax)\n\n legend = ax.legend(loc='lower right')\n if self.settings.GRAPH_TRANSPARENCY:\n legend.get_frame().set_alpha(0.8)\n fig.savefig(self.get_figure_filename())", "def test_2d_plot(self):\n db = pd.HDFStore('test.h5')\n df_iv = db['iv']\n dates = df_iv[df_iv['dte'] == 30]['date']\n impl_vols = df_iv[df_iv['dte'] == 30]['impl_vol']\n db.close()\n\n print df_iv.sort_values('impl_vol').head()\n\n plt.plot(dates, impl_vols)\n plt.xlabel('date')\n plt.ylabel('impl_vols')\n plt.show()", "def scatterplot():\r\n #get the data for the plots\r\n reddata = np.array([[1,1],[1,3],[4,2]])\r\n bluedata = np.array([[0,1],[0,5],[1,2],[2,3],[3,4]])\r\n yellowdata = np.array([[1,4],[2,2],[3,5],[6,2]])\r\n #convert the data to a pd DataFrame\r\n df = pd.DataFrame(reddata, columns=[\"x\",\"y\"])\r\n df1 = pd.DataFrame(bluedata, columns=[\"x\",\"y\"])\r\n df2 = pd.DataFrame(yellowdata, columns=[\"x\",\"y\"])\r\n #create the plot\r\n ax = df.plot.scatter(x=\"x\",y=\"y\",label=\"Red Group\",color=\"Red\",title=\"Scatter Plot in Three Colors\",xlim=(-1,7),ylim=(0,6))\r\n ax1 = df1.plot.scatter(x=\"x\",y=\"y\",label=\"Blue Group\",color=\"Blue\",ax=ax)\r\n ax2 = df2.plot.scatter(x=\"x\",y=\"y\",label=\"Yellow Group\",color=\"Yellow\",ax=ax)\r\n #get the figure from the axes and save it\r\n fig = ax.get_figure()\r\n fig.savefig(\"my_scatter_plot.png\")", "def _plot(\n self, \n frame_idx: int, \n scores: List[float], \n losses: List[float],\n ):\n clear_output(True)\n plt.figure(figsize=(20, 5))\n plt.subplot(131)\n plt.title('frame %s. 
score: %s' % (frame_idx, np.mean(scores[-10:])))\n plt.plot(scores)\n plt.subplot(132)\n plt.title('loss')\n plt.plot(losses)\n plt.show()", "def plotPaths(self, simulationIndex, numberOfPaths):\n for k in range(numberOfPaths):\n path = self.getPath(simulationIndex + k);\n plt.plot(path)\n plt.xlabel('Time')\n plt.ylabel('Realizations of the process')\n plt.show()", "def show_plots():\n plt.show()", "def Plot(self):\n\n ### Create the path names ###\n folder_string = self.params.folder+\"/plots/\"\n u_string = self.params.folder+\"/plots/u.pdf\"\n p_string = self.params.folder+\"/plots/p.pdf\"\n\n ### Check if folder exists ###\n if not os.path.exists(folder_string): os.makedirs(folder_string)\n\n ### Plot the x component of velocity ###\n plot(self.u_next[0],title=\"Velocity in the x Direction\")\n plt.savefig(u_string)\n plt.figure()\n\n ### Plot the pressure ###\n plot(self.p_next,title=\"Pressure\")\n plt.savefig(p_string)\n plt.show()", "def plot(self):\n attr = self.Graph[\"root\"]\n if (self.type == 0 or self.type == 1):\n self.subplot_1(attr, 0)\n else:\n self.subplot_2(attr, 0)", "def figures(self):\n if np.size(self.iceicehorizons_depth1)>0:\n fig, ax = mpl.subplots()\n if self.site1.archive == 'icecore':\n mpl.xlabel(self.site1.label+' ice age (yr b1950)')\n else:\n mpl.xlabel(self.site1.label+' age (yr b1950)')\n if self.site2.archive == 'icecore':\n mpl.ylabel(self.site2.label+' ice age (yr b1950)')\n else:\n mpl.ylabel(self.site2.label+' age (yr b1950)')\n if np.size(self.iceicehorizons_depth1) > 0:\n if pccfg.show_initial:\n mpl.plot(self.site1.fct_age_init(self.iceicehorizons_depth1),\n self.site2.fct_age_init(self.iceicehorizons_depth2),\n color=pccfg.color_init, linestyle='', marker='o', markersize=2,\n label=\"Initial\")\n mpl.plot(self.site1.fct_age_model(self.iceicehorizons_depth1),\n self.site2.fct_age_model(self.iceicehorizons_depth2),\n color=pccfg.color_mod, linestyle='', marker='o', markersize=2,\n label=\"Prior\")\n mpl.errorbar(self.site1.fct_age(self.iceicehorizons_depth1),\n self.site2.fct_age(self.iceicehorizons_depth2), color=pccfg.color_opt,\n xerr=np.zeros(np.size(self.iceicehorizons_depth1)),\n linestyle='', marker='o', markersize=2,\n label=\"Posterior\")\n xstart = self.site1.fct_age(self.iceicehorizons_depth1)-self.iceicehorizons_sigma/2\n ystart = self.site2.fct_age(self.iceicehorizons_depth2)+self.iceicehorizons_sigma/2\n for i in range(np.size(self.iceicehorizons_depth1)):\n mpl.arrow(xstart[i], ystart[i], self.iceicehorizons_sigma[i],\n -self.iceicehorizons_sigma[i], color=pccfg.color_opt,\n width=0.0, head_length=0.0, head_width=0.0)\n x_low, x_up, y_low, y_up = mpl.axis()\n# x_low = self.site1.age_top\n# y_low = self.site2.age_top\n# mpl.axis((x_low, x_up, y_low, y_up))\n rangefig = np.array([min(x_low, y_low), max(x_up, y_up)])\n mpl.plot(rangefig, rangefig, color=pccfg.color_obs, label='perfect agreement', zorder=0)\n mpl.legend(loc=\"best\")\n ax.set_aspect('equal')\n if self.site1.archive == 'icecore' and self.site2.archive == 'icecore':\n printed_page = PdfPages(pccfg.datadir+self.label+'/ice_ice_synchro.pdf')\n elif self.site1.archive == 'icecore' or self.site2.archive == 'icecore':\n printed_page = PdfPages(pccfg.datadir+self.label+'/ice_synchro.pdf')\n else:\n printed_page = PdfPages(pccfg.datadir+self.label+'/synchro.pdf')\n printed_page.savefig(fig)\n printed_page.close()\n if not pccfg.show_figures:\n mpl.close()\n\n if self.site1.archive == 'icecore' and self.site2.archive == 'icecore':\n if np.size(self.airairhorizons_depth1)>0:\n 
fig, ax = mpl.subplots()\n mpl.xlabel(self.site1.label+' air age (yr b1950)')\n mpl.ylabel(self.site2.label+' air age (yr b1950)')\n if np.size(self.airairhorizons_depth1) > 0:\n if pccfg.show_initial:\n mpl.plot(self.site1.fct_airage_init(self.airairhorizons_depth1),\n self.site2.fct_airage_init(self.airairhorizons_depth2),\n color=pccfg.color_init,\n linestyle='',\n marker='o', markersize=2, label=\"Initial\")\n mpl.plot(self.site1.fct_airage_model(self.airairhorizons_depth1),\n self.site2.fct_airage_model(self.airairhorizons_depth2),\n color=pccfg.color_mod,\n linestyle='', marker='o', markersize=2,\n label=\"Prior\")\n mpl.errorbar(self.site1.fct_airage(self.airairhorizons_depth1),\n self.site2.fct_airage(self.airairhorizons_depth2),\n color=pccfg.color_opt,\n xerr=np.zeros_like(self.airairhorizons_sigma),\n linestyle='', marker='o', markersize=2,\n label=\"Posterior\")\n xstart = self.site1.fct_airage(self.airairhorizons_depth1)-\\\n self.airairhorizons_sigma/2\n ystart = self.site2.fct_airage(self.airairhorizons_depth2)+\\\n self.airairhorizons_sigma/2\n for i in range(np.size(self.airairhorizons_depth1)):\n mpl.arrow(xstart[i], ystart[i], self.airairhorizons_sigma[i],\n -self.airairhorizons_sigma[i], color=pccfg.color_opt,\n width=0.0, head_length=0.0, head_width=0.0)\n x_low, x_up, y_low, y_up = mpl.axis()\n# x_low = self.site1.age_top\n# y_low = self.site2.age_top\n# mpl.axis((x_low, x_up, y_low, y_up))\n rangefig = np.array([min(x_low, y_low), max(x_up, y_up)])\n mpl.plot(rangefig, rangefig, color=pccfg.color_obs, label='perfect agreement',\n zorder=0)\n mpl.legend(loc=\"best\")\n ax.set_aspect('equal')\n printed_page = PdfPages(pccfg.datadir+self.label+'/air_air_synchro.pdf')\n printed_page.savefig(fig)\n printed_page.close()\n if not pccfg.show_figures:\n mpl.close()\n\n if self.site2.archive == 'icecore':\n if np.size(self.iceairhorizons_depth1)>0:\n fig, ax = mpl.subplots()\n if self.site1.archive == 'icecore':\n mpl.xlabel(self.site1.label+' ice age (yr b1950)')\n else:\n mpl.xlabel(self.site1.label+' age (yr b1950)')\n mpl.ylabel(self.site2.label+' air age (yr b1950)')\n if np.size(self.iceairhorizons_depth1) > 0:\n if pccfg.show_initial:\n mpl.plot(self.site1.fct_age_init(self.iceairhorizons_depth1),\n self.site2.fct_airage_init(self.iceairhorizons_depth2),\n color=pccfg.color_init,\n linestyle='',\n marker='o', markersize=2, label=\"Initial\")\n mpl.plot(self.site1.fct_age_model(self.iceairhorizons_depth1),\n self.site2.fct_airage_model(self.iceairhorizons_depth2),\n color=pccfg.color_mod,\n linestyle='', marker='o', markersize=2,\n label=\"Prior\")\n mpl.errorbar(self.site1.fct_age(self.iceairhorizons_depth1),\n self.site2.fct_airage(self.iceairhorizons_depth2),\n color=pccfg.color_opt,\n xerr=np.zeros_like(self.iceairhorizons_sigma),\n linestyle='', marker='o', markersize=2,\n label=\"Posterior\")\n xstart = self.site1.fct_age(self.iceairhorizons_depth1)-\\\n self.iceairhorizons_sigma/2\n ystart = self.site2.fct_airage(self.iceairhorizons_depth2)+\\\n self.iceairhorizons_sigma/2\n for i in range(np.size(self.iceairhorizons_depth1)):\n mpl.arrow(xstart[i], ystart[i], self.iceairhorizons_sigma[i],\n -self.iceairhorizons_sigma[i], color=pccfg.color_opt,\n width=0.0, head_length=0.0, head_width=0.0) \n x_low, x_up, y_low, y_up = mpl.axis()\n# x_low = self.site1.age_top\n# y_low = self.site2.age_top\n# mpl.axis((x_low, x_up, y_low, y_up))\n rangefig = np.array([min(x_low, y_low), max(x_up, y_up)])\n mpl.plot(rangefig, rangefig, color=pccfg.color_obs, label='perfect 
agreement',\n zorder=0)\n mpl.legend(loc=\"best\")\n ax.set_aspect('equal')\n if self.site1.archive == 'icecore':\n printed_page = PdfPages(pccfg.datadir+self.label+'/ice_air_synchro.pdf')\n else:\n printed_page = PdfPages(pccfg.datadir+self.label+'/air_synchro.pdf')\n printed_page.savefig(fig)\n printed_page.close()\n if not pccfg.show_figures:\n mpl.close()\n\n if self.site1.archive == 'icecore':\n if np.size(self.airicehorizons_depth1)>0:\n fig, ax = mpl.subplots()\n mpl.xlabel(self.site1.label+' air age (yr b1950)')\n if self.site2.archive == 'icecore':\n mpl.ylabel(self.site2.label+' ice age (yr b1950)')\n else:\n mpl.ylabel(self.site2.label+' age (yr b1950)')\n if np.size(self.airicehorizons_depth1) > 0:\n if pccfg.show_initial:\n mpl.plot(self.site1.fct_airage_init(self.airicehorizons_depth1),\n self.site2.fct_age_init(self.airicehorizons_depth2),\n color=pccfg.color_init,\n linestyle='', marker='o', markersize=2, label=\"Initial\")\n mpl.plot(self.site1.fct_airage_model(self.airicehorizons_depth1),\n self.site2.fct_age_model(self.airicehorizons_depth2),\n color=pccfg.color_mod,\n linestyle='', marker='o', markersize=2,\n label=\"Prior\")\n mpl.errorbar(self.site1.fct_airage(self.airicehorizons_depth1),\n self.site2.fct_age(self.airicehorizons_depth2),\n color=pccfg.color_opt,\n xerr=np.zeros_like(self.airicehorizons_sigma),\n linestyle='', marker='o', markersize=2,\n label=\"Posterior\")\n xstart = self.site1.fct_airage(self.airicehorizons_depth1)-\\\n self.airicehorizons_sigma/2\n ystart = self.site2.fct_age(self.airicehorizons_depth2)+\\\n self.airicehorizons_sigma/2\n for i in range(np.size(self.airicehorizons_depth1)):\n mpl.arrow(xstart[i], ystart[i], self.airicehorizons_sigma[i],\n -self.airicehorizons_sigma[i], color=pccfg.color_opt,\n width=0.0, head_length=0.0, head_width=0.0)\n x_low, x_up, y_low, y_up = mpl.axis()\n# x_low = self.site1.age_top\n# y_low = self.site2.age_top\n# mpl.axis((x_low, x_up, y_low, y_up))\n rangefig = np.array([min(x_low, y_low), max(x_up, y_up)])\n mpl.plot(rangefig, rangefig, color=pccfg.color_obs, label='perfect agreement')\n mpl.legend(loc=\"best\")\n ax.set_aspect('equal')\n if self.site2.archive == 'icecore':\n printed_page = PdfPages(pccfg.datadir+self.label+'/air_ice_synchro.pdf')\n else:\n printed_page = PdfPages(pccfg.datadir+self.label+'/air_synchro.pdf')\n printed_page.savefig(fig)\n printed_page.close()\n if not pccfg.show_figures:\n mpl.close()", "def plot(self):\n\t\tself.plotOfIP().plot()", "def fdd_plot(result_file_list,default_fdds,ion_default):\n # Getting data from the file\n fdd_param_keys = [result_file.split('_')[0] for result_file in result_file_list]\n# print \" Parameters to plot for:\", fdd_param_keys\n fig = plt.figure(figsize=(14,12))#(8.27,11.69))\n axs = [None]*(len(result_file_list)*5 + 1) # List of axes: 2 ions x (1 uncert + 1 value) + 1 for iterations (axs[0] will not be used)\n for j,result_file_name in enumerate(result_file_list):\n with open(result_file_name,'r') as res_file:\n res_data = res_file.read().split('\\n')\n if res_data[-1] == \"\": # delete empty line if present\n del res_data[-1]\n else:\n pass\n fdd_list, H_list, Hunc_list, D_list, Dunc_list, n_iter_list,fdd_iter_list = [[] for i in range(7)]\n for line in res_data:\n line_split = line.split()\n if line_split[1] == '0' or line_split[1] == '-1':\n fdd_list.append(float(line_split[0].split('_')[-1])) # fdd_list\n fdd_iter_list.append(float(line_split[0].split('_')[-1])) # fdd_list for iteration subplot\n if line_split[1] == '0':\n 
n_iter_list.append(int(line_split[2])) # number of iterations list\n else:\n n_iter_list.append(-1) # 125, number of iterations\n H_list.append(float(line_split[3])) # H_list\n Hunc_list.append(float(line_split[4])) # Hunc_list\n D_list.append(float(line_split[5])) # D_list\n Dunc_list.append(float(line_split[6])) # Dunc_list\n elif line_split[1] == '1':\n print \" {} = {} <- excluded: error '1' (no f26 file was found, check 'stdout.dat' and 'stderr.dat')\".format(fdd_param_keys[j], line_split[0])\n fdd_iter_list.append(float(line_split[0].split('_')[-1])) # fdd_list for iteration subplot\n n_iter_list.append(0) # number of iterations list\n elif line_split[1] == '2':\n print \" {} = {} <- excluded: error '2' (error happened during VPFITing, check 'stderr.dat')\".format(fdd_param_keys[j], line_split[0])\n\t\tfdd_iter_list.append(float(line_split[0].split('_')[-1])) # fdd_list for iteration subplot\n\t\tn_iter_list.append(0) # number of iterations list\n elif line_split[1] == '3':\n print \" {} = {} <- excluded: error '3' (zero size f26 file, check 'stdout.dat')\".format(fdd_param_keys[j], line_split[0])\n\t\tfdd_iter_list.append(float(line_split[0].split('_')[-1])) # fdd_list for iteration subplot\n\t\tn_iter_list.append(0) # number of iterations list\n elif line_split[1] == '4':\n print \" {} = {} <- excluded: error '4' (new and original f26's have different number of lines, check 'stdout.dat')\".format(fdd_param_keys[j], line_split[0])\n fdd_iter_list.append(float(line_split[0].split('_')[-1])) # fdd_list for iteration subplot\n\t\tn_iter_list.append(-3) # number of iterations list\n elif line_split[1] == '5':\n print \" {} = {} <- excluded: error '5' ('****' uncertainty was found for one of specified ion! Check f26 file)\".format(fdd_param_keys[j], line_split[0])\n fdd_iter_list.append(float(line_split[0].split('_')[-1])) # fdd_list for iteration subplot\n\t\tn_iter_list.append(-2) # number of iterations list\n else:\n print \" Unknown error code was specified in {}\".format(result_file_name)\n\n # Data are ready to plot\n # First column of subplots (uncertainties)\n axs[5*j+1] = plt.subplot(len(fdd_param_keys),3,3*j+1)\n axs[5*j+1].plot(fdd_list,Dunc_list,color='b',drawstyle=\"steps-mid\") # Plotting data\n axs[5*j+1].plot(default_fdds[fdd_param_keys[j]],ion_default[3],'bo',mew=0.0) # plotting default result\n axs[5*j+1].set_ylabel(r'$\\Delta$N(D I)',color='b')\n axs[5*j+1].tick_params('y',colors='b')\n axs[5*j+1].ticklabel_format(useOffset=False)\n axs[5*j+1].set_xscale('log')\n if fdd_param_keys[j] == 'fdbstep':\n axs[5*j+1].set_xlabel('{} [{:g}], km/s'.format(fdd_param_keys[j],default_fdds[fdd_param_keys[j]]))\n else:\n axs[5*j+1].set_xlabel('{} [{:g}]'.format(fdd_param_keys[j],default_fdds[fdd_param_keys[j]]))\n axs[5*j+1].axvline(default_fdds[fdd_param_keys[j]],linewidth=1,color='g') # indicate fdd from vp_setup.dat\n# axs[5*j+1].margins(y=np.ptp(Dunc_list)/2.0) # Include marging above and below the data to show default fdds\n axs[5*j+2] = axs[5*j+1].twinx() # second plot on the same axis\n axs[5*j+2].plot(fdd_list,Hunc_list,color='r',drawstyle=\"steps-mid\")\n axs[5*j+2].plot(default_fdds[fdd_param_keys[j]],ion_default[1],'ro',mew=0.0)\n axs[5*j+2].set_ylabel(r'$\\Delta$N(H I)',color='r')\n axs[5*j+2].tick_params('y',colors='r')\n axs[5*j+2].ticklabel_format(useOffset=False,axis='y')\n # Second column of subplots (middle values)\n axs[5*j+3] = plt.subplot(len(fdd_param_keys),3,3*j+2)\n plot_interal = False # Add 1-sigma interval\n if plot_interal == True:\n median = 
np.median(D_list)\n med_unc = np.median(Dunc_list)\n print \"median = {} +/- {} for {}\".format(median,med_unc,fdd_param_keys[j])\n axs[5*j+3].fill_between(fdd_list, median-med_unc, median+med_unc,facecolor='green', alpha=0.4)\n axs[5*j+3].plot(fdd_list,D_list,color='b',drawstyle=\"steps-mid\",zorder=2) # Plotting data\n axs[5*j+3].plot(default_fdds[fdd_param_keys[j]],ion_default[2],'bo',zorder=10,mew=0.0)\n axs[5*j+3].set_ylabel(r'N(D I)',color='b')\n axs[5*j+3].ticklabel_format(useOffset=False)\n axs[5*j+3].tick_params('y',colors='b')\n axs[5*j+3].set_xscale('log')\n if fdd_param_keys[j] == 'fdbstep':\n axs[5*j+3].set_xlabel('{} [{:g}], km/s'.format(fdd_param_keys[j],default_fdds[fdd_param_keys[j]]))\n else:\n axs[5*j+3].set_xlabel('{} [{:g}]'.format(fdd_param_keys[j],default_fdds[fdd_param_keys[j]]))\n axs[5*j+3].axvline(default_fdds[fdd_param_keys[j]],linewidth=1,color='g') # indicate fdd from vp_setup.dat\n# axs[5*j+3].margins(y=np.ptp(D_list),tight=False) # Include marging above and below the data to show default fdds\n axs[5*j+4] = axs[5*j+3].twinx()\n axs[5*j+4].plot(fdd_list,H_list,color='r',drawstyle=\"steps-mid\",zorder=1)\n axs[5*j+4].plot(default_fdds[fdd_param_keys[j]],ion_default[0],'ro',zorder=10,mew=0.0)\n axs[5*j+4].set_ylabel(r'N(H I)',color='r')\n axs[5*j+4].tick_params('y',colors='r')\n axs[5*j+4].ticklabel_format(useOffset=False,axis='y')\n # Third column of subplots (number of iterations)\n axs[5*j+5] = plt.subplot(len(fdd_param_keys),3,3*j+3)\n axs[5*j+5].plot(fdd_iter_list,n_iter_list,drawstyle=\"steps-mid\") # Plotting n_iter_list\n# axs[5*j+5].yaxis.tick_right()\n# axs[5*j+5].yaxis.set_label_position(\"right\")\n axs[5*j+5].set_ylabel('N iterations')\n axs[5*j+5].ticklabel_format(useOffset=False)\n axs[5*j+5].set_xscale('log')\n if fdd_param_keys[j] == 'fdbstep':\n axs[5*j+5].set_xlabel('{} [{:g}], km/s'.format(fdd_param_keys[j],default_fdds[fdd_param_keys[j]]))\n else:\n axs[5*j+5].set_xlabel('{} [{:g}]'.format(fdd_param_keys[j],default_fdds[fdd_param_keys[j]]))\n axs[5*j+5].axvline(default_fdds[fdd_param_keys[j]],linewidth=1,color='g') # indicate fdd from vp_setup.dat\n axs[5*j+5].axhline(0,linewidth=1,color='grey',ls=\":\") # indicate fdd from vp_setup.dat\n\n# plt.tight_layout()\n fig.subplots_adjust(hspace=0.3,wspace=0.9) # Set up spaces between subplots\n plt.savefig(\"fdd_plot.pdf\",bbox_inches='tight', pad_inches=0)\n plt.show()\n plt.close()\n print \" Plot is done!\"", "def draw_plot(self):\n # X axis is auto follow.\n XLEN = 100\n xmax = max(len(self.daq.data0), XLEN)\n xmin = xmax - XLEN\n\n # The Y value will lie between 0.0 and 5.0 volts\n ymax = 5.0\n ymin = 0.0\n\n self.main_plot.set_xbound(lower=xmin, upper=xmax)\n self.main_plot.set_ybound(lower=ymin, upper=ymax)\n\n # Add the grid. 
Grid looks cool and is actually very helpful.\n self.main_plot.grid(True, color='gray')\n\n pylab.setp(self.main_plot.get_xticklabels(), \n visible=True)\n \n self.plot_data.set_xdata(arange(len(self.daq.data0)))\n self.plot_data.set_ydata(array(self.daq.data0))\n \n self.canvas.draw()", "def plot(self):\n\t\tself.plotOfHeatingCurrent().plot()", "def paint(self):\n x = []\n y = []\n plt.figure(figsize=(10, 5), facecolor=\"silver\")\n ax = plt.axes()\n for node in self.graph.nodes.values():\n x.append(node.get_pos()[0])\n y.append(node.get_pos()[1])\n ax.scatter(x, y, color=\"black\", s=50)\n xl = ax.get_xlim()[1] - ax.get_xlim()[0]\n yl = ax.get_ylim()[1] - ax.get_ylim()[0]\n for nd in self.graph.nodes.values():\n for ed in self.graph.all_out_edges_of_node(Node.get_key(nd)).keys():\n desti: Node = self.graph.get_node(ed)\n destx = desti.get_pos()[0] - nd.get_pos()[0]\n desty = desti.get_pos()[1] - nd.get_pos()[1]\n ax.arrow(nd.get_pos()[0], nd.get_pos()[1], destx, desty, head_width=xl * 0.007,\n length_includes_head=True,\n head_length=yl * 0.02, width=xl * 0.0001 * yl, color='grey')\n plt.title(\"Your graph!\")\n plt.show()", "def show_plot() :\n logger.info(\"Show plot\")\n pylab.axis('equal')\n pylab.xlabel(\"Longitud\")\n pylab.ylabel(\"Latitud\")\n pylab.grid(True)\n pylab.title(\"Product tiles and product source\")\n pylab.show()", "def show_data_files(self):\n for idx in self.plot_data:\n self.plot_data[idx].show()", "def plot(self):\n\t\tself.plotOfXray().plot()", "def plot(self, axes):\n if self.is_leaf:\n axes.plot([p.x for p in self.points], [p.y for p in self.points], 'bo')\n else:\n axes.plot([self.centre.x - self.size / 2, self.centre.x + self.size / 2],\n [self.centre.y, self.centre.y], '-', color='gray')\n axes.plot([self.centre.x, self.centre.x],\n [self.centre.y - self.size / 2, self.centre.y + self.size / 2],\n '-', color='gray')\n for child in self.children:\n child.plot(axes)\n axes.set_aspect(1)", "def plotGraph(self, y1, y2, title=\"Y1 and Y2\", xLabel=\"X\", yLabel=\"Y\", yOneLegend=\"Y1\", yTwoLegend=\"Y2\", name=None):\n # Clear the canvas\n plt.clf()\n\n # Plot data\n plt.plot(y1, color=\"black\", label=yOneLegend)\n plt.plot(y2, color=\"magenta\", label=yTwoLegend)\n\n plt.xlabel(xLabel)\n plt.ylabel(yLabel)\n plt.title(title)\n plt.legend(loc=\"upper left\")\n\n if name:\n plt.savefig(\"plots/\"+name, bbox_inches=\"tight\")\n return\n plt.savefig(\"plots/xyplot.png\", bbox_inches=\"tight\")", "def _plot(self, rewards, losses, epsilons):\n plt.figure(figsize=(20,5))\n plt.subplot(131)\n plt.title('Episodic Reward')\n plt.plot(rewards)\n plt.subplot(132)\n plt.title('TD Loss')\n plt.plot(losses)\n plt.subplot(133)\n plt.title('Epsilon')\n plt.plot(epsilons)\n plt.tight_layout()\n plt.show()", "def init_plot(self):\n self.dpi = 100\n self.fig = Figure((5.0, 5.0), dpi = self.dpi)\n\n self.main_plot = self.fig.add_subplot(111)\n self.main_plot.set_axis_bgcolor('black')\n self.main_plot.set_title('Dynamic venous flow view', size = 12)\n\n pylab.setp(self.main_plot.get_xticklabels(), fontsize = 8)\n pylab.setp(self.main_plot.get_yticklabels(), fontsize = 8)\n\n # Plot the data as a green line\n self.plot_data = self.main_plot.plot(\n self.daq.data0,\n linewidth = 1,\n color = (0, 1, 0),\n )[0]\n self.main_plot.grid(True, color='gray')", "def plot(self):\n # Get data\n #print(self.file_name)\n fig, ax = plb.subplots(1,1,figsize=(18,20))\n for key,value in self.testTrend.items():\n x = np.arange(len(self.data_array))\n y = np.asarray(value)\n plb.plot(x,y, label=key)\n 
ax.scatter(x, y)\n for i in range(0, len(value)):\n ax.annotate(str(i), (x[i], y[i]))\n # Title\n plb.title(self.file_name)\n # Legend\n plb.legend(bbox_to_anchor=(.05, 1), loc='best', borderaxespad=0.)\n # x ticks\n plb.xticks(np.arange(min(x), max(x) + 1, 2.0))\n #plb.ylim(-250, 1)\n # Show image\n plb.show()", "def TwoOrOneValuePlot(no_of_sets, Xax, Ydat1, Ydat2, Label1, Label2,\n xmin, xmax, ymin_1, ymax_1, ymin_2, ymax_2,\n XLab, YLab_1, YLab_2, SupTitle, Title, FileName,\n currentDate, currentTime, Software_version):\n\n rc('font', size=6, weight='bold')\n if no_of_sets == 1:\n fig = plt.figure(figsize=(9, 5))\n ax1 = fig.add_subplot(111)\n elif no_of_sets == 2:\n fig = plt.figure(figsize=(9, 9))\n ax1 = fig.add_subplot(211)\n else:\n print(' ERROR !!!')\n if no_of_sets == 2:\n ax1.plot(Xax, Ydat2, color=u'#ff7f0e', linestyle='-', alpha=0.4, linewidth='1.00')\n ax1.plot(Xax, Ydat1, color=u'#1f77b4', linestyle='-', alpha=1.0, linewidth='1.00', label=Label1)\n ax1.legend(loc='upper right', fontsize=6)\n ax1.grid(visible=True, which='both', color='silver', linestyle='-')\n ax1.axis([xmin, xmax, ymin_1, ymax_1])\n ax1.set_ylabel(YLab_1, fontsize=6, fontweight='bold')\n ax1.set_title(Title, fontsize=6)\n if no_of_sets == 2:\n ax1.tick_params(axis='x', which='both', bottom=False, top=False, labelbottom=False)\n ax2 = fig.add_subplot(212)\n if no_of_sets == 2:\n ax2.plot(Xax, Ydat1, color=u'#1f77b4', linestyle='-', alpha=0.4, linewidth='1.00')\n ax2.plot(Xax, Ydat2, color=u'#ff7f0e', linestyle='-', alpha=1.0, linewidth='1.00', label=Label2)\n ax2.legend(loc='upper right', fontsize=6)\n ax2.grid(visible=True, which='both', color='silver', linestyle='-')\n ax2.axis([xmin, xmax, ymin_2, ymax_2])\n ax2.set_xlabel(XLab, fontsize=6, fontweight='bold')\n ax2.set_ylabel(YLab_2, fontsize=6, fontweight='bold')\n fig.subplots_adjust(hspace=0.05, top=0.94)\n elif no_of_sets == 1:\n ax1.set_xlabel(XLab, fontsize=6, fontweight='bold')\n fig.subplots_adjust(top=0.92)\n else:\n print(' ERROR !!!')\n fig.suptitle(SupTitle, fontsize = 8, fontweight='bold')\n if no_of_sets == 2:\n fig.text(0.73, 0.06, 'Processed ' + currentDate + ' at ' + currentTime,\n fontsize=4, transform=plt.gcf().transFigure)\n fig.text(0.09, 0.06, 'Software version: ' + Software_version + ', yerin.serge@gmail.com, IRA NASU',\n fontsize=4, transform=plt.gcf().transFigure)\n elif no_of_sets == 1:\n fig.text(0.73, 0.03, 'Processed ' + currentDate + ' at '+currentTime,\n fontsize=4, transform=plt.gcf().transFigure)\n fig.text(0.09, 0.03, 'Software version: ' + Software_version + ', yerin.serge@gmail.com, IRA NASU',\n fontsize=4, transform=plt.gcf().transFigure)\n else:\n print(' ERROR !!!')\n pylab.savefig(FileName, bbox_inches='tight', dpi=160)\n plt.close('all')\n return", "def plots(x_bef,y_bef,z_bef):\r\n # Makes a 3-D plot of the x, y and z axes representing the ball's total trajectory\r\n plt.figure(3)\r\n plot3 = plt.axes(projection=\"3d\")\r\n plot3.plot3D(x_bef,y_bef,z_bef,'blue')\r\n plot3.set_xlabel('x (ft)')\r\n plot3.set_ylabel('y (ft)')\r\n plot3.set_zlabel('z (ft)')\r\n plot3.set_title('Total Trajectory')\r\n \r\n # Makes a 2-D plot of the x, and z axes representing the ball's total 2-D trajectory\r\n plt.figure(4)\r\n plt.plot(x_bef,z_bef)\r\n plt.xlabel('x (ft)')\r\n plt.ylabel('z (ft)')\r\n plt.title('z (ft) vs x (ft)')\r\n plt.show()", "def plot_observed(self):\n \n fig = plt.figure(figsize=(15,5))\n plt.subplot(1,3,1)\n for k in self.observed_data.keys():\n plt.plot(self.observed_data[k][0], self.observed_data[k][1], 
'bx')\n plt.xlabel(\"X\",fontsize=20)\n plt.ylabel(\"Y\",fontsize=20)\n\n fig = plt.figure(figsize=(16,4))\n \n # Plot of time vs X\n plt.subplot(1,3,2)\n for k in self.observed_data.keys(): \n plt.plot(k*np.ones(self.observed_data[k].shape[1]), self.observed_data[k][0], 'bx')\n plt.xlabel(\"time\",fontsize=20)\n plt.ylabel(\"X\",fontsize=20)\n plt.xlim(0,self.n_time_steps+1)\n\n # Plot of time vs Y\n plt.subplot(1,3,3)\n for k in self.observed_data.keys():\n plt.plot(k*np.ones(self.observed_data[k].shape[1]), self.observed_data[k][1], 'bx')\n plt.xlabel(\"time\",fontsize=20)\n plt.ylabel(\"Y\",fontsize=20)\n plt.xlim(0,self.n_time_steps+1)\n plt.show();", "def plot_all(show=True):\n fig, axes = plt.subplots(max_iterations, 1, figsize=(6, 12))\n for t in range(max_iterations):\n with open('results/%s/df_%d.pkl' % (id, t), 'rb') as f:\n df = pickle.load(f)\n with open('results/%s/w_%d.pkl' % (id, t), 'rb') as f:\n w = pickle.load(f)\n axes[t].hist2d(x=df['vision'], y=df['metab'], weights=w, density=True,\n bins=((xticks, yticks)), cmap='magma')\n axes[t].set_ylabel('max metabolism')\n axes[t].set_xticks(vision_domain)\n axes[t].set_yticks((2, 3, 4))\n axes[3].set_xlabel('max vision')\n fig.tight_layout()\n if show:\n plt.show()\n else:\n plt.savefig('results/%s/abc_results.pdf' % id)", "def plotting(dataframe, prod_num):\n fig, axs = plt.subplots(2, sharex=True)\n axs[0].plot(dataframe['STU'])\n axs[1].plot(dataframe['STU'].diff().dropna())\n axs[0].set_title(\"Time Series of Product\" + f\"_{prod_num}\")\n axs[1].set_title(\"Differenced Time Series of Product\" + f\"_{prod_num}\")\n plt.savefig(\"Time Series of Product\" + f\"_{prod_num}\" + \".pdf\")", "def plot(self):\n fx = self.fitness_functions(self.archive)\n n = len(fx[0])\n\n if n == 2:\n plt.xlabel(\"F1\")\n plt.ylabel(\"F2\")\n plt.suptitle(\"Pareto Front\")\n plt.scatter(fx[:,0], fx[:,1], label='Archive')\n plt.show()\n elif n == 3:\n plt.figure()\n ax = plt.axes(projection='3d')\n ax.scatter(fx[:, 0], fx[:, 1], fx[:, 2])\n ax.set_xlabel(\"F1\")\n ax.set_ylabel(\"F2\")\n ax.set_zlabel(\"F3\")\n plt.suptitle(\"Pareto Front of Archive\")\n plt.show()\n else:\n print(\"Cannot Print Multi-Dimensional Front greater than 3D\")", "def _setup_show(self):\n super(Scatter, self)._setup_show()\n\n # check if pandas is installed\n if pd:\n # if it is we try to take advantage of it's data structures\n # asumming we get an groupby object\n if isinstance(self.values, pd.core.groupby.DataFrameGroupBy):\n pdict = OrderedDict()\n\n for i in self.values.groups.keys():\n self.labels = self.values.get_group(i).columns\n xname = self.values.get_group(i).columns[0]\n yname = self.values.get_group(i).columns[1]\n x = getattr(self.values.get_group(i), xname)\n y = getattr(self.values.get_group(i), yname)\n pdict[i] = np.array([x.values, y.values]).T\n\n self.values = DataAdapter(pdict)\n self.labels = self.values.keys()\n\n # create axis labels from group by object only if the input\n # values is a DataFrameGroupBy\n if self._xlabel is None:\n self._xlabel = self.labels[0]\n\n if self._ylabel is None:\n self._ylabel = self.labels[1]\n\n else:\n self.values = DataAdapter(self.values)\n self.labels = self.values.keys()\n\n else:\n self.values = DataAdapter(self.values)\n self.labels = self.values.keys()", "def plot_acc(acc_watch, x_acc_df):\n\tfig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)\n\tplt.xlabel('Time (ms)')\n\tplt.ylabel('acc. 
value')\n\tax1.set_title('Acceleration Data from ECG')\n\tax2.set_title('Acceleration Data from Watch')\n\n\t# ecg data\n\tax1.plot(x_acc_df['timestamp'], x_acc_df['x_acc'] )\n\t# ppg data\n\tax2.plot(acc_watch['timestamp'], acc_watch['v0'])\n\n\tplt.show()", "def xyplot(x_vals,y_vals,name):\n # set the figure's size\n set_figsize(figsize=(5, 2.5))\n # detach() is used to get a variable from the current calculation graph\n # in which this variable is the not gradient tracking version\n plt.plot(x_vals.detach().numpy(), y_vals.detach().numpy())\n # set the constant x axis label\n plt.xlabel('x')\n # combine and set the y axis label\n plt.ylabel(name+'(x)')\n plt.show()", "def vis_log(dfs, xs, ys=None, table_ys=None, args_list=None,\n ignore_keys=[], table_width=600):\n # This function can be divided into five parts.\n # 1. Process necessary information from given dataframes.\n # 2. Initialize the components (Static part of the visualization)\n # This includes setting up the figure size,\n # creating data tables and buttons.\n # 3. Confiure dynamic part.\n # This function contains an element of user-interaction.\n # User can click buttons and slides to configure what and how to\n # visualize.\n # 4. Add tools\n # 5. Organize how different elements can be put together in a screen.\n\n if ys is None:\n ys = table_ys.keys()\n ignore_keys += ['index']\n\n # 1. prepare and preprocess dataframes\n dict_args = list_of_dict_to_dict_of_list(args_list)\n valid_keys = get_valid_keys(args_list)\n dict_args = filter_dict(dict_args, valid_keys)\n identifiers = get_identifiers(args_list, valid_keys)\n xs_dict, ys_dict, tables = filter_dataframes(\n dfs, xs, ys, table_ys, args_list, valid_keys)\n\n # 2. Construct elements\n p = bokeh.plotting.figure(plot_width=1800 - table_width, plot_height=825)\n # build empty multi line graph\n multi_l_source = bokeh.plotting.ColumnDataSource(\n {'xs': [], 'ys': [], 'descs': [], 'legend': []})\n multi_l = p.multi_line(\n xs='xs', ys='ys', source=multi_l_source, legend='legend')\n # build datatable\n columns = [bokeh.models.widgets.TableColumn(field=key, title=key) for\n key in tables.keys() if key not in ignore_keys]\n data_table_source = bokeh.models.ColumnDataSource(tables)\n data_table = bokeh.models.widgets.DataTable(\n source=data_table_source,\n columns=columns,\n width=table_width, height=825)\n # Sliders, buttons, menus, legends\n window_slider = bokeh.models.Slider(\n start=1, end=101, value=1, step=10,\n title='window size')\n xs_button = bokeh.models.widgets.RadioButtonGroup(\n labels=xs, active=0, width=600)\n ys_button = bokeh.models.widgets.RadioButtonGroup(\n labels=ys, active=0, width=600)\n menu = ['off', 'top_right', 'top_left', 'bottom_right', 'bottom_left']\n legend_button = bokeh.models.widgets.RadioButtonGroup(\n labels=menu, active=0, width=600)\n\n # 3. 
Start configuring user-interaction\n def update(attr, old, new):\n raw_indices = data_table_source.selected['1d']['indices']\n # after sorting, the order of index changes\n reordered_keys = data_table_source.data['index']\n selected_indices = []\n for idx in raw_indices:\n selected_indices.append(reordered_keys[idx])\n # get list of selected line data\n selected_xs = []\n selected_ys = []\n selected_descs = []\n selected_identifiers = []\n for idx in selected_indices:\n x = xs[xs_button.active]\n y = ys[ys_button.active]\n selected_xs.append(xs_dict[x][idx])\n selected_ys.append(\n moving_average_1d(ys_dict[y][idx], window_slider.value))\n selected_identifiers.append(identifiers[idx])\n selected_descs.append([identifiers[idx]] * len(xs_dict[x][idx]))\n # get colors\n selected_colors = []\n if len(selected_indices) < 10:\n colors = bokeh.palettes.Category10_10\n color_indices = colors_10_indices\n else:\n colors = bokeh.palettes.Inferno256\n color_indices = colors_255_indices\n for i in range(len(selected_indices)):\n selected_colors.append(colors[color_indices[i]])\n # set data dict\n multi_l.data_source.data = dict(\n xs=selected_xs, ys=selected_ys,\n descs=selected_descs,\n line_color=selected_colors,\n legend=selected_identifiers)\n # set color\n # https://groups.google.com/a/continuum.io/forum/#!topic/bokeh/MMxjMK84n5M\n multi_l.glyph.line_color = 'line_color'\n if menu[legend_button.active] == 'off':\n # TODO: This can be improved\n multi_l.data_source.data.pop('legend')\n else:\n p.legend.location = menu[legend_button.active]\n data_table_source.on_change('selected', update)\n window_slider.on_change('value', update)\n ys_button.on_change('active', update)\n xs_button.on_change('active', update)\n legend_button.on_change('active', update)\n\n # 4. add tools\n p.add_tools(bokeh.models.BoxZoomTool())\n p.add_tools(bokeh.models.ResizeTool())\n p.add_tools(bokeh.models.SaveTool())\n p.add_tools(bokeh.models.WheelZoomTool())\n p.add_tools(bokeh.models.RedoTool())\n p.add_tools(bokeh.models.ResetTool())\n p.add_tools(bokeh.models.UndoTool())\n p.add_tools(bokeh.models.ZoomOutTool())\n p.add_tools(bokeh.models.ZoomInTool())\n p.add_tools(\n bokeh.models.HoverTool(\n tooltips=[(\"y\", \"$y\"), (\"label\", \"@legend\")])\n )\n\n # 5. 
build layout\n sliders = bokeh.layouts.widgetbox(window_slider)\n xs_ys_widgets = bokeh.layouts.widgetbox(\n xs_button, ys_button)\n legend_widget = bokeh.layouts.widgetbox(legend_button)\n layout = bokeh.layouts.gridplot(\n [[data_table, p],\n [sliders, xs_ys_widgets, legend_widget]], sizing_mode='fixed')\n bokeh.io.curdoc().add_root(layout)", "def plot_data(self, data, backup_frame):\n title = self.filename.split('-')\n final_titles = title[2].split('.')\n self.final_title_sub = final_titles[0].lower()\n\n # Accounts for the three types of graph required\n # date for archival purposes\n # web for the web server and\n # log for the logarithmic graphs\n graph_list = ['date', 'web', 'log']\n for mode in graph_list:\n for column in data.columns:\n data['Rest of the World'] = \\\n backup_frame['Global_Cases'] - data[column]\n x_axis = data.index.values\n\n fig, axes = plt.subplots()\n axes.plot(x_axis, data[column], marker='o',\n label=column)\n axes.plot(x_axis, data['Rest of the World'], marker='s',\n label='Rest of the World')\n fig.autofmt_xdate()\n\n every_nth = 4\n for number, label in enumerate(axes.xaxis.get_ticklabels()):\n if number % every_nth != 0:\n label.set_visible(False)\n\n axes.set(xlabel='Date', ylabel='Cases',\n title=f'Covid-19 {self.final_title_sub} '\n f'cases for {column} - data from '\n f'John Hopkins CSSE')\n axes.grid()\n axes.legend()\n\n # Setting the y-axis\n if mode == 'log':\n axes.set_yscale('log')\n else:\n data_max = data.max(axis=1)\n max_number = data_max[-1]\n rounded_max = self.round_up(max_number, -3)\n rounded_max += 2000\n axes.set_ylim([0, rounded_max])\n\n # -----------------------------------------------------\n # Adds Labels to annotate the last data point for each\n # plot\n y_axis1 = data[column][-1]\n y_axis2 = data['Rest of the World'][-1]\n\n plt.annotate(y_axis1, (x_axis[-1], y_axis1 + 500),\n bbox=dict(facecolor='blue', alpha=0.5),\n fontsize=12)\n plt.annotate(y_axis2, (x_axis[-1], y_axis2 + 500),\n bbox=dict(facecolor='red', alpha=0.5),\n fontsize=12)\n # -----------------------------------------------------\n\n # Required in order to stop the column from summing\n # the total of each run through the loop\n # otherwise this leads to Rest of World values in the\n # millions\n data = data.drop('Rest of the World', axis=1)\n\n if mode == 'log':\n dir_name = f'{self.out_dir}docs/graphics/' \\\n f'log_' \\\n f'{self.final_title_sub}_for_' \\\n f'{column}.png'\n elif mode == 'date':\n dir_name = f'{self.out_dir}docs/graphics/' \\\n f'{x_axis[-1]}-2020-' \\\n f'{self.final_title_sub}_for_{column}.png'\n\n elif mode == 'web':\n dir_name = f'{self.out_dir}docs/graphics/' \\\n f'{self.final_title_sub}_for_{column}.png'\n\n else:\n print('error')\n\n fig.savefig(dir_name, transparent=False, dpi=300,\n bbox_inches=\"tight\")\n\n if os.path.exists(dir_name):\n logging.debug('File saved at: %s', {dir_name})\n print(f'Files saved at:\\n'\n f'{dir_name}\\n')\n else:\n logging.debug('Failed to save')\n logging.debug(os.getcwd())\n plt.close()\n return data", "def generate_plots(self):\n freq_to_channel = {v: k for k, v in self.frequency_dict.iteritems()}\n data_axes = None\n for index, frequency in enumerate(sorted(freq_to_channel)):\n channel = freq_to_channel[frequency]\n td_f = self.frequency_dict[channel]\n title = 'Volume Backscatter (Sv) :Channel #%d: Frequency: %.1f kHz' % (channel, td_f)\n data_axes = self._generate_plot(self.ax[index], self.power_data_dict[channel], title,\n self.min_db, self.max_db)\n\n if data_axes:\n 
self._display_x_labels(self.ax[2], self.data_times)\n self.fig.tight_layout(rect=[0, 0.0, 0.97, 1.0])\n self._display_colorbar(self.fig, data_axes)", "def plot_dataset(self):\n plt.plot(self.ground_truth, marker='o')\n plt.ylabel('Number of Topics')\n plt.xlabel('Window Number')\n plt.yticks(list(set(self.ground_truth)))\n plt.savefig(os.path.join(self.output_path, 'shift-plot.pdf'))", "def plot_directed(glomnums):\n odor_corrs_means = []\n odor_corrs_SDs = []\n air_corrs_means = []\n air_corrs_SDs = []\n corrs_deltafrate = []\n fig = figure()\n for gni,glomnum in enumerate(glomnums):\n print \"Computing phasic and deltafrate correlations for # of gloms =\",glomnum\n ## Set graph=True below to plot neg corr-ed responses too.\n corr_deltafrate, odor_corrs, air_corrs, overall_odor_mean, overall_air_mean = \\\n plot_decorrs_special([glomnum],graph=True)\n ax = fig.add_subplot(len(glomnums),1,gni+1)\n #hist(air_corrs,20,range=(-1.0,1.0),normed=True,histtype='step',\\\n # color='b',linewidth=2,label='air %2.1f'%overall_air_mean+'Hz')\n hist(odor_corrs,20,range=(-1.0,1.0),normed=True,histtype='step',\\\n color='r',linewidth=2,label='odor %2.1f'%overall_odor_mean+'Hz')\n ax.set_xticks([])\n #ax.set_xticklabels(['0.75','1.25'])\n ## just to scale up the ticks fontsize.\n axes_labels(ax,'','',adjustpos=False,fontsize=34)\n\n corrs_deltafrate.append(corr_deltafrate)\n ## mean and SD of phasic correlations of odor and air\n odor_corrs_means.append(mean(odor_corrs))\n odor_corrs_SDs.append(std(odor_corrs))\n air_corrs_means.append(mean(air_corrs))\n air_corrs_SDs.append(std(air_corrs))\n\n ax.set_yticks([])\n #biglegend(legendlocation='upper left')\n if gni == len(glomnums)-1:\n ax.set_xticks([-1.0,0.0,1.0])\n ax.set_xticklabels(['-1','0','1'])\n axes_labels(ax,'phase correlation','',adjustpos=False,fontsize=30)\n plt.tight_layout()\n\n ## mean phase corr vs number of connected gloms\n fig=figure()\n ax=fig.add_subplot(111)\n #plot(glomnums,air_corrs_means,color='b',linewidth=2,label='air')\n plot(glomnums,odor_corrs_means,color='r',linewidth=2,label='odor')\n ax.set_xticks(glomnums)\n ax.set_xticklabels([str(glomnum) for glomnum in glomnums])\n axes_labels(ax,'# of connected glomeruli','phase correlation mean',\\\n adjustpos=False,fontsize=30)\n #biglegend(legendlocation='lower left')\n plt.tight_layout()\n ## spread of phase corr vs number of connected gloms\n fig=figure()\n ax=fig.add_subplot(111)\n #errorbar(glomnums,air_corrs_SDs,color='b',linewidth=2,label='air')\n errorbar(glomnums,odor_corrs_SDs,color='r',linewidth=2,label='odor')\n ax.set_xticks(glomnums)\n ax.set_xticklabels([str(glomnum) for glomnum in glomnums])\n axes_labels(ax,'# of connected glomeruli','phase correlation spread',\\\n adjustpos=False,fontsize=30)\n #biglegend(legendlocation='upper left')\n plt.tight_layout()\n ## delta frate corr vs number of connected gloms\n fig=figure()\n ax=fig.add_subplot(111)\n plot(glomnums,corrs_deltafrate,color='b',linewidth=2)\n ax.set_xticks(glomnums)\n ax.set_xticklabels([str(glomnum) for glomnum in glomnums])\n axes_labels(ax,'# of connected glomeruli','$\\Delta$frate correlation',\\\n adjustpos=False,fontsize=30)\n tight_layout()", "def plot_data(self):", "def plot(df, ax, myself, names):\n\n df = df.sort_values(by=\"time\", ascending=True)\n offset = df.iloc[0][\"time\"]\n\n nodes = {}\n for name in names:\n nodes[name] = {\n \"master\": [],\n \"observer\": []\n }\n\n for (_id, row) in df[df[\"type\"] != \"R\"].iterrows():\n if row[\"type\"] == \"M\":\n time = row[\"time\"]\n target = 
row[\"args\"]\n for (name, blocks) in nodes.items():\n if name == target:\n close_block(blocks[\"observer\"], time)\n open_block(blocks[\"master\"], time)\n else:\n open_block(blocks[\"observer\"], time)\n elif row[\"type\"] == \"T\":\n time = row[\"time\"]\n target = row[\"args\"]\n blocks = nodes[target]\n close_block(blocks[\"master\"], time)\n open_block(blocks[\"observer\"], time)\n elif row[\"type\"] == \"F\":\n time = row[\"time\"]\n for blocks in nodes.values():\n close_block(blocks[\"master\"], time)\n close_block(blocks[\"observer\"], time)\n\n for (index, blocks) in enumerate(nodes.values()):\n plot_blocks(ax, index, blocks[\"master\"], offset, \"tab:blue\")\n plot_blocks(ax, index, blocks[\"observer\"], offset, \"tab:orange\")\n\n x_ticks = range(0, 10)\n y_ticks = [10, 20, 30, 40, 50]\n\n ax.title.set_text(\"View of node: {0}\".format(myself))\n ax.set_xlabel(\"seconds since start\")\n ax.set_xticks(x_ticks)\n ax.set_yticks(y_ticks)\n ax.set_yticklabels(names)\n ax.grid(True)\n\n # Add annotations:\n\n index = list(nodes.keys()).index(myself)\n for (_id, row) in df[df[\"type\"] == \"R\"].iterrows():\n x = (row[\"time\"] - offset).total_seconds()\n y = y_ticks[index]\n ax.annotate(\n \"Round {0}\".format(row[\"args\"]),\n xycoords=\"data\",\n xy=(x, y),\n xytext=(x, y + 5),\n arrowprops=dict(\n facecolor=\"black\",\n shrink=0.05\n )\n )", "def plot_subplots(x_list, y_list, z_list):\n # create a line chart with the average rating of the top movies per year\n # min rating = 0 and max = 10\n plot1 = plt.subplot(211)\n plt.plot(x_list, y_list, color = 'lightseagreen')\n plt.axis([START_YEAR, END_YEAR - 1, 0, 10])\n plt.title('Average IMDB Movie Rating per Year', fontsize=12)\n plt.ylabel('Average Rating')\n plt.grid(True)\n\n # make x ticklabels of plot1 invisible\n plt.setp(plot1.get_xticklabels(), visible=False)\n\n # adjust space between subplots\n plt.subplots_adjust(hspace=0.3)\n\n # create a line chart with the average runtime with shared x-axis\n plot2 = plt.subplot(212, sharex=plot1)\n plt.plot(x_list, z_list, color = 'lightseagreen')\n plt.title('Average IMDB Movie Runtime per Year', fontsize=12)\n plt.ylabel('Average Runtime (min)')\n plt.grid(True)\n\n # define axes, with all years (2008 till 2017) on the x-axis\n # min runtime = 0, max runtime = 180\n plt.axis([START_YEAR, END_YEAR - 1, 0, 180])\n plt.xticks(x_list)\n plt.xlabel('Year')\n\n # plot both the subplots\n plt.show()", "def liveplot(x, y, xlim, ylim, title):\n plt.plot(x,y,'b.')\n plt.xlim(xlim)\n plt.ylim(ylim)\n plt.xlabel('North-South Axis')\n plt.ylabel('East-West Axis')\n plt.title(title)\n plt.show()", "def plot_type_of_two_topic(data_frame1: pb.DataFrame, data_frame2: pb.DataFrame) -> None:\n plt.interactive(False)\n plt.figure()\n data_frame1.plot(kind='bar', x= data_frame['TopicID'])\n data_frame2.plot(kind='bar', x= data_frame['TopicID'])\n plt.show()", "def Diagnostic_plot3(self):\n\n floc = glob.glob('/home/mxs191/Desktop/MathewSchofield/TRG/DetTest/DetTest1_results/Info2Save/*.csv')\n fig = plt.figure()\n plt.rc('font', size=18)\n #fig, ax = generalPlot(xaxis=r'$\\nu / \\mu$Hz', yaxis=r'$P_{\\rm det}$')\n gs = gridspec.GridSpec(1, 2, width_ratios=(4,1))\n ax = fig.add_subplot(gs[0])\n\n for idx, i in enumerate(floc):\n\n d = pd.read_csv(i)\n\n if idx == 0:\n fullpdet = d[['f0', 'Pdet_Kepler', 'Pdet_TESS365', 'Pdet_TESS27']]\n else:\n fullpdet = pd.concat([ fullpdet,\\\n d[['f0', 'Pdet_Kepler', 'Pdet_TESS365', 'Pdet_TESS27']] ])\n\n plt.scatter(d['f0'], d['Pdet_Kepler'], color='b',\\\n 
label=r\"$\\rm Kepler - 4\\ yrs$\" if idx == 0 else '')\n plt.scatter(d['f0'], d['Pdet_TESS365'], color='orange',\\\n label=r'$\\rm TESS - 1\\ yr$' if idx == 0 else '')\n plt.scatter(d['f0'], d['Pdet_TESS27'], color='g',\\\n label=r'$\\rm TESS - 27\\ days$' if idx == 0 else '')\n\n plt.axhline(fullpdet['Pdet_Kepler'].median(), color='b')\n plt.axhline(fullpdet['Pdet_TESS365'].median(), color='orange')\n plt.axhline(fullpdet['Pdet_TESS27'].median(), color='g')\n ax.legend(loc='lower right')\n plt.ylim([0,1])\n ax.set_ylabel(r'$P_{\\rm det}$')\n ax.set_xlabel(r'$\\nu / \\mu \\rm Hz$')\n\n bx = fig.add_subplot(gs[1])\n import seaborn as sns\n bw = 0.4\n sns.kdeplot(fullpdet['Pdet_Kepler'].values, shade=True, vertical=True, \\\n ax=bx, color='b', bw=bw)\n sns.kdeplot(fullpdet['Pdet_TESS365'].values, shade=True, vertical=True, \\\n ax=bx, color='orange', bw=bw)\n sns.kdeplot(fullpdet['Pdet_TESS27'].values, shade=True, vertical=True, \\\n ax=bx, color='g', bw=bw)\n bx.set_ylim([0.0,1.0])\n bx.set_xticks([])\n bx.set_yticks([])\n bx.set_xlabel(r'$\\rm Density$')\n plt.tight_layout()\n\n plt.show()\n fig.savefig(os.getcwd() + os.sep + 'DetTest1_plots' + os.sep +\\\n 'DetTest_Diagnostic_plot3.pdf')\n sys.exit()", "def plot_paths(gdf_dict: Dict) -> None:\n fig, ax = plt.subplots(1, 1)\n\n # TODO change so that get all models plotted\n for model, gdf in gdf_dict.items():\n gdf.plot(ax=ax, legend=True)\n\n plt.show()\n return None", "def plot(self, *args, **kwargs):\n pass", "def draw_table(ax, dfs, legend, x, y):\n col_labels = dfs_all_values(dfs, x)\n column_legend = []\n cell_text = []\n # loop over all pandas.DataFrame objects\n for df in dfs:\n # to allow query y(x) easily\n df = df.set_index(x)\n df_row = df[y]\n # build a row with filled blanks '-'\n row = [\"{:.2f}\".format(df_row[column]) if column in df_row.index else '-' \\\n for column in col_labels]\n cell_text.append(row)\n\n ax.axis('tight')\n ax.axis('off')\n ax.table(cellText=cell_text, rowLabels=legend, colLabels=col_labels, \\\n loc='top')", "def show(self):\n \n \n \n \n \n \n r = 4\n f, axarr = plt.subplots(r, r, figsize=(8,8))\n counter = 0\n for i in range(r):\n for j in range(r):\n temp = self.x[counter,:]\n counter += 1\n img = self.x[counter,:]\n axarr[i][j].imshow(img)\n #######################################################################\n # #\n # #\n # TODO: YOUR CODE HERE #\n # #\n # #\n #######################################################################", "def create_val_plots(x_vals, vals_zeros,vals_ones):\n plt.plot(x_vals, vals_zeros,label=\"non-fraud\")\n plt.plot(x_vals, vals_ones,label=\"fraud\")\n plt.title('Accuracy per number of iterations')\n plt.xlabel('Number of Iterations')\n plt.ylabel('Accuracy')\n plt.xticks(np.arange(100, 210, 10))\n plt.legend() \n plt.show()\n # plt.savefig('./analysis_deliverable/visualizations/accuracy_plot.png')", "def plot(self):\n\t\t\n\t\ttf=tfData(self.shotno,tStart=None,tStop=None)\n\t\t\n\t\t_plt.figure()\n\t\tax1 = _plt.subplot2grid((3,2), (0,1), rowspan=3) #tf\n\t\tax2 = _plt.subplot2grid((3,2), (0,0)) #vf\n\t\tax3 = _plt.subplot2grid((3,2), (1,0),sharex=ax2) #oh\n\t\tax4 = _plt.subplot2grid((3,2), (2, 0),sharex=ax2) #sh\n\t\tfig=_plt.gcf()\n\t\tfig.set_size_inches(10,5)\n\t\t\t\t\n\t\ttStart=-2\n\t\ttStop=20\n\t\t\n\t\tax1.plot(tf.time*1e3,tf.tfBankField)\n\t\tax1.axvspan(tStart,tStop,color='r',alpha=0.3)\n\t\t_plot.finalizeSubplot(ax1,xlabel='Time (s)',xlim=[-150,450],ylabel='TF Field 
(T)')#,title=self.title\n\t\t\n\t\tax2.plot(self.vfTime*1e3,self.vfBankCurrent*1e-3)\n\t\t_plot.finalizeSubplot(ax2,ylabel='VF Current\\n(kA)')\n\t\t\n\t\tax3.plot(self.ohTime*1e3,self.ohBankCurrent*1e-3)\n\t\t_plot.finalizeSubplot(ax3,ylim=[-20,30],ylabel='OH Current\\n(kA)')\n\t\t\n\t\tax4.plot(self.shTime*1e3,self.shBankCurrent*1e-3)\n\t\t_plot.finalizeSubplot(ax4,ylim=[tStart,tStop],xlabel='Time (s)',ylabel='SH Current\\n(kA)')\n\t\t\n\t\t_plot.finalizeFigure(fig,title=self.title)\n#\t\tfig.set_tight_layout(True)\n\t\t\n\t\treturn fig", "def set_figure_variables(self):\n #self.fig.canvas.manager.full_screen_toggle()\n self.gs = self.fig.add_gridspec(2, 3)\n self.ax1 = self.fig.add_subplot(self.gs[0, 0])\n self.ax2 = self.fig.add_subplot(self.gs[0, 1])\n self.ax3 = self.fig.add_subplot(self.gs[0, 2])\n self.ax4 = self.fig.add_subplot(self.gs[1, 0])\n self.ax5 = self.fig.add_subplot(self.gs[1, 1])\n self.ax6 = self.fig.add_subplot(self.gs[1, 2])\n # histogram with indicator scoring\n self.ax1.set_xlabel(\"indicators\")\n self.ax1.set_ylabel(\"score (%)\")\n # graph with flood safety levels\n self.ax2.set_xlabel(\"dike section\")\n self.ax2.set_ylabel(\"chance of flooding occurrence\")\n # graph with water levels vs dike height\n self.ax3.set_xlabel(\"river length (meters)\")\n self.ax3.set_ylabel(\"height (meters)\")\n # graph with overall costs made\n self.ax6.set_ylabel(\"million Euros\")\n \n self.ax1.set_ylim([0, 100])\n self.ax2.set_ylim([0, 100])\n self.ax3.set_ylim([14, 18])\n self.ax6.set_ylim([0, 25000000])\n \n self.ax1.set_title(\"Overall score on indicators\")\n self.ax2.set_title(\"Flood safety levels\")\n self.ax3.set_title(\"Normative water levels vs dike crest height\")\n self.ax6.set_title(\"Budget spent\")\n \n self.x_pos = np.arange(len(self.indicators))\n self.ax1.set_xticks(self.x_pos)\n self.ax1.set_xticklabels(self.indicators)\n \n flood_safety_levels = [100, 200, 400, 600, 800, 1000, 1250]\n self.ax2.set_yticks(flood_safety_levels)\n self.ax2.set_yticklabels([\"1/\"+str(value) for value in flood_safety_levels])\n \n self.plot1 = None\n self.plot2 = None\n self.plot3 = None\n self.plot4 = None\n self.plot5 = None\n self.plot6 = None\n return", "def plot_q10(x_val, y_val1, y_val2):\n num_counties = [111, 290, 896]\n for idx in range(3):\n pyplot.plot(x_val, y_val1[idx], color='red', linestyle='-',\n marker=None, label='Hierarchical Clustering')\n pyplot.plot(x_val, y_val2[idx], color='blue', linestyle='-',\n marker=None, label='k-Means Clustering (5 iterations)')\n pyplot.xlabel('Number of Clusters')\n pyplot.ylabel('Distortion Value (note scale at top of axis)')\n pyplot.title('Comparative Distortion Results for {ctynum}-County Data\\nDesktop Python'.format(ctynum=num_counties[idx]))\n pyplot.legend(loc='upper right')\n pyplot.grid(True)\n pyplot.show()", "def ecdf_plot(ecdf_q1, ecdf_q2, ecdf_q3, ecdf_q4, performance_measure, ecdf_parameter):\n from plotly.offline import iplot\n import plotly.graph_objs as go\n\n performance_measure = performance_measure.replace('_', ' ').capitalize()\n ecdf_parameter = ecdf_parameter.replace('_', ' ').capitalize()\n\n ecdf_1 = go.Scatter(x=ecdf_q1.x,\n y=ecdf_q1.y,\n name='0 to 25',\n mode='lines+markers',\n marker=dict(size='7', color='#0C3383'))\n ecdf_2 = go.Scatter(x=ecdf_q2.x,\n y=ecdf_q2.y,\n name='25 to 50',\n mode='lines+markers',\n marker=dict(size='7', color='#57A18F'))\n ecdf_3 = go.Scatter(x=ecdf_q3.x,\n y=ecdf_q3.y,\n name='50 to 75',\n mode='lines+markers',\n marker=dict(size='7', color='#F2A638'))\n ecdf_4 = 
go.Scatter(x=ecdf_q4.x,\n y=ecdf_q4.y,\n name='75 to 100 (best wells)',\n mode='lines+markers',\n marker=dict(size='7', color='#D91E1E'))\n\n data = [ecdf_1, ecdf_2, ecdf_3, ecdf_4]\n\n layout = go.Layout(height=650,\n width=650,\n title='ECDF ' + ecdf_parameter,\n titlefont=dict(size=18),\n\n xaxis=dict(title=ecdf_parameter,\n titlefont=dict(size=16),\n type=None,\n zeroline=False,\n showgrid=True,\n showline=False,\n autorange=True),\n\n yaxis=dict(title='Cumulative Probability',\n titlefont=dict(size=16),\n showgrid=True,\n showline=False,\n zeroline=False,\n tickvals=[0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1],\n range=[-0.03, 1.03]),\n\n legend=dict(x=0.65, y=0.1, font=dict(size=14)),\n margin={'l': 50, 'r': 10, 'b': 50, 't': 85})\n\n layout.update(dict(annotations=[go.Annotation(text='Quantiles: ' + performance_measure,\n x=np.max(ecdf_q4.x),\n y=0.3,\n showarrow=False,\n bgcolor='#FFFFFF',\n font=dict(size=16))]))\n\n plot = go.Figure(data=data, layout=layout)\n\n iplot(plot, show_link=False)", "def plot_figs(harbor_data):\n # Creates two subplots to show the temperature/time and altitude/time separately\n # Temperature over time data\n plt.subplot(2, 1, 1)\n plt.plot(harbor_data[\"wx_times\"], harbor_data[\"wx_temperatures\"])\n plt.xlim([0,2.35])\n plt.title(\"Harbor Flight Data\")\n plt.ylabel(\"Temperature, F\")\n # Altitude over time data\n plt.subplot(2, 1, 2)\n plt.plot(harbor_data[\"gps_times\"], harbor_data[\"gps_altitude\"])\n plt.xlabel(\"Mission Elapsed Time, Hours\")\n plt.ylabel(\"Altitude, Feet\")\n plt.show()\n\n # Creates two subplots to show the AltUp/TempUp and AltDown/TempDown separately\n # Altitude up over temperature up data\n plt.subplot(1,2,1)\n plt.plot(harbor_data[\"wx_temp_up\"], harbor_data[\"wx_alt_up\"])\n plt.title(\"Harbor Ascent Flight Data\")\n plt.xlabel(\"Temperature, F\")\n plt.ylabel(\"Altitude, Feet\")\n # Altitude down over temperature down data\n plt.subplot(1,2,2)\n plt.plot(harbor_data[\"wx_temp_down\"], harbor_data[\"wx_alt_down\"])\n plt.title(\"Habor Descent Flight Data\")\n plt.xlabel(\"Temperature, F\")\n plt.show()", "def simple_plot(self):\n for i in np.arange(len(self.e2)):\n self.ax.plot(self.e2[i], 'o', label=self.labels[i])", "def plot3D(*dfs, columns=None, figsize=(5, 5), plot_titles=False):\n # create matplotlib 3d axes\n fig = plt.figure(figsize=figsize)\n ax = Axes3D(fig, azim=-115, elev=15)\n\n for df, color, in zip(dfs, cycle(COLORS)):\n X, Y, Z = (df[col] for col in columns)\n # plot hyperplane\n ax.scatter(X, Y, Z, c=color, marker=MARKER)\n\n # set axis labels\n for axis, col in zip(['x', 'y', 'z'], columns):\n getattr(ax, f'set_{axis}label')(col)\n\n if plot_titles:\n for df in dfs:\n for i, j, k, text in zip(df.iloc[:, 0], df.iloc[:, 1], df.iloc[:, 2], df.index):\n corr = 2\n ax.text(i + corr, j + corr, k + corr, text)\n\n plt.show()", "def plot(self, num_levels=10):\n if num_levels == -1:\n num_levels = len(self.energies())\n print(self.energies(num_levels))\n figure(figsize=(20, 5))\n subplot(1, num_levels + 1, 1)\n self.plot_potential()\n #xlabel('$\\phi$')\n for ii, psi2D in enumerate(self.get_2Dpsis(num_levels)):\n subplot(1, num_levels + 1, ii + 2)\n #imshow(psi2D.real,extent=(self.x[0],self.x[-1],self.y[0],self.y[-1]),interpolation=\"None\",aspect='auto')\n imshow(psi2D.real, interpolation=\"None\", aspect='auto')\n xlabel(ii)", "def plot(self, ax=None, savefile=None, shells=None, color='b', title=None,\n xlabel=None, ylabel=None, withavg=False):\n import matplotlib.pyplot as plt\n if ax is None:\n 
plt.figure()\n axset=plt\n else:\n axset=ax\n\n cmax = float(max(self.counts))\n total = sum(self.counts)\n nalpha = 0.85 if cmax/total > 0.33 else 0.65\n maxy = 1.\n for di, df in enumerate(self.dfs):\n alpha=nalpha*self.counts[di]/cmax\n axset.plot(df.x, df.df, color=color, alpha=alpha)\n maxy_ = np.max(df.df)\n if maxy_ > maxy:\n maxy = maxy_\n\n if withavg and len(self) > 0:\n x = self.dfs[0].x\n axset.plot(x, self.average, 'r-')\n maxy_ = np.max(self.average)\n if maxy_ > maxy:# pragma: no cover\n maxy = maxy_\n\n if len(self) > 0:\n dtype = self.dfs[0].dtype\n unit = \"Ang.\" if dtype == \"R\" else \"Rad.\"\n tstr = \"Radial\" if dtype == \"R\" else \"Angular\"\n else:# pragma: no cover\n unit = \"unknown units\"\n tstr = \"\"\n \n if ax is None:\n if title is None:\n plt.title(\"{} Distribution Function of Collection\".format(tstr))\n else:\n plt.title(title)\n if xlabel is None:\n plt.xlabel(\"Distance ({})\".format(unit))\n else:\n plt.xlabel(xlabel)\n if ylabel is None:\n plt.ylabel(\"Accumulated Density\")\n else:\n plt.ylabel(ylabel)\n\n _plot_shells(axset, shells, maxy)\n \n if savefile is not None:\n plt.savefig(savefile)\n\n from gblearn.base import testmode\n if not testmode:# pragma: no cover\n plt.show()\n return axset", "def test_render_xy_plot():\n gdpinfo = {\n \"gdpfile\": \"isp_gdp.csv\",\n \"separator\": \",\",\n \"quote\": '\"',\n \"min_year\": 1960,\n \"max_year\": 2015,\n \"country_name\": \"Country Name\",\n \"country_code\": \"Country Code\"\n }\n\n render_xy_plot(gdpinfo, [], \"isp_gdp_xy_none.svg\")\n render_xy_plot(gdpinfo, [\"China\"], \"isp_gdp_xy_china.svg\")\n render_xy_plot(gdpinfo, [\"United Kingdom\", \"United States\"],\n \"isp_gdp_xy_uk+usa.svg\")\n render_xy_plot(gdpinfo, [\"India\", \"China\", \"United Kingdom\", \"United States\", \"Aruba\", \"Andorra\", \"Angola\", \"Afghanistan\", \"Albania\"], \"isp_gdp_xy_countries.svg\")", "def plot(self):\n\t\t\t\n\t\tfig,p1=_plt.subplots(4,sharex=True)\n\t\tp1[0].plot(self.time*1e3,self.eRogA,label='Rogowski A')\n\t\tp1[1].plot(self.time*1e3,self.eRogB,label='Rogowski B')\n\t\tp1[2].plot(self.time*1e3,self.eRogC,label='Rogowski C')\n\t\tp1[3].plot(self.time*1e3,self.eRogD,label='Rogowski D')\n\t\t_plot.finalizeSubplot(p1,xlabel='Time (ms)',ylabel='Current (A)')\n\t\t_plot.finalizeFigure(fig,title=self.title)\n\t\t\n\t\treturn p1", "def display2Dpointsets(A, B, ax = None):\n if not ax:\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n ax.plot(A[:,0],A[:,1],'yo',markersize=8,mew=1)\n ax.plot(B[:,0],B[:,1],'b+',markersize=8,mew=1)\n #pylab.setp(pylab.gca(), 'xlim', [-0.15,0.6])\n labels = plt.getp(plt.gca(), 'xticklabels')\n plt.setp(labels, color='k', fontweight='bold')\n labels = plt.getp(plt.gca(), 'yticklabels')\n plt.setp(labels, color='k', fontweight='bold')", "def plot_all(self) -> None:\n self.__plot_si_cf_plane()\n self.__plot_convex_hull()\n self.__plot_fixed_radius()\n self.__plot_delaunay()", "def plot_clusters(self):\n pass" ]
[ "0.7042768", "0.64889604", "0.6424103", "0.62358296", "0.62245584", "0.6214347", "0.6187972", "0.61645967", "0.6090626", "0.6083194", "0.60742915", "0.60631174", "0.6021211", "0.60032755", "0.5985849", "0.5971447", "0.59706855", "0.5946714", "0.59466743", "0.593274", "0.5930092", "0.5907409", "0.5893017", "0.5870112", "0.584623", "0.5845617", "0.58428013", "0.5823288", "0.5816765", "0.57937473", "0.5792791", "0.57615656", "0.575596", "0.573626", "0.5728345", "0.5728117", "0.5704423", "0.569333", "0.5686475", "0.56802624", "0.5658865", "0.56461716", "0.5637557", "0.5635458", "0.5631814", "0.5630831", "0.562647", "0.56253976", "0.56142163", "0.5606378", "0.5593244", "0.559239", "0.5591114", "0.55882525", "0.5576721", "0.5556993", "0.5551243", "0.5550328", "0.55415857", "0.5539223", "0.5539045", "0.5537622", "0.5531883", "0.5530246", "0.5522806", "0.551916", "0.5517429", "0.55132455", "0.55108094", "0.55051434", "0.5503157", "0.5497749", "0.54972804", "0.54965603", "0.5493101", "0.5491043", "0.54908466", "0.54829437", "0.54803336", "0.5479652", "0.54698026", "0.5467798", "0.54614216", "0.5449175", "0.54475623", "0.5442867", "0.5442596", "0.5440135", "0.5437742", "0.5434208", "0.5429059", "0.5426605", "0.54224813", "0.54223824", "0.5421782", "0.5415136", "0.54134923", "0.54111284", "0.54073656", "0.5405082" ]
0.7176058
0
8 microed stepping by faking distance twice as long.
def micro_8(steps, a):
    df = pd.DataFrame(index=np.arange(0, steps * 16), columns=('v', 's', 'd', 't'))
    t = 0.0
    m = 8  # micro level
    d = d0 = math.sqrt(1/a/m)  # faster accel since distance is longer
    s = 0  # steps
    p = 0  # position
    p_d = 1/m  # position delta
    for s in range(800):
        if s == 0:
            d = d0 * 0.676
        else:
            d -= d * 2 / (4 * s + 1)
        s += 1
        p += p_d
        t += d
        df.loc[s] = [1/d/m, p, d, t]
    # m = 1
    # p_d = 1/m
    # d = d * 8
    # for s in range(100, 200):
    #     if s == 0:
    #         d = d0 * 0.676
    #     else:
    #         d -= d * 2 / (4 * s + 1)
    #     s += 1
    #     p += p_d
    #     t += d
    #     df.loc[s] = [1/d/m, p, d, t]
    return df.dropna()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def drive_eight(n):\n # Variables for the go_diff function\n fast_speed = 80 \n slow_speed = 25\n # Half a lap time, this is the time the robot turns in a direction before switching\n half_lap_time =6.2 \n # To avoid having tu manually stop the robot we set it to drive continuously for x amount of seconds.\n elapsedSecs = 0\n while elapsedSecs < half_lap_time * 2 * n:\n arlo.go_diff(fast_speed, slow_speed, 1, 1)\n sleep(half_lap_time)\n arlo.go_diff(slow_speed, fast_speed, 1, 1)\n sleep(half_lap_time)\n elapsedSecs += half_lap_time * 2", "def get_movements_8n():\n s2 = math.sqrt(2)\n return [(1, 0, 1.0),\n (0, 1, 1.0),\n (-1, 0, 1.0),\n (0, -1, 1.0),\n (1, 1, s2),\n (-1, 1, s2),\n (-1, -1, s2),\n (1, -1, s2)]", "def _get_movements_8n():\n s2 = math.sqrt(2)\n return [(1, 0, 1.0),\n (0, 1, 1.0),\n (-1, 0, 1.0),\n (0, -1, 1.0),\n (1, 1, s2),\n (-1, 1, s2),\n (-1, -1, s2),\n (1, -1, s2)]", "def shifter(self):\n #self.BA_shift = self.timeshift_latitude(self.latB, self.latA)\n #self.BC_shift = self.timeshift_latitude(self.latB, self.latC)\n\n\n self.shifted = True #changing boolean to True when function is called.\n\n secondsA = self.secondsA\n secondsB = self.secondsB\n secondsC = self.secondsC\n\n NeA = self.holefill(self.NeA, secondsA)\n NeB = self.holefill(self.NeB, secondsB)\n NeC = self.holefill(self.NeC, secondsC)\n\n start = 0\n stop = len(NeA) - np.max(np.array([self.BA_shift, self.BC_shift]))\n\n startA = start + self.BA_shift\n stopA = stop + self.BA_shift\n\n startC = start + self.BC_shift\n stopC = stop + self.BC_shift\n\n NeA = NeA[startA:stopA]\n NeB = NeB[start:stop]\n NeC = NeC[startC:stopC]\n\n longA = self.holefill(self.longA, secondsA)\n longB = self.holefill(self.longB, secondsB)\n longC = self.holefill(self.longC, secondsC)\n longA = longA[startA:stopA]\n longB = longB[start:stop]\n longC = longC[startC:stopC]\n\n latA = self.holefill(self.latA, secondsA)\n latB = self.holefill(self.latB, secondsB)\n latC = self.holefill(self.latC, secondsC)\n latA = latA[startA:stopA]\n latB = latB[start:stop]\n latC = latC[startC:stopC]\n\n radA = self.holefill(self.radA, secondsA)\n radB = self.holefill(self.radB, secondsB)\n radC = self.holefill(self.radC, secondsC)\n radA = radA[startA:stopA]\n radB = radB[start:stop]\n radC = radC[startC:stopC]\n\n velA = self.holefill(self.velA, secondsA)\n velB = self.holefill(self.velB, secondsB)\n velC = self.holefill(self.velC, secondsC)\n velA = velA[startA:stopA]\n velB = velB[start:stop]\n velC = velC[start:stop]\n\n altA = self.holefill(self.altA, secondsA)\n altB = self.holefill(self.altB, secondsB)\n altC = self.holefill(self.altC, secondsC)\n altA = altA[startA:stopA]\n altB = altB[start:stop]\n altC = altC[startC:stopC]\n\n\n mlatA = self.holefill(self.mlatA, secondsA)\n mlatB = self.holefill(self.mlatB, secondsB)\n mlatC = self.holefill(self.mlatC, secondsC)\n mlatA = mlatA[startA:stopA]\n mlatB = mlatB[start:stop]\n mlatC = mlatC[startC:stopC]\n\n mlongA = self.holefill(self.mlongA, secondsA)\n mlongB = self.holefill(self.mlongB, secondsB)\n mlongC = self.holefill(self.mlongC, secondsC)\n mlongA = mlongA[startA:stopA]\n mlongB = mlongB[start:stop]\n mlongC = mlongC[startC:stopC]\n\n mltA = self.holefill(self.mltA, secondsA)\n mltB = self.holefill(self.mltB, secondsB)\n mltC = self.holefill(self.mltC, secondsC)\n mltA = mltA[startA:stopA]\n mltB = mltB[start:stop]\n mltC = mltC[startC:stopC]\n\n secondsA = self.holefill(secondsA, secondsA)\n secondsB = self.holefill(secondsB, secondsB)\n secondsC = self.holefill(secondsC, secondsC)\n 
secondsA = secondsA[startA:stopA]\n secondsB = secondsB[start:stop]\n secondsC = secondsC[startC:stopC]\n\n indsA = np.nonzero(secondsA)[0]\n indsB = np.nonzero(secondsB)[0]\n indsC = np.nonzero(secondsC)[0]\n\n inds = np.intersect1d(indsA, indsB)\n inds = np.intersect1d(inds, indsC)\n\n self.NeA = NeA[inds]\n self.NeB = NeB[inds]\n self.NeC = NeC[inds]\n\n self.longA = longA[inds]\n self.longB = longB[inds]\n self.longC = longC[inds]\n\n self.latA = latA[inds]\n self.latB = latB[inds]\n self.latC = latC[inds]\n\n self.radA = radA[inds]\n self.radB = radB[inds]\n self.radC = radC[inds]\n\n self.velA = velA[inds]\n self.velB = velB[inds]\n self.velC = velC[inds]\n\n self.altA = altA[inds]\n self.altB = altB[inds]\n self.altC = altC[inds]\n\n self.mlatA = mlatA[inds]\n self.mlatB = mlatB[inds]\n self.mlatC = mlatC[inds]\n\n self.mlongA = mlongA[inds]\n self.mlongB = mlongB[inds]\n self.mlongC = mlongC[inds]\n\n self.mltA = mltA[inds]\n self.mltB = mltB[inds]\n self.mltC = mltC[inds]\n\n self.secondsA = secondsA[inds]\n self.secondsB = secondsB[inds]\n self.secondsC = secondsC[inds]", "def test_pos_1024() -> None:\n assert sw.walk_to(1024).distance == 31", "def dist_to_stop(speed):\n return speed ** 2 / 4", "def _step(self) -> None:", "def check_offset(self):\n\n for d in range(self.n_dmps):\n if abs(self.y0[d] - self.goal[d]) < 1e-4:\n self.goal[d] += 1e-4", "def step(self, move):", "def jump(distance):\r\n t.penup()\r\n t.forward(200)\r\n t.pendown()\r\n return None", "def move_coarse(self, direction, count=1):\n if self._direction != direction and self.simulate_backlash:\n self._direction = direction\n backlash_offset = randint(-maximum_backlash, maximum_backlash)\n self._move(direction, 1, 8 + backlash_offset)\n self._move(direction, count - 1, 8)\n self.backlash_count += 1\n else:\n self._direction = direction\n self._move(direction, count, 8)", "def _TIME2STEPS(time):\n return int(time*1000)", "def foward_shimmey(self):\n for x in range(6):\n self.right(primary=60, counter=30)\n time.sleep(.5)\n self.left(primary=70, counter=30)\n time.sleep(.5)\n self.back()\n time.sleep(2) \n self.stop()", "def Advance():\n warp.step()", "def warmup_step(ckpt_step: int) -> float:\n return ckpt_step * 10", "def step(self, delta_l11, delta_l12, delta_l13, delta_l21, delta_l22, delta_l23):\n self.l11 += delta_l11; self.l12 += delta_l12; self.l13 += delta_l13\n self.l21 += delta_l11; self.l22 += delta_l12; self.l23 += delta_l13\n self.l21 += delta_l21; self.l22 += delta_l22; self.l23 += delta_l23\n # check that all tendon lenghts are within limit\n self.l11 = self.l1min if self.l11 < self.l1min else self.l11\n self.l12 = self.l1min if self.l12 < self.l1min else self.l12\n self.l13 = self.l1min if self.l13 < self.l1min else self.l13\n self.l11 = self.l1max if self.l11 > self.l1max else self.l11\n self.l12 = self.l1max if self.l12 > self.l1max else self.l12\n self.l13 = self.l1max if self.l13 > self.l1max else self.l13\n self.l21 = self.l2min if self.l21 < self.l2min else self.l21\n self.l22 = self.l2min if self.l22 < self.l2min else self.l22\n self.l23 = self.l2min if self.l23 < self.l2min else self.l23\n self.l21 = self.l2max if self.l21 > self.l2max else self.l21\n self.l22 = self.l2max if self.l22 > self.l2max else self.l22\n self.l23 = self.l2max if self.l23 > self.l2max else self.l23\n old_tip_vec = self.tip_vec2 # used for potential reward\n self.update_variables()\n new_tip_vec = self.tip_vec2 # used for potential reward\n reward = self.r_static\n return reward", "def get_step():\n\n # Decide which 
direction to go and how far to go in that direction.\n direction = choice([1, -1])\n distance = choice([0, 1, 2, 3, 4, 5, 6, 7, 8])\n step = direction * distance\n\n # Reject moves that go nowhere.\n if step == 0:\n get_step()\n else:\n return step", "def _step(self, board, elapsedTime):\n\t\tpass", "def fifteen():\r\n\r\n currentcell = 1.0\r\n cellpaths = 2.0\r\n \r\n while currentcell < 20.0:\r\n currentcell += 1.0\r\n cellpaths = cellpaths * (4.0 - 2.0/currentcell)\r\n \r\n return cellpaths", "def cooroutine_helper(self):\n prev = yield\n running_distance = 0\n while True:\n nxt = yield running_distance\n running_distance += distance(prev, nxt).meters\n prev = nxt", "def WarpStep(iters=5):\n MSG(\"WarpStep\")\n for j in range(iters):\n warp.step()\n return", "def shiftAsideMark(state, opp, distDemar):\n dest = None\n while True:\n dest = Vector2D.create_random(low=-1, high=1)\n dest.norm = distDemar\n dest += opp.position\n if state.is_valid_position(dest) and \\\n distance_horizontale(dest, state.my_goal) > 10.+distance_horizontale(opp.position, state.my_goal):\n break\n return goTo(state, dest)", "def target_position(self, time):\n \"\"\"\n start_pos = self.points[self.cur_start]\n seg_time = time - self.last_checkpoint_time\n\n #The arguement of target-velocity dosent matter\n cur_pos = self.target_velocity(time)*seg_time + start_pos\n\n \n # or time > (self.total_time / 4)*(self.cur_start + 1)\n cur_pos_norm = length(cur_pos - start_pos)\n\n next_corner = self.points[(self.cur_start + 1)%4]\n \n seg_norm = length(next_corner - start_pos)\n print(\"cur_pos : \", cur_pos, \"segment: \", self.cur_start, seg_norm - cur_pos_norm)\n\n if cur_pos_norm >= seg_norm:\n self.cur_start = (self.cur_start + 1) % 4\n self.last_checkpoint_time = time\n return cur_pos\n \"\"\"\n\n #Possibly use rospy.sleep()\n total_time = self.total_time\n\n\n if time < total_time/4:\n return self.path1.target_position(time)\n\n elif time - total_time/4 == 0:\n rospy.sleep(0.5)\n\n elif time < total_time/2:\n return self.path2.target_position(time - (total_time/4 + 0.5))\n # return self.path2.target_position(time - (total_time/4 ))\n\n\n elif time - total_time/2 == 0:\n rospy.sleep(0.5)\n\n elif time <= total_time/4*3:\n return self.path3.target_position(time - (total_time/2 + 1))\n # return self.path3.target_position(time - (total_time/2))\n\n\n elif time - total_time/4*3 == 0:\n rospy.sleep(0.5)\n\n else:\n return self.path4.target_position(time - (total_time/4*3 + 1.5))\n # return self.path4.target_position(time - (total_time/4*3))", "def compute_step(X):\n return MOVING_STEP", "def step_forward(self):", "def walk(self):\n self.speed = self.speed + (0.2 * self.legs)", "def takeoff(self, n, e, d):\n pass", "def _distance_next(self):\n\n self.distance = 10\n\n # Here a set index to 0 if the car is finishing a lap\n # Also reset the farthest\n if self.index > (len(self.x_trajectory) - 6) and self.closed:\n self.index = 0\n self.farthest = -1\n self.laps += 1\n\n for w in range(self.index, self.index + 20):\n\n self.dist_point = math.sqrt((self.x_trajectory[w] - self.x)**2\n + (self.y_trajectory[w] - self.y)**2)\n\n if self.dist_point < self.distance:\n self.distance = self.dist_point\n self.index = w\n\n if w >= (len(self.x_trajectory) - 1):\n break\n\n self._calc_side()\n\n self.distance = self.distance * self.sign\n\n return self.distance", "def nearest_test_pulse(self):", "def _step(self, start):\n #angle = np.random.uniform(0,2*np.pi) # only 2-dim\n #direction = angle2vec(angle)\n\n angle = 
np.random.randn(self.dim)\n direction = angle / la.norm(angle)\n \n if not self.query(start):\n print(f\"Given an invalid point! {start}\")\n \n testCounter = 0\n max_iter = 1000\n \n ## Case for adding to direction ##\n high = 1\n testCounter = 0\n while(self.query(start + high*direction)):\n high = high*2\n testCounter += 1\n if testCounter > max_iter:\n print(f\"Warning: Stuck in t_plus high loop with: \\n\\\n high = {high}\\n\")\n \n low = high/2\n testCounter = 0\n while(not self.query(start + low*direction)):\n low = low/2\n testCounter += 1\n if testCounter > max_iter:\n print(f\"Warning: Stuck in t_plus low loop with: \\n\\\n low = {low}\\n\")\n \n # now we know that (start + low * direction) is inside\n #assert(zonoid_membership_def(A, start+low*direction))\n # and that (start + high * direction) is outside\n #assert(not zonoid_membership_def(A, start+high*direction))\n \n tol = 1e-5\n t_plus = (high-low)/2\n old_t = 1\n current = start\n testCounter = 0\n while(abs(t_plus-old_t) > tol):\n old_t = t_plus\n t_plus = (high+low)/2\n testpoint = current + t_plus*direction\n if( self.query(testpoint) ):\n low = t_plus\n else:\n high = t_plus\n \n testCounter += 1\n if testCounter > max_iter:\n print(f\"Warning: Stuck in t_plus loop with: \\n\\\n t_plus = {t_plus}\\n\\\n t_old = {t_old}\\n\\\n high = {high}\\n\\\n low = {low}\\n\")\n t_plus = old_t\n \n ## Case for subtracting from direction\n high = -1\n testCounter = 0\n while(self.query(start + high*direction)):\n high = high*2\n testCounter += 1\n if testCounter > max_iter:\n print(f\"Warning: Stuck in t_minus high loop with: \\n\\\n high = {high}\\n\")\n \n low = high/2\n testCounter = 0\n while(not self.query(start + low*direction)):\n low = low/2\n testCounter += 1\n if testCounter > max_iter:\n print(f\"Warning: Stuck in t_minus low loop with: \\n\\\n low = {low}\\n\")\n \n # now we know that (start + low * direction) is inside\n #assert(zonoid_membership_def(A, start+low*direction))\n # and that (start + high * direction) is outside\n #assert(not zonoid_membership_def(A, start+high*direction))\n \n tol = 1e-10\n t_minus = (high-low)/2\n old_t = 1\n current = start\n testCounter = 0\n while(abs(t_minus-old_t) > tol):\n old_t = t_minus\n t_minus = (high+low)/2\n testpoint = current + t_minus*direction\n if( self.query(testpoint) ):\n low = t_minus\n else:\n high = t_minus\n \n testCounter += 1\n if testCounter > max_iter:\n print(f\"Warning: Stuck in t_minus loop with: \\n\\\n t_minus = {t_minus}\\n\\\n t_old = {t_old}\\n\\\n high = {high}\\n\\\n low = {low}\\n\")\n t_minus = old_t\n \n # Make the step\n final_t = np.random.uniform(t_minus, t_plus)\n #print(f\"Final t = {final_t}\")\n \n # remove extra returns for now for other compatibility\n return start + final_t*direction #, start+t_plus*direction, start+t_minus*direction", "def radius_step(radius, num_longtidues, num_latitudes, time):\n step = int(exp(time))\n radius['long_down'] = radius['long_down'] - step\n if radius['long_down'] <= 0:\n radius['long_down'] = 0\n radius['long_up'] = radius['long_up'] + step\n if radius['long_up'] >= num_longtidues - 1:\n radius['long_up'] = num_longtidues - 1\n radius['lat_down'] = radius['lat_down'] - step\n if radius['lat_down'] <= 0:\n radius['lat_down'] = 0\n radius['lat_up'] = radius['lat_up'] + step\n if radius['lat_up'] >= num_latitudes - 1:\n radius['lat_up'] = num_latitudes - 1", "def time(n):\n steps = 3 + math.ceil(n/5.0)*2\n return steps", "def ramp8(params, phase, args=dict(n=4, guess=[1, 0.0096, 0.35, 5.3e-4])):\n # 
2013-12-07 14:08 IJMC: Created.\n\n if params[2]>=phase.min():\n params[2] = phase.min() - np.diff(phase).mean()/1e6\n \n return params[0] * (1. + params[1] * np.log(phase - params[2]) + \\\n params[3] * np.log(phase - params[2])**2)", "def get_step(self):\n direction = choice([1,-1])\n direction = choice([0, 1, 2, 3, 4])\n step = direction * distance\n return step", "def backtrack_steps():\n\n # Initialize position and number of steps\n x = 0\n n_steps = 0\n\n # Walk until we get to positive 1\n while x < 1:\n x += 2 * np.random.randint(0, 2) - 1\n n_steps += 1\n\n return n_steps", "def skipp(self):\n for x in range(4):\n self.fwd(right=100, left=100)\n time.sleep(.5)\n self.servo(1000)\n time.sleep(.1)\n self.servo(2000)\n time.sleep(.1)\n self.fwd(right=-100, left=-100)\n time.sleep(.1)\n self.servo(-1000)\n self.stop()", "def calculate_travel_time_complex(distance_meters, accel_mps2):\n time = 0\n distance_progress = 0\n speed = 0\n halfway_point = distance_meters / 2\n while distance_progress < halfway_point:\n time = time + 1\n speed = speed + accel_mps2\n distance_progress = distance_progress + speed\n \"\"\"\n Output progress \n print \"{0} seconds | speed: {1} m/s | distance traveled: {2} m\".format(time, speed, distance_progress)\n \"\"\"\n time = time * 2\n return [time, speed]", "def nine_punishment(self):\n self.direction_clock_wise = not self.direction_clock_wise", "def inter_step(self):\n #https://math.stackexchange.com/questions/1918743/how-to-interpolate-points-between-2-points\n c_loc = self.checkpoint_target.get_location()\n \n self.dist_to_checkpoint = self._calc_distance(c_loc)\n new_y = self.current_location[0] + (self.walk_speed / self.dist_to_checkpoint \\\n * (c_loc[0] - self.current_location[0]))\n new_x = self.current_location[1] + (self.walk_speed / self.dist_to_checkpoint \\\n * (c_loc[1] - self.current_location[1]))\n new_location = [float(new_y), float(new_x)]\n self.current_location = new_location\n self.walk_route.append(new_location)", "def next_step(self):\n\n y_next = []\n y_next.append(0)\n for i in range(1, len(self.x) - 1):\n x = self.x[i]\n\n y = self.constant* (self.y_current[i + 1] + self.y_current[i - 1] - 2 * self.y_current[i])\\\n + 2 * self.y_current[i] - self.y_previous[i]\n\n y_next.append(y)\n\n y_next.append(0)\n\n self.y_previous = copy.copy(self.y_current)\n self.y_current = copy.copy(y_next)\n\n if self.timestep % 10000 is 0:\n self.timeframes[self.timestep] = copy.copy(self.y_current)\n\n self.timestep += 1", "def traveled_distance(self, at_time: int) -> float:\n pass", "def _step(self):\n pass", "def step(self):\n self.latent.step()", "def mark_sq8(self):\n self.drive_inches(15, 400)\n self.turn_degrees(90, 400)\n self.drive_inches(13, 400)\n ev3.Sound.speak('Place Mark').wait()\n self.arm_calibration()\n self.drive_inches(-13, 400)\n self.turn_degrees(-90, 400)\n self.drive_inches(-15, 400)", "def left_steering(measurement):\n measurement = (measurement + CORRECTION_FACTOR)\n return measurement", "def _STEPS2TIME(step):\n return step/1000.", "def backward_shimmey(self):\n for x in range(6):\n self.right(primary=-70, counter=-30)\n time.sleep(.5)\n self.left(primary=-70, counter=-30)\n time.sleep(.5)\n self.stop()", "def backward_shimmey(self):\n for x in range(6):\n self.right(primary=-70, counter=-30)\n time.sleep(.5)\n self.left(primary=-70, counter=-30)\n time.sleep(.5)\n self.stop()", "def smooth_drive(self, distance, linear_speed):\n ### EXTRA CREDIT\n # TODO\n pass # delete this when you implement your code", "def 
step_linear_double(step):\n return step * 2", "def sweep50T(self):\n return 35.6", "def calc_time(self, distance):\r\n if distance < 400:\r\n return 2*math.sqrt(distance / 1406.25)\r\n else:\r\n distance -= 400\r\n return distance / 750 + 16 / 15", "def earth_tide(theta, lamda, gtime):\n\n global dsz, dcz, dsl, dcl, ssz, scz, ssl, scl, dpar, sdist # bpos common block\n global h, k, l # love common block\n h = [0.6114, 0.2891, 0.175]\n k = [0.304, 0.09421, 0.043]\n l = [0.0832, 0.0145, 0.0103]\n\n global azt, azs # azimut common block\n global etmut # tdiff common block\n global moon # sunny common block\n moon = 0\n # hardwire these - you can only send it ONE droptime\n deltat = 1\n NPT = 1\n\n temp_time = num2date(gtime)\n\n YY = temp_time.year\n MO = temp_time.month\n DD = temp_time.day\n HH = temp_time.hour\n MM = temp_time.minute\n SS = temp_time.second\n # Initialize variables\n irl = 1\n iflag = 0\n ntotl = 1\n iget = [0, 0, 0, 0, 0, 0, 0] # ' !!!\n ispc = [0, 0, 0, 0] # ' !!!\n ntw = [1, 0, 0] # ' !!!\n ioptn = 't'\n ielement = 0\n # \tdata statements for input and output unit numbers (on terminal I/O)\n inun = 5\n ioun = 6\n nptpb = 6\n\n yr1 = YY - 1900\n day1 = date2num(datetime(YY, MO, DD))\n # \tfind times in hours from 0 hr, 1 jan 1900\n # matlab:\n ts = (\n SS / 3600\n + MM / 60\n + HH\n + 24 * (day1 - 1)\n + 8760 * yr1\n + 24 * np.fix((yr1 - 1) / 4)\n )\n # python:\n dj = date_to_julian_day(datetime(YY, MO, DD))\n djref = date_to_julian_day(datetime(1899, 12, 31, 0, 0, 0))\n delta_dj = (\n dj - djref\n ) # difference in days from current date (0hr) to 0hr, 1 jan 1900\n delta_djhr = float(delta_dj) * 24.0 + HH - 12.0 + MM / 60.0 + SS / 3600.0\n te = ts + (NPT - 1) * deltat / 3600\n d = deltat / 3600\n # terms=(te-ts)/d + 1\n terms = NPT\n\n # done asking questions - begin execution\n i = 1\n tt = ts\n sph(theta, lamda, 0)\n etmut = 41.184 + yr1 - 70\n # matlab:\n # t = (tt+12 + (etmut/3600))/876600\n t = (delta_djhr + etmut / 3600) / 876600\n # t is ephemeris time in julian centuries from 12 hr 0 jan 1900\n ephem(t)\n\n # calculate normalized gravity tides\n [grav, tilt, strain, gdc] = elastd(ntw)\n\n gravtide = 1.0e5 * grav\n # convert m/s² to mgal: 1m/s² = 100 gal = 100 000 mgal\n\n iflag = 1\n\n iterms = np.fix(terms)\n i = 1\n return gravtide", "def update_total_speed_input_step(self,curr_v):\n \n tot_speed_input_east=np.dot(self.W_speed_east,self.speed_inputs_east)/self.N_e\n tot_speed_input_west=np.dot(self.W_speed_west,self.speed_inputs_west)/self.N_e\n tot_speed_input_north=np.dot(self.W_speed_north,self.speed_inputs_north)/self.N_e\n tot_speed_input_south=np.dot(self.W_speed_south,self.speed_inputs_south)/self.N_e\n\n self.tot_speed_input_all_padded[:self.N_e,0]=\\\n tot_speed_input_east+tot_speed_input_west+\\\n tot_speed_input_north+tot_speed_input_south\n \n if self.use_eight_directions is True:\n tot_speed_input_north_east=np.dot(self.W_speed_north_east,\n self.speed_inputs_north_east)/self.N_e\n tot_speed_input_north_west=np.dot(self.W_speed_north_west,\n self.speed_inputs_north_west)/self.N_e\n tot_speed_input_south_east=np.dot(self.W_speed_south_east,\n self.speed_inputs_south_east)/self.N_e\n tot_speed_input_south_west=np.dot(self.W_speed_south_west,\n self.speed_inputs_south_west)/self.N_e\n \n self.tot_speed_input_all_padded[:self.N_e,0]+=\\\n tot_speed_input_north_east+tot_speed_input_north_west+\\\n tot_speed_input_south_east+tot_speed_input_south_west\n \n else:\n \n # diagonal move with four directions\n if abs(curr_v[0])>0 and abs(curr_v[1])>0:\n 
self.tot_speed_input_all_padded[:self.N_e,0]*=.5", "def _epsilon(self, step):\n if step < 0:\n return self._start\n elif step > self._steps:\n return self._stop\n else:\n return self._step_size * step + self._start", "def _sim_step(self, u):\n raise NotImplementedError", "def consume_move(self) :\n return math.ceil(math.sqrt(self.speed[0]**2 + self.speed[1]**2))", "def lab_run_small(character_id, time_step):\n pass", "def wiggle_breakpoints(y, xi, segment_bdy3, wiggle_width=5, num_iterations=1,\n verbose=False, log_func=sys.stdout.write):\n t0 = time.time()\n count = 0\n segment_bdy4 = map(tuple, segment_bdy3)\n while count < num_iterations:\n did_nothing = True\n bps = [x[0] for x in segment_bdy4] + [segment_bdy4[-1][1]]\n new_bps = [0]\n for index in xrange(1, len(bps)-1):\n b = bps[index]\n lpos = new_bps[index-1]\n rpos = bps[index+1]\n\n wiggle_width = 5\n if b - lpos == 1 or rpos - b == 1:\n new_bps.append(b)\n continue\n lwiggle = max(b-wiggle_width, lpos+1)\n rwiggle = min(b+wiggle_width, rpos-1)\n delta_lls = []\n for pos in xrange(lwiggle, rwiggle):\n lpts = y[lpos:pos]\n rpts = y[pos:rpos]\n allpts = y[lpos:rpos]\n\n lxi = xi[lpos:pos]\n rxi = xi[pos:rpos]\n allxi = xi[lpos:rpos]\n\n mu_l = np.clip(lpts.sum( )/lxi.sum( ), 1e-2, None)*lxi\n mu_r = np.clip(rpts.sum( )/rxi.sum( ), 1e-2, None)*rxi\n mu_all = np.clip(allpts.sum( )/allxi.sum( ), 1e-2, None)*allxi\n\n delta_ll = ((-mu_l + lpts*np.log(mu_l)).sum( ) +\n (-mu_r + rpts*np.log(mu_r)).sum( ) -\n (-mu_all + allpts*np.log(mu_all)).sum( ))\n delta_lls.append(delta_ll)\n new_b = lwiggle + np.argmax(delta_lls)\n try:\n gain = (delta_lls[new_b-lwiggle] - delta_lls[b-lwiggle])\n except:\n print \"-\"*40\n print lpos, b, rpos\n print \"argmax\", np.argmax(delta_lls), len(delta_lls)\n print \"wiggle\", lwiggle, rwiggle\n print new_b-lwiggle, b-lwiggle\n raise Exception(\"Wiggling breakpoints produced an invalid breakpoint configuration\")\n new_bps.append(new_b)\n if new_b != b:\n did_nothing = False\n if verbose:\n print \"%6d -> %6d : (%2d) gain +%.2f\"%(b, new_b, delta_lls[b-lwiggle], gain)\n new_bps.append(bps[-1])\n assert len(new_bps) == len(bps)\n segment_bdy4 = [(new_bps[i], new_bps[i+1]) for i in xrange(0, len(new_bps)-1)]\n count += 1\n if did_nothing:\n break\n log_func(\"%.2f s spent wiggling, %d iterations\\n\"%(time.time()-t0, count))\n return segment_bdy4", "def run(self):\n for direction in self.directions:\n rotation = direction[0]\n steps = direction[1]\n\n self.make_rotation(rotation)\n hq_found = self.travel(steps)\n\n if hq_found:\n return (abs(self.new_loc[0] + self.new_loc[1]))", "def tilt(self) -> int:", "def get_step(self):\n # decide which direction and how far\n direction = choice([1, -1])\n distance = choice([0, 1, 2, 3, 4])\n step = direction * distance\n return step", "def add_step_8(R):\n suitable_positions = [[], [], [], [], []]\n for row_idx in range(8):\n for col_idx in range(8):\n if single_pos_no_conflict_check((row_idx, col_idx), R):\n if single_move_feasible_q(R[6][0], (row_idx, col_idx)):\n suitable_positions[0].append((row_idx, col_idx))\n if single_move_feasible_k(R[6][1], (row_idx, col_idx)):\n suitable_positions[1].append((row_idx, col_idx))\n if single_move_feasible_r(R[6][2], (row_idx, col_idx)):\n suitable_positions[2].append((row_idx, col_idx))\n if single_move_feasible_n(R[6][3], (row_idx, col_idx)):\n suitable_positions[3].append((row_idx, col_idx))\n if single_move_feasible_b(R[6][4], (row_idx, col_idx)):\n suitable_positions[4].append((row_idx, col_idx))\n if [] in 
suitable_positions:\n return []\n result_list = []\n for pos8 in itertools.product(*suitable_positions):\n if len(set(pos8)) == 5:\n tmp = copy.deepcopy(R)\n tmp.insert(7, pos8) # insert the positions for label 8\n tmp[-2] = [7, 1890, 8, 10080, 20, 840, 144, 1260]\n tmp[-1] = [2744, 36, 375, 336, 108, 240, 20, 504] # restore the products\n for pos in pos8:\n tmp[-2][pos[0]] *= 8\n tmp[-1][pos[1]] *= 8 # update the products\n result_list.append(tmp)\n return result_list", "def lab_run_big(character_id, time_step):\n pass", "def escape_maze(instructions: List[int]) -> int:\n current_index = 0\n steps = 0\n while current_index < len(instructions):\n jumps = instructions[current_index]\n instructions[current_index] += 1\n current_index += jumps\n steps += 1\n\n return steps", "def run_step(self, milliseconds):\n stopDistance = self.params['safeDistance']\n\n timeStep = timedelta(milliseconds=milliseconds)\n newTime = self.time + timeStep # Time after step is performed.\n\n for light in self._lights:\n if newTime > light.getNextSwitchTime():\n light.switch(newTime)\n\n toRemove = [ ]\n for car in self._cars:\n if car.state != Car.DELETED:\n car.prepareMove(timeStep)\n else:\n toRemove.append(car)\n\n for car in toRemove: self._cars.remove(car)\n for car in self._cars: car.finishMove()\n\n # Generate new car.\n # It is always added to the queue and if there is enough place then\n # it will be instantly added to the road.\n carsToAdd, newLastCarTime = self.howManyCarsToAdd(newTime)\n self.addCars(carsToAdd)\n self._lastCarGenerationTime = newLastCarTime\n\n self.addCarsFromQueueToRoad()\n\n # Update time.\n self.time = newTime", "def update_speed_input_step(self,curr_v):\n \n # update speed inputs \n self.speed_inputs_east*=0\n self.speed_inputs_west*=0\n self.speed_inputs_north*=0\n self.speed_inputs_south*=0\n\n if self.use_eight_directions is True: \n self.speed_inputs_north_east*=0\n self.speed_inputs_north_west*=0\n self.speed_inputs_south_east*=0\n self.speed_inputs_south_west*=0\n \n #speed_values=self.rr[:self.N_e,0] \n speed_values=np.ones((self.N_e,1))\n\n if curr_v[0]>0:\n \n # north-east\n if self.use_eight_directions is True and curr_v[1]>0:\n self.speed_inputs_north_east=speed_values \n \n # south-east \n elif self.use_eight_directions is True and curr_v[1]<0:\n self.speed_inputs_south_east=speed_values\n \n #east \n else:\n self.speed_inputs_east=speed_values\n\n\n elif curr_v[0]<0:\n\n # north-west \n if self.use_eight_directions is True and curr_v[1]>0:\n self.speed_inputs_north_west=speed_values\n\n # south-west \n elif self.use_eight_directions is True and curr_v[1]<0:\n self.speed_inputs_south_west=speed_values\n \n # west \n else:\n self.speed_inputs_west=speed_values\n\n else: \n # north\n if curr_v[1]>0:\n self.speed_inputs_north=speed_values\n\n # south\n elif curr_v[1]<0:\n self.speed_inputs_south=speed_values", "def get_step(self):\n direction = choice([1, -1])\n distance = choice([0, 1, 2, 3, 4])\n step = direction * distance\n return step", "def get_step(self):\n direction = choice([1, -1])\n distance = choice([0, 1, 2, 3, 4])\n step = direction * distance\n return step", "def sw(n):\n return 4*n*n + 2*n + 1", "def problem_086(limit,verbose):\n\n # Three routes:\n # *------F Sides labeled A, B, C, routes clockwise from S\n # | /| R1^2 = (A + C)^2 + B^2\n # | / n R2^2 = (B + C)^2 + A^2\n # +-----+------+-----F R3^2 = (A + B)^2 + C^2\n # | | / | . `|\n # | A / .|` / |\n # | |/. ` a-n / |\n # +-C---S-b-B--+-----+\n # | ` . 
|\n # | `|\n # *------+\n # | |\n # | |\n # | |\n # +------F\n \n # Genreate all triples up to perimeter 3M + sqrt((M + M)^2 + M^2)\n # Which is is 3M + sqrt(5M^2)\n\n total_found = 0\n cuboids = defaultdict(set)\n triples = set()\n under_length = []\n \n for batch in count():\n size = (batch + 1) * 500\n max_triple_perimeter = int(3 * size + sqrt(5 * size**2)) + 1\n all_triples = set(generate_triples(max_triple_perimeter))\n this_loop = all_triples - triples\n triples = all_triples\n \n with click.progressbar(this_loop, label=\"{}\".format(total_found)) as bar:\n new_cuboids = (c for t in bar for c in generate_cuboids(t))\n new_cuboids = (c for c in new_cuboids if c.a > 0)\n new_cuboids = (c for c in new_cuboids if is_shortest_route_integral(c))\n for cuboid in new_cuboids:\n cuboids[cuboid.c].add(cuboid)\n \n for i in range(batch * 500, batch * 500 + 500):\n \n total_found += len(cuboids[i])\n if total_found >= limit:\n click.echo(total_found)\n click.echo(i)\n return", "def step5(self):\n\t\tself.j = self.k\n\t\tif self.b[self.k] == 'e':\n\t\t\ta = self.m()\n\t\t\tif a > 1 or (a == 1 and not self.cvc(self.k-1)):\n\t\t\t\tself.k = self.k - 1\n\t\tif self.b[self.k] == 'l' and self.doublec(self.k) and self.m() > 1:\n\t\t\tself.k = self.k -1", "def _walk(drv_lon,drv_lat,i,j,inc):\n\t\tif drv_lon[i,j] == 0:\n\t\t\tif drv_lat[i,j] > 0:\n\t\t\t\ti += inc\n\t\t\telse:\n\t\t\t\ti += -inc\n\t\telif drv_lon[i,j] > 0:\n\t\t\tangle = np.arctan(drv_lat[i,j]/drv_lon[i,j])\n\t\t\tif np.abs(angle) <= np.pi/8.:\n\t\t\t\tj += inc\n\t\t\telif np.pi/8. < angle < 3.*np.pi/8.:\n\t\t\t\tj += inc\n\t\t\t\ti += inc\n\t\t\telif -3.*np.pi/8. < angle < -np.pi/8.:\n\t\t\t\tj += inc\n\t\t\t\ti += -inc\n\t\t\telif angle >= 3.*np.pi/8.:\n\t\t\t\ti += inc\n\t\t\telse:\n\t\t\t\ti += -inc\n\t\telse:\n\t\t\tangle = np.arctan(drv_lat[i,j]/drv_lon[i,j])\n\t\t\tif np.abs(angle) <= np.pi/8.:\n\t\t\t\tj += -inc\n\t\t\telif np.pi/8. < angle < 3.*np.pi/8.:\n\t\t\t\tj += -inc\n\t\t\t\ti += -inc\n\t\t\telif -3.*np.pi/8. 
< angle < -np.pi/8.:\n\t\t\t\tj += -inc\n\t\t\t\ti += inc\n\t\t\telif angle >= 3.*np.pi/8.:\n\t\t\t\ti += -inc\n\t\t\telse:\n\t\t\t\ti += inc\n\t\treturn i,j", "def make_light_prob(distance):\n if distance <= 1250 / 9:\n return 1\n return .99 * make_light_prob(distance - 250 / 9)", "def speed(self) -> int:", "def speed(self) -> int:", "def compute_trajectory():\n pass", "def puzzle2(offsets):\n return find_jumps_to_exit(offsets, lambda o: o + 1 if o < 3 else o - 1)", "def step(cc, hda):\n cc[1:-1] += hda*(cc[:-2] + cc[2:] - 2*cc[1:-1])\n cc[0] = cc[1]\n cc[-1] = cc[-2]", "def _calc_checkpoint_arrival(self, distance, current_time):\n # From: https://en.wikipedia.org/wiki/Walking, use random float between 4.51() kph (1.25 mps) to 5.43 kph (1.51 mps) to simulate\n # a walking speed\n self.time_step_to_enqueue = int(N.ceil((distance / self.walk_speed)) + current_time)\n self.dist_to_checkpoint = distance\n return self.time_step_to_enqueue", "def side_step(r, num_repeats=1):\n for i in range(num_repeats):\n r.go(-10, -150)\n time.sleep(2)\n r.go(-20)\n time.sleep(1)\n r.go(-10, 150)\n time.sleep(2)\n r.stop()\n time.sleep(.15)\n for i in range(num_repeats):\n r.go(-10, 150)\n time.sleep(2)\n r.go(-20)\n time.sleep(1)\n r.go(-10, -150)\n time.sleep(2)\n r.stop()\n time.sleep(.15)\n for i in range(num_repeats):\n r.go(10, 150)\n time.sleep(2)\n r.go(20)\n time.sleep(1)\n r.go(10, -150)\n time.sleep(2)\n r.stop()\n time.sleep(.15)\n for i in range(num_repeats):\n r.go(10, -150)\n time.sleep(2)\n r.go(20)\n time.sleep(1)\n r.go(10, 150)\n time.sleep(2)\n r.stop()\n time.sleep(.15)", "def take_step(self):\n if self.facing == 0:\n self.new_loc = (self.new_loc[0], self.new_loc[1] + 1)\n elif self.facing == 1:\n self.new_loc = (self.new_loc[0] + 1, self.new_loc[1])\n elif self.facing == 2:\n self.new_loc = (self.new_loc[0], self.new_loc[1] - 1)\n else:\n self.new_loc = (self.new_loc[0] - 1, self.new_loc[1])", "def twist(self):\n self.right()\n time.sleep(1)\n self.stop()\n self.left()\n time.sleep(1)\n self.stop()", "def MidpointFnBuilder(max_speed = 26.8, gain = 0.1, beta = 0.5, duration = 500, bias = 1.0, ratio = 0.5):\n\n def MidpointFn((idx, car), sim, step):\n \"\"\"\n :param idx:\n :param car:\n :param sim:\n :param step:\n :return:\n \"\"\"\n vehID = car[\"id\"]\n\n try:\n [back_car, front_car] = sim.getCars(idx, numBack=1, numForward=1, lane=car[\"lane\"])\n except ValueError:\n # Not enough cars on lane\n return\n\n front_dist = (front_car[\"x\"] - car[\"x\"]) % sim.length\n back_dist = (car[\"x\"] - back_car[\"x\"]) % sim.length\n\n curr_speed = car[\"v\"]\n front_speed = front_car[\"v\"]\n follow_dist = (front_dist + back_dist) * ratio\n delta = front_dist - follow_dist\n # print delta, curr_speed, front_speed, curr_speed-front_speed\n if follow_dist < front_dist and curr_speed < max_speed:\n # speed up\n new_speed = min(curr_speed + beta * (front_speed-curr_speed) + gain * delta + bias, max_speed)\n traci.vehicle.slowDown(vehID, new_speed, duration) # 2.5 sec\n # print \"t=%d, FASTER, %0.1f -> %0.1f (%0.1f) | d=%0.2f = %0.2f vs %0.2f\" % \\\n # (step, curr_speed, new_speed, front_speed, delta, front_dist, follow_dist)\n elif follow_dist > front_dist:\n # slow down\n new_speed = max(curr_speed + beta * (front_speed-curr_speed) + gain * delta + bias, 0)\n traci.vehicle.slowDown(vehID, new_speed, duration) # 2.5 sec\n # print \"t=%d, SLOWER, %0.1f -> %0.1f (%0.1f) | d=%0.2f = %0.2f vs %0.2f\" % \\\n # (step, curr_speed, new_speed, front_speed, delta, front_dist, follow_dist)\n\n return 
MidpointFn", "def scenario1(height, speed):\n time = math.sqrt((2 * height) / 9.81)\n result = speed * time\n return result", "def time_step(dt, mol):\n f = mol.get_force() \n a1 = -f / mol.p1.m\n a2 = f / mol.p2.m\n v_prev1 = mol.p1.vel - dt * a1 / 2\n v_prev2 = mol.p2.vel - dt * a2 / 2\n v_next1 = v_prev1 + a1 * dt\n v_next2 = v_prev2 + a2 * dt\n mol.p1.vel = v_next1\n mol.p2.vel = v_next2\n mol.p1.pos = mol.p1.pos + v_next1 * dt\n mol.p2.pos = mol.p2.pos + v_next2 * dt", "def calc_distance(self, observation):\n actual_obs = observation[0]\n scrn_player = actual_obs.observation.feature_screen.player_relative\n scrn_select = actual_obs.observation.feature_screen.selected\n scrn_density = actual_obs.observation.feature_screen.unit_density\n\n state_added = scrn_select + scrn_density\n\n marine_center = np.mean(self.xy_locs(scrn_player == 1), axis=0).round()\n\n # first step\n if np.sum(scrn_select) == 0:\n marine_center = np.mean(self.xy_locs(scrn_player == 1), axis=0).round()\n # marine behind beacon\n if isinstance(marine_center, float):\n marine_center = np.mean(self.xy_locs(state_added == 2), axis=0).round()\n else:\n # normal navigation\n marine_center = np.mean(self.xy_locs(state_added == 2), axis=0).round()\n if isinstance(marine_center, float):\n marine_center = np.mean(self.xy_locs(state_added == 3), axis=0).round()\n\n beacon_center = np.mean(self.xy_locs(scrn_player == 3), axis=0).round()\n #\n # print(state_added)\n # print(\"---- Marine {} | {} Beacon ----\".format(marine_center, beacon_center))\n # time.sleep(0.2)\n distance = math.hypot(beacon_center[0] - marine_center[0],\n beacon_center[1] - marine_center[1])\n\n return beacon_center, marine_center, distance", "def determineNextMove(player_location, opponentLocation, coins):\n global route, currentcoin, meta_route, best_weight, best_path, coins_to_search, index\n if opponentLocation in coins_to_search:\n coins_to_search, meta_route, route = change_way(coins, opponentLocation, player_location)[:3]\n index = 0\n elif currentcoin == player_location: \n if len(route) != 0:\n old_dist = algo.dijkstra(mazeMap, player_location)[1][meta_route[index+1]]\n coins_to_search2, meta_route2, route2, new_dist = change_way(coins, opponentLocation, player_location)\n\n #dist_matrix, route_matrix = u.update_dists_from_each(dists_matrix, routes_matrix, player_location, mazeMap, coins)\n #coins_to_search = get_n_shortest(3, coins, player_location, dists_matrix)\n \t\n #ennemy_dists = algo.dijkstra(mazeMap, opponentLocation)\n #for c in coins_to_search:\n #if len(coins_to_search) >= 2 and ennemy_dists[1][c] < dists_matrix[player_location][c]:\n # coins_to_search.remove(c)\n #break\n \t\t\n #best_weight = float(\"inf\")\n #best_path = []\n #exhaustive(coins_to_search, player_location, [], 0, dist_matrix)\n #meta_route2 = [player_location] + best_path\n #route2 = u.location_list_to_route(meta_route2, route_matrix)\n #new_dist = dist_matrix[player_location][meta_route2[1]]\n\t\t\n if len(route) == 0 or old_dist - new_dist > 3:\n route = route2\n meta_route = meta_route2 \n index = 0\n index += 1\n currentcoin = meta_route[index]\n #api.debug(route)\n return u.direction(player_location, route.pop(0))", "def _calcPlungerMoveTime(self, move_steps):\n sd = self.sim_state\n start_speed = sd['start_speed']\n top_speed = sd['top_speed']\n cutoff_speed = sd['cutoff_speed']\n slope = sd['slope']\n microstep = sd['microstep']\n\n slope *= 2500.0\n if microstep:\n move_steps = move_steps / 8.0\n theo_top_speed = sqrt((4.0 * move_steps*slope) + start_speed ** 
2.0)\n # If theoretical top speed will not exceed cutoff speed\n if theo_top_speed < cutoff_speed:\n move_t = theo_top_speed - (start_speed/slope)\n else:\n theo_top_speed = sqrt(((2.0*move_steps*slope) +\n ((start_speed**2.0+cutoff_speed**2.0)/2.0)))\n # If theoretical top speed with exceed cutoff speed but not\n # reach the set top speed\n if cutoff_speed < theo_top_speed < top_speed:\n move_t = ((1 / slope) * (2.0 * theo_top_speed - start_speed -\n cutoff_speed))\n # If start speed, top speed, and cutoff speed are all the same\n elif start_speed == top_speed == cutoff_speed:\n move_t = (2.0 * move_steps) / top_speed\n # Otherwise, calculate time spent in each phase (start, constant,\n # ramp down)\n else:\n ramp_up_halfsteps = ((top_speed ** 2.0 - start_speed ** 2.0) /\n (2.0 * slope))\n ramp_down_halfsteps = ((top_speed ** 2.0 - cutoff_speed ** 2.0) /\n (2.0 * slope))\n if (ramp_up_halfsteps + ramp_down_halfsteps) < (2.0 * top_speed):\n ramp_up_t = (top_speed - start_speed) / slope\n ramp_down_t = (top_speed - cutoff_speed) / slope\n constant_halfsteps = (2.0 * move_steps - ramp_up_halfsteps -\n ramp_down_halfsteps)\n constant_t = constant_halfsteps / top_speed\n move_t = ramp_up_t + ramp_down_t + constant_t\n return move_t", "def _step(self, whence):\n pass", "def time_to_point(distance):\n if distance <= (125 / 9) ** 2:\n return distance ** .5\n return distance * 9 / 250 + 125 / 18", "def _imputation_step(self, current_times, state):\r\n # Does not do anything special if we're jumping across a gap. More advanced\r\n # models, especially probabilistic ones, would want a special case that\r\n # depends on the gap size.\r\n return state", "def remove_jumps(self) -> None:\n q_diff = np.diff(self.array, axis=0)\n jumps = np.nonzero(np.where(np.linalg.norm(q_diff, axis=1)>1, 1, 0))[0]+1\n if len(jumps) % 2:\n jumps = np.append(jumps, [len(q_diff)+1])\n jump_pairs = jumps.reshape((len(jumps)//2, 2))\n for j in jump_pairs:\n self.array[j[0]:j[1]] *= -1.0", "def set_t_FAST(self):\n\t\n\tself.N = 2**7\n\tdt = self.Orbit.Tobs/self.N\n\tself.t = np.linspace(0, self.N-1, self.N)*self.Orbit.Tobs/self.N\n\t\n\treturn", "def q8(array):\n a = array[0]\n b = array[4]\n c = array[8]\n d = array[12]\n a1 = roll(a, 00) + roll(b, 00) + roll(b, 12) + roll(c, 12) + roll(d, 28) + roll(a, 28) + roll(b, 28)\n b1 = roll(b, 19) + roll(c, 19) + roll(d, 3) + roll(a, 3) + roll(b, 3) + roll(c, 7) + roll(d, 23) + roll(a,\n 23) + roll(\n b, 23) + roll(a, 15) + roll(b, 15) + roll(b, 27) + roll(c, 27) + roll(d, 11) + roll(a, 11) + roll(b, 11) + roll(\n d, 31) + roll(a, 31) + roll(b, 31)\n c1 = roll(c, 0) + roll(d, 16) + roll(a, 16) + roll(b, 16) + roll(a, 8) + roll(b, 8) + roll(b, 20) + roll(c,\n 20) + roll(\n d, 4) + roll(a, 4) + roll(b, 4) + roll(d, 24) + roll(a, 24) + roll(b, 24)\n d1 = roll(a, 8) + roll(b, 8) + roll(b, 20) + roll(c, 20) + roll(d, 4) + roll(a, 4) + roll(b, 4) + roll(d,\n 24) + roll(a,\n 24) + roll(\n b, 24)\n\n return a1, array[1], array[2], array[3], b1, array[5], array[6], array[7], c1, array[9], array[\n 10], array[11], d1, array[13], array[14], array[15]", "def __call__(self):\n\n self.ndx+=self.delta\n if not 0<=self.ndx<len(self.seq):\n if self.ndx>len(self.seq):\n self.ndx=len(self.seq) # In case this sequence has shrunk.\n if self.yoyo:\n self.delta*=-1\n self.ndx+=self.delta*2\n else:\n self.ndx=0\n return self.seq[self.ndx]", "def ab2_timestep(x, u, u_previous, timestep):\r\n return x + timestep * (1.5 * u - 0.5 * u_previous)", "def main():\n aoc_input = aoc_01_input.get_input()\n\n 
current_direction = 'N'\n steps_north = 0\n steps_east = 0\n\n # For part 2: Store all the coords visited in a list\n all_coords_list = []\n # A variable to save HQ coordinates in\n hq_coords = None\n\n for instruction in aoc_input:\n # One instruction is eg 'R2' or 'L44'\n input_turn = instruction[0]\n input_steps = int(instruction[1:])\n\n current_direction = change_direction(current_direction, input_turn)\n\n if current_direction == 'N':\n\n for k in range(input_steps):\n current_coords = [steps_north + k, steps_east]\n [all_coords_list, hq_coords] = check_all_coords(all_coords_list, current_coords, hq_coords)\n\n steps_north += input_steps\n\n elif current_direction == 'E':\n\n for k in range(input_steps):\n current_coords = [steps_north, steps_east + k]\n [all_coords_list, hq_coords] = check_all_coords(all_coords_list, current_coords, hq_coords)\n\n steps_east += input_steps\n\n elif current_direction == 'S':\n\n for k in range(input_steps):\n current_coords = [steps_north - k, steps_east]\n [all_coords_list, hq_coords] = check_all_coords(all_coords_list, current_coords, hq_coords)\n\n steps_north -= input_steps\n\n else:\n\n for k in range(input_steps):\n current_coords = [steps_north, steps_east - k]\n [all_coords_list, hq_coords] = check_all_coords(all_coords_list, current_coords, hq_coords)\n\n steps_east -= input_steps\n\n current_coords = [steps_north, steps_east]\n\n total_distance = abs(steps_north) + abs(steps_east)\n\n total_distance_part2 = abs(hq_coords[0]) + abs(hq_coords[1])\n\n print('Part 1: {}'.format(total_distance))\n print('Part 2: {}'.format(total_distance_part2))\n\n # print('Part 1: {}'.format(get_root(aoc_input[:])['name']))\n # print('Part 2: {}'.format(find_imbalance(aoc_input[:])))", "def shiftAside(state, distDemar, angleInter):\n opp = state.opponent_nearest_ball\n while True:\n dest = Vector2D.create_random(low=-1, high=1)\n dest.norm = distDemar\n dest += state.ball_pos\n if state.is_valid_position(dest) and state.free_trajectory(dest, angleInter) and \\\n distance_horizontale(dest, state.my_goal) > distance_horizontale(opp.position, state.my_goal)-5.:\n break\n return goTo(state, dest)", "def RunExactTimestep(self): \n if self.sim_t == 0:\n randoms = np.random.random(1000) \n self.randoms_log = np.log(randoms)*-1\n self.randoms = np.random.random(1000)\n self.count = 0 \n elif self.count == 1000:\n randoms = np.random.random(1000) \n self.randoms_log = np.log(randoms)*-1\n self.randoms = np.random.random(1000) \n self.count = 0 \n \n self.sim_tau = self.randoms_log[self.count]/float(self.sim_a_0) # reaction time generation\n self.sim_r2 = self.randoms[self.count] # Draw random number 2 [0-1]\n self.count +=1\n \n if (self.sim_t + self.sim_tau) < self.settings.endtime:\n self.sim_t += self.sim_tau # Time update\n self.reaction_index = 0\n sum_of_as = self.sim_a_mu[self.reaction_index]\n criteria = self.sim_r2*self.sim_a_0\n while sum_of_as < criteria: # Use r2 to determine which reaction will occur\n self.reaction_index += 1\t # Index\n sum_of_as += self.sim_a_mu[self.reaction_index] \n\n try:\n self.X_matrix += self.N_matrix_transpose[self.reaction_index]\n self.timestep += 1\n except MemoryError as ex:\n print(ex)\n sys.exit() \n else: \n self.sim_t = self.settings.endtime \n self.reaction_index = np.nan" ]
[ "0.6142764", "0.5769263", "0.57421744", "0.5703275", "0.5681504", "0.5650031", "0.5599577", "0.5527048", "0.55249375", "0.55100065", "0.55055285", "0.549204", "0.54680324", "0.5440701", "0.54385084", "0.54321384", "0.5418194", "0.54137677", "0.54083496", "0.5375917", "0.5365879", "0.5364452", "0.53611696", "0.5351481", "0.5342448", "0.5340236", "0.52958703", "0.52939165", "0.5287813", "0.5273693", "0.52673036", "0.5260609", "0.52391917", "0.5217465", "0.52120936", "0.52088934", "0.51778436", "0.51588595", "0.51547825", "0.5151925", "0.5150062", "0.5141136", "0.5133259", "0.51316166", "0.5130043", "0.51291573", "0.51289964", "0.51289964", "0.5128563", "0.5112309", "0.50997597", "0.5099717", "0.5099065", "0.5097301", "0.5089131", "0.50877404", "0.5077491", "0.5072839", "0.5069121", "0.5065757", "0.5064234", "0.50637513", "0.50617605", "0.5057473", "0.50490505", "0.5038775", "0.5037399", "0.5033509", "0.5033509", "0.50249904", "0.50245565", "0.5023824", "0.5018018", "0.5017656", "0.5013447", "0.5013447", "0.5009818", "0.5008393", "0.50047594", "0.50018513", "0.5001669", "0.50005054", "0.49950764", "0.49943367", "0.49862695", "0.49844518", "0.4982124", "0.4977262", "0.49757633", "0.49743277", "0.49652463", "0.49543524", "0.4952373", "0.49514142", "0.49441016", "0.49437112", "0.49402553", "0.49396268", "0.49385977", "0.49289158" ]
0.6169425
0
Return the result of an elasticsearch query as a pandas DataFrame.
def _ES_res_to_pandas(self, res, columns, thresh=None): sources = [x['hits']['hits'][0]['_source'] for x in res['responses']] if columns is not None: if isinstance(columns, list): sources = [{key: val for key, val in x.items() if key in columns} for x in sources] elif callable(columns): sources = [{key: val for key, val in x.items() if columns(key)} for x in sources] assert sources columns = list(sources[0].keys()) else: raise TypeError('Variable "columns" should be list or callable or None.') dtype = {col: self._choose_dtype(col) for col in columns} ids = [x['hits']['hits'][0]['_id'] for x in res['responses']] tab = pd.DataFrame(sources, index=ids)[columns] # Workaround for pandas bug: https://stackoverflow.com/a/38750433/7856919 for k, v in dtype.items(): if v == bool: tab[k] = tab[k].astype(str) == 'True' else: tab[k] = tab[k].astype(v) if thresh is not None: # Select rows that are not above the threshold sel = ~(tab['__CONFIDENCE'] >= thresh) columns_to_remove = [x for x in tab.columns if '__' in x] tab.loc[sel, columns_to_remove] = pd.np.nan # Dirty fix for np.nan that transforms dtype bool into float. tab['__IS_MATCH'].fillna(False, inplace=True) tab['__IS_MATCH'] = tab['__IS_MATCH'].astype(bool) return tab
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_pandas(self):\n return pd.DataFrame(self.results)", "def parse_query_result(self):\n results = self.jsonData['results']\n\n df = pd.DataFrame(results)\n df.drop(['rootSource', 'uri'], axis=1, inplace=True)\n\n return df", "def parse_query_result(self):\n results = self.jsonData['results']\n\n df = pd.DataFrame(results)\n df.drop(['rootSource', 'uri'], axis=1, inplace=True)\n\n return df", "def query_to_df(query):\n df = pd.DataFrame(query.all())\n df.columns = [x['name'] for x in query.column_descriptions]\n return df", "def get_documents_with_q(self, index, query=Q(), source=None, add_index_name = False):\n \n s = Search(using=self.es, index=index)\n if source:\n s = s.source(source)\n # Dotted fields, replace . by __\n q = s.query(query)\n #print(str(q.to_dict()).replace(\"'\",'\"'))\n results = s.query(query).scan()\n \n if add_index_name:\n all_dicts = []\n for hit in results:\n result_dict = hit.to_dict()\n result_dict['_index'] = hit.meta.index\n all_dicts.append(result_dict)\n \n fa = pd.DataFrame.from_dict(all_dicts)\n else:\n fa = pd.DataFrame([hit.to_dict() for hit in results])\n \n return fa", "def get_query_result_to_df(self, query):\r\n try:\r\n return pd.read_sql_query(query, self.conn)\r\n except pd.pandas.io.sql.DatabaseError:\r\n print('Execution failed. Database error')", "def to_dataframe(self):\n return df_util.to_dataframe(requests.get(self.__url).json())", "def query2df(query):\n df = pd.DataFrame(data = list(itertools.product([0, 1], repeat=len(query.variables))), columns=query.variables)\n df['p'] = query.values.flatten()\n return df", "def query_api_df(query_params: APIQueryParams,\n timeout: Optional[float] = None,\n headers: Optional[Dict[str, str]] = None) -> pandas.DataFrame:\n if timeout is None:\n timeout = api_configuration['timeout']\n\n json_data = _submit_post_request(\n json_dict=dict(token=get_api_token(), query=query_params.to_api_struct()),\n headers=headers,\n timeout=timeout)\n\n df_ = pandas.DataFrame(json_data['data'])\n df_.columns = [c.upper() for c in df_.columns]\n\n return df_", "def get_sparql_dataframe(service, query):\n sparql = SPARQLWrapper(service)\n sparql.setQuery(query)\n sparql.setReturnFormat(JSON)\n result = sparql.query()\n\n processed_results = json.load(result.response)\n cols = processed_results['head']['vars']\n\n out = []\n for row in processed_results['results']['bindings']:\n item = []\n for c in cols:\n item.append(row.get(c, {}).get('value'))\n out.append(item)\n\n return pd.DataFrame(out, columns=cols)", "def to_pandas_dataframe(self):\n pd_index = self.index().to_pandas_index()\n return pd.DataFrame.from_items(self.collect()).set_index(pd_index)", "def get_sparql_dataframe(query, service = \"https://query.wikidata.org/sparql\"):\n sparql = SPARQLWrapper(service)\n sparql.setQuery(query)\n sparql.setReturnFormat(JSON)\n result = sparql.query()\n\n processed_results = json.load(result.response)\n cols = processed_results['head']['vars']\n\n out = []\n for row in processed_results['results']['bindings']:\n item = []\n for c in cols:\n item.append(row.get(c, {}).get('value'))\n out.append(item)\n\n return pd.DataFrame(out, columns=cols)", "def get_df_from_db(self, query):\n cursor = self.conn.cursor()\n cursor.execute(\"set hive.execution.engine = tez\")\n cursor.execute(\"set tez.queue.name = sephora_internal\")\n cursor.execute(query)\n data = cursor.fetchall()\n col_des = cursor.description\n col_des = [tuple([x[0].split('.')[1] if '.' 
in x[0] else x[0]] + list(x[1:])) for x in col_des]\n col_name = [col_des[i][0] for i in range(len(col_des))]\n df = pd.DataFrame([list(i) for i in data], columns=col_name)\n return df", "def create_dataframe(result):\n # List of elements in the search result\n names = []\n snippet = []\n url = []\n \n # Append search results to list\n for j,item in enumerate(result):\n for i,element in enumerate(result[j]['items']):\n names.append(result[j]['items'][i]['title'])\n snippet.append(result[j]['items'][i]['snippet'])\n url.append(result[j]['items'][i]['link'])\n \n # Create a dataframe\n df = pd.DataFrame(list(zip(names, snippet,url)), \n columns =['name', 'snippet','url']) \n \n return df", "def GridSearchResultToDF(search):\n return(pd.concat([pd.DataFrame(data=search.cv_results_['params']),\n pd.DataFrame(data={'mean': search.cv_results_['mean_test_score'],\n 'std': search.cv_results_['std_test_score']}),\n pd.DataFrame(data={'mean_fit_time': search.cv_results_['mean_fit_time']})],\n axis = 1))", "def extract_data():\n client = MongoClient(HOST, PORT)\n collection = client[DB][COLLECTION]\n df = pd.DataFrame(collection.find().limit(10))\n return df", "def get_df(self) -> pd.DataFrame:\n return pd.DataFrame(self.fetchall(), columns=self.headers())", "def as_dataframe(self) -> \"pd.DataFrame\":\n import pandas as pd\n\n df = pd.DataFrame([row.as_series() for row in self.rows])\n return df", "def es_hit():\n return {\n \"_index\": \"testindex\",\n \"_id\": \"4beb3b3e-a935-442e-a47b-6d386947ea20\",\n \"_version\": 5,\n \"_seq_no\": 0,\n \"_primary_term\": 1,\n \"found\": True,\n \"_source\": {\n \"uuid\": \"4beb3b3e-a935-442e-a47b-6d386947ea20\",\n \"version_id\": 4,\n \"created\": \"2020-09-01T14:26:00+00:00\",\n \"updated\": \"2020-09-02T14:28:21.968149+00:00'\",\n \"id\": \"12345-abcde\",\n \"metadata\": {\n \"title\": \"My record\",\n \"date\": \"2020-09-20\",\n },\n \"pids\": {\n \"oaiid\": {\"value\": \"\", \"provider\": \"local\"},\n },\n },\n }", "def execute_sparql(client: NeptuneClient, query: str) -> pd.DataFrame:\n data = client.read_sparql(query)\n df = None\n if \"results\" in data and \"bindings\" in data[\"results\"]:\n df = pd.DataFrame(data[\"results\"][\"bindings\"])\n df.applymap(lambda x: x[\"value\"])\n else:\n df = pd.DataFrame(data)\n\n return df", "def get_as_pandas_dataframe(self):\n pd_df = pd.DataFrame()\n for name in self.dict_colname_to_index:\n pd_df[name] = np.copy(self[name])\n return pd_df", "def get_dataframe(q):\n cnx = create_engine(postgres_str)\n query = q\n return pd.read_sql_query(query, cnx)", "def to_df(query, cols=None):\n # Try to get column names\n if cols is None:\n cols = [x['name'] for x in query.column_descriptions]\n data = [{k: v for k, v in zip(cols, x)} for x in query]\n if len(data) == 0:\n return pd.DataFrame()\n return pd.DataFrame(data).loc[:, cols]", "def to_dataframe(self, data_dict):\n return pd.DataFrame.from_dict(data_dict, orient='index')", "def return_data_as_pandas_df(self):\n if not self.response:\n return None\n\n data = self.response['data'][self.data_type.value]\n\n # flatten data dictionary by joining property and subproperty names\n data_flat = {}\n for i, entry in enumerate(data):\n id = self.id[i]\n curr_dict = {}\n for key, values in entry.items():\n if isinstance(values, list):\n v = values[0]\n else:\n v = values\n if isinstance(v, str):\n new_key = f\"{key}\"\n curr_dict[new_key] = v\n else:\n for subprop, val in v.items():\n new_key = f\"{key}.{subprop}\"\n curr_dict[new_key] = val\n data_flat[id] = curr_dict\n\n 
return pd.DataFrame.from_dict(data_flat, orient='index')", "def dataframe(self):\n if not self.all_records:\n print('No rows cached.')\n return\n dict_list = [row.as_dict() for row in self.all_records]\n columns = self.all_records[0].keys\n dataframe = pd.DataFrame(dict_list, columns=columns)\n return dataframe", "def df_from_table(query, carto_sql_client, index=None):\n resp = carto_sql_client.send(query)\n schema = transform_schema(resp['fields'])\n if index:\n return pd.DataFrame(resp['rows']).set_index('cartodb_id').astype(schema)\n else:\n return pd.DataFrame(resp['rows']).astype(schema)", "def as_df(self):\r\n return pd.DataFrame(self.vectors).set_index(self.words)", "def to_pandas_df(self):\n data = self._get_data(pd=True)\n return data", "def athena_to_pandas(query, query_cols):\n \n # Get AWS security credentials:\n s3 = boto3.client('s3')\n a = s3.get_object(Bucket='config-lambda', Key='aws_accessKeys.json')\n aws_key = json.loads(a['Body'].read().decode('utf-8'))\n\n # Conecta à Athena com pacote do Joe.\n cursor = connect(aws_access_key_id=aws_key['aws_access_key_id'],\n aws_secret_access_key=aws_key['aws_secret_access_key'],\n s3_staging_dir='s3://stagging-random/',\n region_name='us-east-1').cursor()\n\n # Executa a query:\n data = cursor.execute(query).fetchall() \n df = pd.DataFrame(data, columns=query_cols)\n \n return df", "def to_pandas(self):\n # TODO Add type translation.\n # Skipping analyzing 'pandas': found module but no type hints or library stubs\n import pandas as pd # type: ignore\n\n map = {}\n for n, c in self._field_data.items():\n map[n] = c.to_pandas()\n return pd.DataFrame(map)", "def to_pandas(df):\n pd_df = pd.concat(ray.get(df._df))\n pd_df.index = df.index\n pd_df.columns = df.columns\n return pd_df", "def get_df_from_db(self, query):\n cursor = self.conn.cursor()\n cursor.execute(query)\n data = cursor.fetchall()\n col_des = cursor.description\n col_des = [tuple([x[0].split('.')[1] if '.' in x[0] else x[0]] + list(x[1:])) for x in col_des]\n col_name = [col_des[i][0] for i in range(len(col_des))]\n ret_df = pd.DataFrame([list(i) for i in data], columns=col_name)\n return ret_df", "def to_df(self):\r\n return pd.DataFrame([dict(self)])", "def result_df(self, regex=None) -> pd.DataFrame:\n if regex:\n # get one random item from dict, and get keys from this random (dict) item\n # FIXME: how to do this better? 
- this is not efficient...\n keys = self.result[next(iter(self.result))].keys()\n\n if type(regex) == str:\n comp_regexe = re.compile(regex)\n columns = list(filter(comp_regexe.search, keys))\n else:\n columns = list(filter(regex.search, keys))\n\n df = pd.DataFrame.from_dict(self.result, orient='index')\n return df[columns]\n else:\n return pd.DataFrame.from_dict(self.result, orient='index')", "def to_dataframe(self, timeout_sec: int = DEFAULT_TIMEOUT_SEC) -> pd.DataFrame:\n records = [r for r in self.result(timeout_sec=timeout_sec)]\n return pd.DataFrame.from_records(records)", "def to_df(self):\n return pd.DataFrame([dict(self)])", "def to_df(self):\n return pd.DataFrame([dict(self)])", "def to_df(self):\n return pd.DataFrame([dict(self)])", "def to_df(self):\n return pd.DataFrame([dict(self)])", "def to_df(self):\n return pd.DataFrame([dict(self)])", "def dataFrame(self):\n\n memory_file = StringIO(initial_value=self.sparql_result.decode('utf-8'), newline='\\n')\n reader = DictReader(memory_file)\n\n schema = StructType(\n list(map(lambda f: StructField(f, StringType()), reader.fieldnames))\n )\n\n data = list(map(lambda d: [d[f] for f in reader.fieldnames], list(reader)))\n\n return self.spark.createDataFrame(data, schema)", "def to_pandas(self):\n pass", "def to_pandas(self):\n pass", "def save_to_dataframe(self):\n titles, years, months, days, authors = list(), list(), list(), list(), list()\n for doc in self.results[\"documents\"]:\n titles.append(doc['title'])\n years.append(doc['year'])\n months.append(doc['month'])\n days.append(doc['day'])\n authors.append(doc['authors'])\n return pd.DataFrame({\"title\": titles, \"years\": years, \"months\": months, \"days\": days, \"author\": authors})", "def as_DF(self):\n\n hc_df = pd.DataFrame(self.s, index=self.s_names)\n hc_df.columns.name = 'type'\n hc_df.index.name = 's'\n\n return hc_df", "def ga_to_df(v4_response):\n try:\n rows = v4_response['reports'][0]['data']['rows']\n header = v4_response['reports'][0]['columnHeader']\n index_col = header['dimensions']\n _ = index_col.extend([v.get('name') for v in header['metricHeader']['metricHeaderEntries']])\n index_col = [re.sub(r'ga:(.*)', r'\\1', v) for v in index_col]\n _dims = [v.get('dimensions') for v in rows]\n _mets = [v.get('metrics')[0].get('values') for v in rows]\n _ = [u.extend(v) for u, v in zip(_dims, _mets)]\n df = pd.DataFrame(_dims, columns=index_col)\n except KeyError:\n df = pd.DataFrame({'error':pd.Series(['no data for this query'])})\n return df", "def get_sparql_dataframe(self, query: str, text: str = \"\") -> pd.DataFrame:\n\n if self.verbose:\n print(tm.strftime(f\"[%H:%M:%S] Transmission {text} en cours...\"), end='')\n\n self.sparql.setQuery(query)\n\n processed_results: Wrapper.QueryResult = self.sparql.query()\n\n # We will check if the results are incomplete due to server limitations\n if 'x-sparql-maxrows' in processed_results.info():\n max_size: int = int(processed_results.info()['x-sparql-maxrows'])\n warnings.warn(f\"Warning: The server has limited the number of rows to {max_size}: result incomplete.\")\n\n if 'x-sql-state' in processed_results.info():\n warnings.warn(\"Warning: The server has limited the time of queries: partial result for a timed out query\")\n\n processed_results: dict = processed_results.convert()\n\n if self.verbose:\n print(tm.strftime(f\"\\r[%H:%M:%S] Transmission {text} réussi, conversion en Data Frame...\"), end='')\n\n cols: list[str] = processed_results['head']['vars']\n\n out: list[list[str]] = [[row.get(c, {}).get('value') for 
c in cols] for row in\n processed_results['results']['bindings']]\n\n if self.verbose:\n print(tm.strftime(f\" Effectué\"))\n\n return pd.DataFrame(out, columns=cols)", "def to_pandas(self):\n dataframe = self.get().to_pandas()\n assert type(dataframe) is pandas.DataFrame or type(dataframe) is pandas.Series\n\n return dataframe", "def run_query(query):\n db.query(query)\n dbResult = db.store_result()\n dbFetched = dbResult.fetch_row(maxrows = 0, how = 2)\n df = pd.DataFrame.from_records(dbFetched)\n return df", "def to_df(self) -> pd.DataFrame:\n data = []\n for action in self.actions:\n data.append(action.to_df())\n df = pd.read_json(json.dumps(data), orient=\"list\")\n return df[self.fields]", "def execute_opencypher(client: NeptuneClient, query: str) -> pd.DataFrame:\n resp = client.read_opencypher(query)\n df = pd.DataFrame.from_dict(resp)\n return df", "def response_to_df_csv():\n results = api.call_api()\n df = t.get_dataframe(results)\n t.save_csv(df)\n return df", "def _make_results_dataframe(self):\n LOG.debug(\"Creating Results Dataframes.\")\n results_df = tfs.TfsDataFrame(index=self.twiss_df.index)\n results_df[\"S\"] = self.twiss_df[\"S\"]\n return results_df", "def fetch_es_response(self, index_name, lucene_query=False):\n body = dict()\n lucene_query = self.build_lucene_query() if lucene_query else None\n if lucene_query is None:\n if self.fetch_querydict():\n body[\"query\"] = self.fetch_querydict()\n if self.fetch_aggregation():\n body[\"aggs\"] = self.fetch_aggregation()\n body[\"size\"] = self.size\n #return body\n try:\n kwargs = {'index': index_name, 'body': body, 'from_': self.from_,\n 'size': self.size, 'sort': self.sort}\n if self.fields:\n # self.field is not list make it a list else let it be\n fields = self.fields.split(\",\") if not \\\n isinstance(self.fields, list) else self.fields\n validated_fields = [item for item in fields if item not in EXCLUDED_FIELDS]\n\n kwargs.update({'fields': validated_fields})\n if lucene_query:\n kwargs.update({'q': self.build_lucene_query()})\n res = self.es_conn.search(**kwargs)\n return res\n except (ConnectionError, TransportError) as exc:\n return {\n \"error\": \"Exception caught for index- {}, contact admin\".format(\n index_name\n )\n }", "def json2pd(json_results):\n\n data = []\n for line in json_results.split(\"\\n\"):\n if line:\n data.append(json.loads(line))\n\n df = pd.DataFrame(data)\n # process some of the fields\n df.timestamp = pd.to_datetime(df.timestamp, unit=\"s\")\n # drop rows whose \"metric\" is \"Timestamp\"\n df = df[[\"Timestamp\" not in x for x in df.metric]]\n # Set a multiindex\n df = df.set_index([\"test\", \"metric\", \"timestamp\"])\n # Keep only some columns\n df = df[[\"labels\", \"value\", \"unit\", \"run_uri\"]]\n return df", "def from_ES(self, columns=None, chunksize=None, thresh=None):\n num_rows = ic.stats(self.index_name)['_all']['total']['docs']['count']\n \n if chunksize is None:\n res = self.fetch_by_id(size=num_rows, from_=0)\n return self._ES_res_to_pandas(res, columns, thresh)\n \n else:\n # Return a generator. 
Code has to be separate to allow returning pandas.DataFrame\n return self._from_ES_gen(num_rows, columns, chunksize, thresh)", "def query(self, sql):\n df = pd.read_sql(sql, self.conn)\n return df", "def get_study_results():\n study_results_path = \"data/Study_results.csv\"\n df = pd.read_csv(study_results_path)\n return df", "def query_into_pandas(self, query, fields=None, parameters=None, names=None):\n target_url = self.build_query(query, fields=fields, parameters=parameters)\n\n col_id = 'columns'\n col_names = None\n if names is None:\n # If the columns of the query are specified (used for 'tab' or 'txt' value of\n # parameters['format'] only), then we use the same for the DataFrame\n if col_id in parameters:\n col_names = parameters[col_id].split(',')\n else:\n col_names = names\n\n db = pd.read_csv(\n target_url,\n delimiter=\"\\t\",\n skiprows=1,\n header=None,\n names=col_names\n )\n return db", "def db_to_df(query):\n conn = loader.database._connection\n return sql.read_frame(query, conn)", "def get_results(r):\n myDict = {}\n for name in r[\"results\"]:\n myDict[name[\"name\"]] = {\n \"rank\": name[\"rank\"],\n \"ticker\": name[\"ticker\"],\n \"upvotes\": name[\"upvotes\"],\n \"mentions\": name[\"mentions\"],\n \"mentions_24h_ago\": name[\"mentions_24h_ago\"],\n }\n df = pd.DataFrame.from_dict(myDict, orient=\"index\")\n df[\"rank\"] = df[\"rank\"].astype(int)\n df[\"upvotes\"] = df[\"upvotes\"].astype(int)\n df[\"mentions\"] = df[\"mentions\"].astype(int)\n df[\"mentions_24h_ago\"] = df[\"mentions_24h_ago\"].astype(int)\n\n df[\"delta_mentions_24h\"] = df[\"mentions\"] - df[\"mentions_24h_ago\"]\n df = df[~(df[\"upvotes\"] <= 1000)]\n df = df.sort_values(by=[\"delta_mentions_24h\"], ascending=False)\n return df", "def convert_response_to_df(response):\n\n list = []\n\n for report in response.get('reports', []):\n columnHeader = report.get('columnHeader', {})\n dimensionHeaders = columnHeader.get('dimensions', [])\n metricHeaders = columnHeader.get('metricHeader', {}).get('metricHeaderEntries', [])\n rows = report.get('data', {}).get('rows', [])\n sampled = True if report.get('samplesReadCounts') else False\n\n for row in rows:\n dict = {}\n dict['sampling'] = sampled\n dimensions = row.get('dimensions', [])\n dateRangeValues = row.get('metrics', [])\n\n for header, dimension in zip(dimensionHeaders, dimensions):\n dict[header] = dimension\n\n for i, values in enumerate(dateRangeValues):\n for metric, value in zip(metricHeaders, values.get('values')):\n if ',' in value or '.' 
in value:\n dict[metric.get('name')] = float(value)\n else:\n dict[metric.get('name')] = int(value)\n list.append(dict)\n\n df = pd.DataFrame(list)\n return df", "def FetchQueryResultToDF(data, col_name: List[str]) -> pd.DataFrame:\r\n result = []\r\n for row in data:\r\n to_be_append = []\r\n for col in row:\r\n to_be_append.append(col)\r\n result.append(to_be_append)\r\n df = pd.DataFrame(result, columns=col_name)\r\n print(df)\r\n return df", "def frame(self):\n microseconds = np.array(self.results['times']) * 1e6\n return pd.DataFrame(self.results, index=microseconds)", "def do_query(self) -> pd.DataFrame:\n if self.resultSize > self.step:\n query: str = self.query + f\" LIMIT {self.step}\"\n return pd.concat(\n [self.get_sparql_dataframe(query + f\" OFFSET {value}\", f\"{value} sur {self.resultSize}\") for value in\n range(0, self.resultSize, self.step)])\n return self.get_sparql_dataframe(self.query)", "def dataframe(self):\n dictionary = OrderedDict(zip(self.keys, [[value] for value in self.values]))\n dataframe = pd.DataFrame(dictionary)\n return dataframe", "def df(self):\n data = {\"sites\": self.sites, \"values\": self.values,\n \"stdeviations\": self.stdeviations}\n return pd.DataFrame(data, columns=[\"sites\", \"values\", \"stdeviations\"])", "def get_pandas(location: str='') -> 'pandas.core.frame.DataFrame':\n db = CarsDb() # pylint: disable=invalid-name\n results = db.get_pandas(location)\n db.commit()\n db.close()\n return results.set_index('id')", "def to_DataFrame(cls, qs):\n dates = [pd.to_datetime(x[0]) for x in qs.values_list('date')]\n data = qs.values('open', 'close', 'high', 'low', 'volume')\n df = pd.DataFrame.from_records(data, index=dates)\n return df", "def query(self, query, **params):\n chunksize = params.pop(\"chunksize\", 100000)\n to_pandas = params.pop(\"to_pandas\", True)\n with self._cursor() as cursor:\n params = {k: v for k, v in params.items() if k in getargs(cursor.execute).args}\n cursor.execute(query, **params)\n fields = [i[0] for i in cursor.description]\n res = []\n while True:\n result = cursor.fetchmany(chunksize)\n if not result:\n break\n res.append(Frame(result))\n frame = rbind(res, bynames=False)\n if frame.shape == (0, 0):\n frame = Frame({n: [] for n in fields})\n else:\n frame.names = fields\n if to_pandas:\n frame = frame.to_pandas()\n return frame", "def pd(self, *args, **kwargs):\n return pd.DataFrame.from_records(self.aslist(), *args, **kwargs)", "def convert_to_df(data):\r\n ans = pd.DataFrame(data)\r\n return ans", "def parse_result_series(result):\n if isinstance(result, np.ndarray):\n return result\n\n if result is None or not len(result):\n return None\n\n dates, values = result\n return pd.DataFrame({0:dates.astype(int)/1000,1:values})", "def get_rs_data(self, query):\n # Establish connection to Redshift\n self.rs_hook = PostgresHook(postgres_conn_id=self.rs_conn_id)\n\n # Get the data in dataframe\n survey_df = self.rs_hook.get_pandas_df(query)\n\n return survey_df", "async def full_report():\n return DF.to_dict(orient=\"records\")", "def get_dataframe(self, params=None, chunksize=None):\n if chunksize:\n raise NotImplementedError(\"Buffered reading not supported yet\")\n # the resulting `rows` of a query provides a nice way to do this, though\n\n query = self.config[\"query\"]\n params = params or {}\n\n logger.debug(\n \"Fetching query {} with params {}...\".format(\n query, params\n )\n )\n rows = self.db.query(query, fetchall=True, **params)\n df = rows.export(\"df\")\n\n return df", "def df(self) -> 
\"pandas.DataFrame\":\n titles = []\n comments = []\n alternative_codes = []\n for cat in self.values():\n titles.append(cat.title)\n comments.append(cat.comment)\n alternative_codes.append(cat.codes[1:])\n return pandas.DataFrame(\n index=list(self.keys()),\n data={\n \"title\": titles,\n \"comment\": comments,\n \"alternative_codes\": alternative_codes,\n },\n )", "def to_df(self):\n from ..df import DataFrame\n\n return DataFrame(self)", "def to_dataframe(self, include_metadata: bool = True) -> pd.DataFrame:\n # Get all our data first with async\n # Note that all our pandas work will tax CPU so we wouldn't expect any\n # performance gains from doing the data parsing as a callback\n records = self.to_dict()\n data = []\n for series in records:\n df = pd.DataFrame(series.pop(\"data\"), columns=[\"period\", \"value\"])\n if include_metadata:\n df = df.assign(**series)\n data.append(df)\n return pd.concat(data, ignore_index=True)", "def sql(q, database_url):\r\n output, cur_description = Q(q, database_url, out=True, description=True)\r\n # print(cur_description)\r\n cols = [i[0] for i in cur_description]\r\n return pd.DataFrame(output, columns=cols)", "def get_frame_from_query(the_query, colnames):\n df = DataFrame.from_records(list(the_query), columns=colnames)\n return df", "def get_tweets(api, query):\n \n results = []\n for tweet in tweepy.Cursor(api.search, q=query).items(1000):\n results.append(tweet)\n \n id_list = [tweet.id for tweet in results]\n #unpack into dataframe\n data = pd.DataFrame(id_list,columns=['id'])\n \n data[\"text\"]= [tweet.text.encode('utf-8') for tweet in results]\n data[\"datetime\"]=[tweet.created_at for tweet in results]\n data[\"Location\"]=[tweet.place for tweet in results]\n \n return data", "def dataframe(self):\n\n if self._dataframe is None:\n try:\n import pandas as pd\n except ImportError:\n raise RuntimeError('To enable dataframe support, '\n 'run \\'pip install datadotworld[pandas]\\'')\n\n self._dataframe = pd.DataFrame.from_records(self._iter_rows(),\n coerce_float=True)\n\n return self._dataframe", "def index():\n data = te.getMarketsData(marketsField='index', output_type='df')\n return jsonify(data.to_dict(orient='records'))", "def fetch_all(): \n client, index_name = connection_es()\n res = client.search(index = index_name+\"*\")\n return res", "def to_df(self) -> pd.DataFrame:\n\n return pd.DataFrame(self.to_dict()).drop(\n DUMMY_ENTITY_ID, axis=1, errors=\"ignore\"\n )", "def to_df(thisdict, name=None, index=None) -> pd.DataFrame:\n df = pd.DataFrame.from_dict(thisdict, orient='index')\n if index:\n df = df.set_index(index)\n if name:\n df.index.name=name\n\n if df.size>0:\n df.sort(inplace=True, ascending=False)\n return df", "def to_pandas(self) -> pd.DataFrame:\n\n data = {column.name: column.to_pandas()\n for column in self.plaincolumns}\n\n return pd.DataFrame(data, columns=self.columns)", "def _get_all(index, includes, formats) -> Response:\n response = target_es.search(\n index=index,\n collapse={\"field\": \"id\"},\n _source=default_includes + includes,\n query={\"bool\": {\"filter\": [{\"terms\": {\"format\": formats}}]}}\n if formats\n else {\"match_all\": {}},\n )\n return {\n \"results\": {\n hit[\"_id\"]: hit[\"_source\"] for hit in response[\"hits\"][\"hits\"]\n },\n \"total\": response[\"hits\"][\"total\"][\"value\"],\n \"took\": response[\"took\"],\n }", "def as_DataFrame (self):\n return DataFrame(self.table)", "def getDataFrame(self):\n return self.df", "def to_pandas(self):\n self.meta = pd.DataFrame(self.meta)\n return", 
"def check_results_as_data_frame(check_to_check_results: Dict[Check, CheckResult]) -> DataFrame:\n check_names = []\n status = []\n descriptions = []\n for check_result in check_to_check_results.values():\n check_names.append(check_result.check)\n status.append(check_result.status)\n descriptions.append(check_result.description)\n return DataFrame(zip(check_names, status, descriptions), columns=[\"check_name\", \"status\", \"description\"])", "def df(client_ids, start, end):\n obj = search(client_ids, start, end)\n df = DataFrame.from_dict(obj).T\n\n if df.empty:\n return df\n\n df.index.name = 'client_id'\n df = df.rename(columns={ 0: 'inactive', 1: 'active' })\n df['total'] = df.sum(axis=1)\n df = df.fillna(0).astype('int64')\n\n return df", "def get_region_data(region):\n cursor = reg_data_coll.find({REGION_KEY: region})\n df = pd.DataFrame(list(cursor))\n if df.empty:\n app.logger.error(f\"While getting {region} data: no data\")\n return df", "def _data_frame(content):\n response = loads(content)\n key = [x for x in response.keys() if x in c.response_data][0]\n frame = DataFrame(response[key])\n final_frame = _convert(frame)\n return final_frame", "def pandas_convert(self):\n data = {}\n\n for names in self.data[0]:\n col_values = []\n\n if names in objects:\n for items in self.data[0][names]:\n col_values = []\n\n col_name = names + \"_\" + items\n\n for i in range(len(self.data)):\n col_values.append(self.data[i][names][items])\n\n data[col_name] = col_values\n else:\n for i in range(len(self.data)):\n col_values.append(self.data[i][names])\n \n data[names] = col_values\n\n self.pandas_df = pd.DataFrame(data=data)\n self.__clean_df()\n\n return self.pandas_df", "def prepareDataframeForTable(self, result):\n df = result\n if isinstance(df, pd.Series):\n df = pd.DataFrame({\"values\": df})\n\n if self._isIndexedDataframe(df):\n if df.size == 0:\n df[\"values\"] = np.nan\n elif len(df.columns) > 1:\n if isinstance(df.columns, pd.MultiIndex):\n df.columns = df.columns.map(' | '.join)\n df = df.stack()\n if isinstance(df, pd.Series):\n df = pd.DataFrame({\"values\": df})\n current_columns_name = list(df.index.names)\n current_columns_name[len(current_columns_name)-1] = \"Measures\"\n df.index.names = current_columns_name\n\n return df", "def query_kusto(query, db, client):\n dataframe = pd.DataFrame([])\n logging.info('Retry is set to 3 times.')\n\n for i in range(4):\n if i > 0:\n logging.info('Retry {}'.format(i))\n try:\n # Execute query\n response = client.execute(db, query)\n # Convert to pandas dataframe\n res = response.primary_results[0]\n if res:\n dataframe = dataframe_from_result_table(res)\n if dataframe.empty:\n time.sleep(10)\n continue\n return dataframe\n except Exception as exp:\n logging.error('Exception occured: {}'.format(exp))\n # wait 10 seconds, then retry\n time.sleep(10)\n continue\n\n return dataframe" ]
[ "0.74036556", "0.70324427", "0.70324427", "0.7026997", "0.6755308", "0.66402763", "0.65239024", "0.6453229", "0.6447718", "0.641856", "0.63828397", "0.63794637", "0.63329643", "0.6315638", "0.63023823", "0.62740153", "0.6256019", "0.6252512", "0.61707205", "0.616961", "0.61680776", "0.6162313", "0.61429036", "0.6131321", "0.6129815", "0.6129034", "0.6068849", "0.6054013", "0.60356593", "0.60350597", "0.60272557", "0.6024868", "0.60038036", "0.5996374", "0.5989838", "0.5981507", "0.5969047", "0.5969047", "0.5969047", "0.5969047", "0.5969047", "0.596104", "0.59532255", "0.59532255", "0.5924391", "0.5923893", "0.5920468", "0.5896925", "0.5894031", "0.5890735", "0.5880592", "0.587189", "0.58678275", "0.58411443", "0.58378744", "0.5836442", "0.58177835", "0.5801504", "0.5794094", "0.5787813", "0.57725185", "0.5765748", "0.5756615", "0.57443804", "0.5734996", "0.5733384", "0.5729501", "0.5718596", "0.5717156", "0.57137287", "0.57021004", "0.5694597", "0.5692845", "0.5681568", "0.5676572", "0.56659013", "0.564614", "0.5619842", "0.56188995", "0.5616122", "0.56138474", "0.5610484", "0.5604813", "0.55836606", "0.55818117", "0.5580895", "0.5572582", "0.55709803", "0.5550576", "0.5540757", "0.5536978", "0.5516006", "0.5512008", "0.5511438", "0.55088353", "0.55081064", "0.550437", "0.55035514", "0.5502345", "0.54838085" ]
0.59003174
47
Load or generate pandas DataFrame from the ES associated to the project.
def from_ES(self, columns=None, chunksize=None, thresh=None): num_rows = ic.stats(self.index_name)['_all']['total']['docs']['count'] if chunksize is None: res = self.fetch_by_id(size=num_rows, from_=0) return self._ES_res_to_pandas(res, columns, thresh) else: # Return a generator. Code has to be separate to allow returning pandas.DataFrame return self._from_ES_gen(num_rows, columns, chunksize, thresh)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load(\n self, index_col: str = \"operator\", date_cols: list = [\"updated\", \"created\"]\n ) -> pd.DataFrame:\n df = pd.read_json(self.fspath, convert_dates=date_cols)\n try:\n df = df.set_index(index_col)\n except KeyError:\n raise KeyError(\n f\"Backend has no column named '{index_col}'. Try passing 'index_col = column_name' to the backend constructor. Available columns are: {df.columns.tolist()}\"\n )\n self.source = df\n return self", "def _read_projects_df_from_db(self, include_stats=True):\n\n projects_df = None\n\n # TODO: should cursor be created here or no?\n # https://www.datacamp.com/community/tutorials/tutorial-postgresql-python # noqa\n # shows creation of a cursor even though no methods are called on it\n with self._transaction.dict_cursor():\n # TODO: is there a better way to access this?\n conn = self._transaction._conn\n\n queries = _PROJECT_SQLS if include_stats else [_PROJECT_SQLS[0]]\n for stats_keys, sql_source in queries:\n if callable(sql_source):\n curr_sql = sql_source(*stats_keys)\n else:\n curr_sql = sql_source.format(*stats_keys)\n\n curr_df = pd.read_sql(curr_sql, conn)\n\n if projects_df is None:\n projects_df = curr_df\n else:\n # left join here: the first query produces a df with a\n # COMPLETE list of all projects, whereas subsequent\n # queries only return info on projects relevant to their\n # computed statistic.\n projects_df = pd.merge(projects_df, curr_df,\n how=\"left\", on=[\"project_id\"])\n\n # make the project_id the index of the final data frame, but\n # do NOT drop the project_id column from the data frame.\n projects_df.set_index('project_id', drop=False, inplace=True)\n return projects_df", "def load_pandas():\n data = _get_data()\n return du.process_pandas(data, endog_idx=5, exog_idx=[10, 2, 6, 7, 8])", "def load():\n return load_pandas()", "def to_dataframe(self):\n return df_util.to_dataframe(requests.get(self.__url).json())", "def glass_pandas(self):\n # pandas.set_option('display.width', 120)\n # TODO timeit (git_implementation) vs (my_implementation)\n # * df = pd.DataFrame(json.loads(r.text))\n # * df = df.set_index('t')\n # * df.index = pd.to_datetime(df.index, unit='s')\n # * df = df.sort_index()\n # * s = df.v\n # * s.name = '_'.join(url.split('/')[-2:])\n # * return s\n # for elem in self.loaded:\n # _metric, _data = elem[1]['_metrics'], elem[1]['_data']\n # try:\n # frame_keys = ['t'] + list(_data[0]['o'].keys())\n # framed = pandas.DataFrame(\n # data=[{k: (_data[iters]['t'] if k in 't' else _data[iters]['o'][k])\n # for k in frame_keys} for iters in range(len(_data))],\n # columns=frame_keys)\n # except KeyError:\n # framed = pandas.DataFrame(_data)\n # framed.set_index('t', inplace=True)\n # framed.index = pandas.to_datetime(\n # framed.index.to_flat_index(), unit='s', infer_datetime_format=True)\n # framed.sort_index(inplace=True)\n # framed.name = _metric\n # print(framed.name)\n # print(framed)", "def read(self):\n self._load_metadata()\n return self._df.compute()", "def get_df_from_db(self, query):\n cursor = self.conn.cursor()\n cursor.execute(\"set hive.execution.engine = tez\")\n cursor.execute(\"set tez.queue.name = sephora_internal\")\n cursor.execute(query)\n data = cursor.fetchall()\n col_des = cursor.description\n col_des = [tuple([x[0].split('.')[1] if '.' 
in x[0] else x[0]] + list(x[1:])) for x in col_des]\n col_name = [col_des[i][0] for i in range(len(col_des))]\n df = pd.DataFrame([list(i) for i in data], columns=col_name)\n return df", "def load_pandas():\n data = _get_data()\n return du.process_pandas(data, endog_idx=0)", "def load_pandas():\n data = _get_data()\n return du.process_pandas(data, endog_idx=0)", "def load(cls, path: str):\n df = pd.read_json(path, convert_dates=[\"updated\", \"created\"])\n df = FileIndex(data=df, path=path)\n if \"operator\" in df.columns:\n df = df.set_index(\"operator\")\n return df", "def dataframe(self):\n if not self.all_records:\n print('No rows cached.')\n return\n dict_list = [row.as_dict() for row in self.all_records]\n columns = self.all_records[0].keys\n dataframe = pd.DataFrame(dict_list, columns=columns)\n return dataframe", "def _load_df(self):\n oauth_json = self.plugin_config[\"service_account_credentials\"]\n with tempfile.NamedTemporaryFile(mode=\"w+\", suffix=\".json\") as ntf:\n json.dump(oauth_json, ntf)\n ntf.seek(0)\n\n gc = gspread.service_account(filename=ntf.name)\n \n sheet_url = self.plugin_config[\"sheet_url\"]\n sheet = gc.open_by_url(sheet_url)\n self.worksheet = sheet.get_worksheet(0)\n data = self.worksheet.get_all_values()\n colnames = data.pop(0)\n\n self._df = pd.DataFrame(data, columns=colnames)", "def sourceToDataframe(self):\n df = pd.read_excel(self.filename)\n df.columns = df.iloc[10]\n df = df.drop(df.index[:11])\n self.df = df #makes this df accessible to the whole class now\n self.insertODN()\n display(df.head())", "def df():\n fs.df()", "def _read(**kwargs) -> DataFrame:\n Engine.subscribe(_update_engine)\n\n try:\n pd_obj = FactoryDispatcher.read_csv_glob(**kwargs)\n except AttributeError:\n raise AttributeError(\"read_csv_glob() is only implemented for pandas on Ray.\")\n\n # This happens when `read_csv` returns a TextFileReader object for iterating through\n if isinstance(pd_obj, pandas.io.parsers.TextFileReader):\n reader = pd_obj.read\n pd_obj.read = lambda *args, **kwargs: DataFrame(\n query_compiler=reader(*args, **kwargs)\n )\n return pd_obj\n\n return DataFrame(query_compiler=pd_obj)", "def read_file(fname: str) -> pd.DataFrame:\n raw_data = (\n pd.read_hdf(fname).to_frame().reset_index(level=[0, 1]).loc[ANALYSIS_DATE]\n )\n raw_data[\"date\"] = raw_data.index\n return raw_data", "def pandas_handler(store_handler, hit):\n nid = hit.nid\n sname = hit.source_name\n\n path = store_handler.get_path_of(nid) + sname\n df = __obtain_dataframe(path)\n return df", "def get_df_from_db(self, query):\n cursor = self.conn.cursor()\n cursor.execute(query)\n data = cursor.fetchall()\n col_des = cursor.description\n col_des = [tuple([x[0].split('.')[1] if '.' 
in x[0] else x[0]] + list(x[1:])) for x in col_des]\n col_name = [col_des[i][0] for i in range(len(col_des))]\n ret_df = pd.DataFrame([list(i) for i in data], columns=col_name)\n return ret_df", "def to_df(self):\n from ..df import DataFrame\n\n return DataFrame(self)", "def getDataframe(self):\n self._loadCSVFile()\n self._cleanProcessDf()\n return self._df", "def create_data_frame(input_filepath):\n df = pd.read_json(input_filepath)\n logger = logging.getLogger(__name__)\n logger.info('Imported dataframe:')\n logger.info(df.info())\n logger.info(df.describe())\n logger.info(df.head())\n return df", "def _get_data(self):\n project_name, experiment_id = self.parent._get_parent_identifiers()\n\n self._data = self.repository.get_dataframe_data(\n project_name, self.id, experiment_id=experiment_id\n )", "def _load_df_or_array(self, key: str, idx: Union[None, np.ndarray] = None) -> Union[pd.DataFrame, np.ndarray]:\n if key == self.actions_key:\n if idx is None:\n data = pd.read_hdf(self.path, key=key)\n else:\n data = pd.read_hdf(self.path, key=key, where=idx)\n else:\n with h5py.File(self.path, 'r') as f:\n if idx is None:\n data = f[key][...]\n else:\n data = f[key][idx, ...]\n\n return data", "def dataFrame(self):\n\n memory_file = StringIO(initial_value=self.sparql_result.decode('utf-8'), newline='\\n')\n reader = DictReader(memory_file)\n\n schema = StructType(\n list(map(lambda f: StructField(f, StringType()), reader.fieldnames))\n )\n\n data = list(map(lambda d: [d[f] for f in reader.fieldnames], list(reader)))\n\n return self.spark.createDataFrame(data, schema)", "def dataframe(self):\n\n if self._dataframe is None:\n try:\n import pandas as pd\n except ImportError:\n raise RuntimeError('To enable dataframe support, '\n 'run \\'pip install datadotworld[pandas]\\'')\n\n self._dataframe = pd.DataFrame.from_records(self._iter_rows(),\n coerce_float=True)\n\n return self._dataframe", "def get_df(self, version=None):\n pass", "def df(self):\n if os.path.isfile(self.path):\n df = io.parquet_to_df(self.path)\n else:\n df = self.refresh()\n\n if self.cols:\n for col in self.cols:\n if col not in df:\n log.warning(f\"Col {col} missing. 
Not in the sources?\")\n\n return df", "def make_dataset(self, df, **kwargs):\n\t\treturn df", "def load(self) -> pd.DataFrame:\n if os.path.exists(self.file_name):\n df = pd.read_csv(self.file_name, index_col=0)\n df = self._clean(df)\n else:\n _LOG.debug(\"No file '%s'\", self.file_name)\n df = pd.DataFrame()\n return df", "def get_df(self) -> pd.DataFrame:\n return pd.DataFrame(self.fetchall(), columns=self.headers())", "def get_pandas(self, name):\n val = self.get(name)\n if isinstance(val, dict):\n df = pandas.DataFrame(val)\n return df", "def _get_data(*, from_web: bool) -> pd.DataFrame:\n\n df = read_in_data.SaveFormats.CSV.read(from_web=from_web)\n return df", "def df():\n path, _ = os.path.split(os.path.abspath(__file__))\n project_path = os.path.join(path, os.pardir, os.pardir)\n\n values_path = os.path.join(project_path, \"data\", \"raw\", \"pumps_train_values.csv\")\n labels_path = os.path.join(project_path, \"data\", \"raw\", \"pumps_train_labels.csv\")\n\n train = pd.read_csv(values_path, index_col='id', parse_dates=[\"date_recorded\"])\n labels = pd.read_csv(labels_path, index_col='id')\n\n return train.join(labels)", "def run(self) -> DataFrame:\n with self.create_census_api_session():\n logger.info('Retrieving variables...')\n variables: Variables = self.get_variables()\n logger.info('Retrieving ACS tables...')\n tables = self.get_tables()\n\n # Add geometry\n gazetteer_files: List[GazetteerFile] = []\n shapefiles: List[Shapefile] = []\n if self.geometry == 'points':\n logger.info('Retrieving Gazetteer files...')\n gazetteer_files.extend(self.get_gazetteer_files())\n elif self.geometry == 'polygons':\n logger.info('Retrieving shapefiles...')\n shapefiles.extend(self.get_shapefiles())\n dataframe = self.assemble_dataframe(variables, tables, gazetteer_files, shapefiles)\n return dataframe", "def getDataFrame(self):\n return self.df", "def to_df(self):\n # check read only\n if self.__read_only:\n raise IOError(\"Table is for read only.\")\n\n # convert data to dicts\n data = dict(record.to_id_dict()\n for record in self.__data.values())\n\n # make data frame\n df = pd.DataFrame(data).T\n df.index.name = \"_id\"\n return df", "def create_dataframe(self):\n\n df = pd.DataFrame({'date': [],\n 'RUN': [],\n 'CLONE': [],\n 'GEN': pd.Series(0, index=[], dtype='int'),\n 'frame': pd.Series([], index=[], dtype='int'),\n 'time (ns)': [] }) # set the index\n df.set_index('date')\n print(df)\n\n # Save the DataFrame to disk\n\n ### create a file handle to store the data in (a dict-like) HDF5 format\n store = pd.HDFStore(self.dataframe_path)\n print(store)\n store.put('df', df)\n return store", "def pd(self, *args, **kwargs):\n return pd.DataFrame.from_records(self.aslist(), *args, **kwargs)", "def load_e_form():\n path = os.path.join(DATA_DIR, \"eform-materialsproject-85014.csv\")\n df = pd.read_csv(path, index_col=\"mpid\")\n return df", "def dataframe(self):\n return self.get_target().dataframe()", "def read(name, db):\n \n # Make connection with the database\n\tconn = sqlite3.connect(db)\n\tdf = pd.read_sql_query(\"select * from \" + name + ';', conn)\n \n # Print loaded data table name and return DataFrame\n\tprint(name + ': loaded')\n\treturn df", "def make_df(self):\n # read in file\n df = pd.read_csv(self.data_file)\n cols_to_drop = [f'view{x}' for x in range(1,4)]+['response']\n # subtract loc3 viewing from location of interest\n df[self.label_key] = df[self.predictor] - df['view3']\n df.drop(cols_to_drop, axis=1, inplace=True)\n df.reset_index(drop=True, inplace=True)\n\n return 
df", "def _setup_dataframe(self, serie, metadata=None):\n header = self.get_data_header(serie, dataset='cnv')\n df = self.get_data_in_frame(serie, header, dataset='cnv')\n df = self.df_handler.map_column_names_of_dataframe(df)\n\n return df", "def load_test_as_df(\n key: str, source_base_path: str = SOURCE_BASE_PATH) -> pd.DataFrame:\n\n return _load_target_and_source(key, source_base_path, 'test')", "def load_dataframe(self) -> None:\n with open(self.__data_path.split('.')[0] + '_dtypes.json', 'r') as f:\n dtypes = json.load(f)\n self.__DataFrame = pd.read_csv(self.__data_path, dtype=dtypes)\n self.map_items()", "def get_pandas(self):\n return pd.DataFrame(self.results)", "def load(cls):\n df = Operator_Table.df\n df.operator = df.operator.apply(sp.normalize)\n df.operator_alias = df.operator_alias.apply(sp.normalize)\n df = df.rename(columns={\"operator_alias\": \"alias\"})\n return SQLIndex(data=df).set_index(\"operator\")", "def load_renter_data():\n return pd.read_sql_query(_sql_query, _con)", "def _get_model_df(model_path_or_tfs):\n if isinstance(model_path_or_tfs, basestring):\n LOG.debug(\"Creating TwissOptics from '{:s}'\".format(model_path_or_tfs))\n df = tfs.read_tfs(model_path_or_tfs, index=\"NAME\")\n else:\n LOG.debug(\"Creating TwissOptics from input DataFrame\")\n df = model_path_or_tfs\n if (len(df.index.values) == 0) or not isinstance(df.index.values[0], basestring):\n raise IndexError(\"Index of DataFrame needs to be the element names.\"\n \"This does not seem to be the case.\")\n return df", "def create_dataframe():\r\n\r\n df = pd.read_csv('data/data.csv', header=0)\r\n return df", "def get_updated_dataframe():\n # pylint: disable=import-outside-toplevel\n from sotaque_brasileiro.io import fetch_paginated_data\n records = fetch_paginated_data(constants.API_RECORDS_ENDPOINT.value)\n df = parse_records_to_dataframe(records) # pylint: disable=invalid-name\n return df", "def create_data_frame(self):\n column_names = Annotations.create_columns(self.headers, self.annot_types)\n dtypes = Annotations.get_dtypes_for_group_annots(self.headers, self.annot_types)\n df = self.open_file(\n self.file_path,\n open_as=\"dataframe\",\n # Coerce values in group annotations\n converters=dtypes,\n # Header/column names\n names=self.headers,\n # Prevent pandas from reading first 2 lines in file\n # since they're passed in with param 'names'\n skiprows=2,\n )[0]\n self.file = Annotations.convert_header_to_multi_index(df, column_names)", "def to_pandas_dataframe(self):\n pd_index = self.index().to_pandas_index()\n return pd.DataFrame.from_items(self.collect()).set_index(pd_index)", "def data_frame_creator(self):\n\n return pd.DataFrame()", "def get_dataframe(self, params=None, chunksize=None):\n if chunksize:\n raise NotImplementedError(\"Buffered reading not supported yet\")\n # the resulting `rows` of a query provides a nice way to do this, though\n\n query = self.config[\"query\"]\n params = params or {}\n\n logger.debug(\n \"Fetching query {} with params {}...\".format(\n query, params\n )\n )\n rows = self.db.query(query, fetchall=True, **params)\n df = rows.export(\"df\")\n\n return df", "def get_dataframe(project, bucket, blob):\n try:\n logging.info(f'Creating the pandas dataframe for the blob {blob}')\n\n fs = gcsfs.GCSFileSystem(project=project)\n file = bucket + '/' + blob\n\n with fs.open(file) as f:\n df = pd.read_csv(f)\n\n except:\n logging.fatal(f'Error when try to create the dataframe')\n raise\n\n return df", "def to_df(self) -> pd.DataFrame:\n data = []\n for action in 
self.actions:\n data.append(action.to_df())\n df = pd.read_json(json.dumps(data), orient=\"list\")\n return df[self.fields]", "def db_to_df(query):\n conn = loader.database._connection\n return sql.read_frame(query, conn)", "def _dataframe_from_feather(fn, **kwargs):\n\treturn pd.read_feather(fn, **kwargs)", "def load_data_frame(name):\n\n return DataFrame(name).load()", "def process_data(self):\n logging.debug('process_data called')\n\n pd_time_series = pd.read_csv(f'{self.out_dir}docs/downloaded/'\n f'{self.filename}')\n\n pd_time_series = pd_time_series.drop('Lat', axis=1)\n pd_time_series = pd_time_series.drop('Long', axis=1)\n no_of_dates = len(pd_time_series.columns) - 2\n dateindex = pd.date_range(start='1-22-2020',\n periods=no_of_dates,\n freq='D').strftime('%d-%m')\n\n new_cols = ['Province/State', 'Country/Region']\n for index in dateindex:\n new_cols.append(index)\n pd_time_series.columns = new_cols\n\n pd_time_series = pd_time_series.drop('Province/State', axis=1)\n pd_edit_series = pd_time_series.set_index('Country/Region')\n\n pd_edit_series = pd_edit_series.T\n\n return pd_edit_series", "def load_data_into_pandas(db, sufficiently_old):\n engine = create_engine(db)\n cols = ['eudract_number',\n 'eudract_number_with_country',\n 'date_of_the_global_end_of_the_trial',\n 'trial_is_part_of_a_paediatric_investigation_plan',\n 'end_of_trial_status',\n 'trial_status',\n 'trial_human_pharmacology_phase_i',\n 'trial_therapeutic_exploratory_phase_ii',\n 'trial_therapeutic_confirmatory_phase_iii',\n 'trial_therapeutic_use_phase_iv',\n 'trial_bioequivalence_study',\n 'subject_healthy_volunteers',\n 'trial_condition_being_studied_is_a_rare_disease',\n 'trial_single_blind',\n 'full_title_of_the_trial',\n 'name_or_abbreviated_title_of_the_trial_where_available',\n 'trial_results',\n 'sponsors' ]\n return pd.read_sql_query(\"SELECT {} FROM public.euctr WHERE meta_updated > '{}'\".format(','.join(cols), sufficiently_old), con=engine)", "def sql_return_df(query, params, date_cols):\n conn = sqlite3.connect(db_filepath)\n df = pd.read_sql(query, conn, params=params, parse_dates=date_cols)\n conn.close()\n return df", "def gp_dataframe_import(filename):\n path = os.path.join('..', 'data', filename)\n frame = pd.read_csv(path)\n return frame", "def make_dataframe(self):\n logging.info('*** Creating the dataframes from the source files ' )\n \n for k in self.datasets_keys:\n #for k in ['igra2' , 'ncar']:\n \n logging.info('*** Creating the dataframe for the dataset: %s ' , k ) \n \n p_levels = self.data[k]['df']['observations_table']['z_coordinate'][:]\n logging.debug(' Loaded the z_coordinate')\n \n z_type = self.data[k]['df']['observations_table']['z_coordinate_type'][:]\n logging.debug(' Loaded the z_coordinate_type')\n \n obs_variable = self.data[k]['df']['observations_table']['observed_variable'][:]\n logging.debug(' Loaded the observed_variable')\n \n obs_values = self.data[k]['df']['observations_table']['observation_value'][:]\n logging.debug(' Loaded the observation_value')\n \n observation_id = self.data[k]['df']['observations_table']['observation_id'][:]\n logging.debug(' Loaded the observation_id')\n \n units = self.data[k]['df']['observations_table']['units'][:].astype(int)\n logging.debug(' Loaded the units') \n \n report_id = self.data[k]['df']['observations_table']['report_id'][:] \n logging.debug(' Loaded the report_id')\n \n date_time = self.data[k]['df']['observations_table']['date_time'][:]\n logging.debug(' Loaded the date_time (deltas)')\n \n lat , lon = 
self.data[k]['df']['observations_table']['latitude'][:] , self.data[k]['df']['observations_table']['longitude'][:]\n logging.debug(' Loaded the lat,lon ')\n \n \n self.obs_table_columns = list(self.data[k]['df']['observations_table'].keys() )\n \n self.data[k]['df'].close()\n \n \"\"\" Creating a dataframe \"\"\"\n columns = ['date_time', 'z_coordinate' , 'z_coordinate_type', 'observed_variable' , 'observation_value' , 'report_id' , 'observation_id' , 'latitude' , 'longitude', 'units']\n logging.info(' Loaded the data, creating dataframe ')\n \n df = pd.DataFrame( list(zip( date_time, p_levels, z_type, obs_variable , obs_values, report_id, observation_id , lat , lon, units ) ) , columns = columns ) \n \n \n \"\"\" Storing the dataframe \"\"\" ### try using xarrays ??? \n logging.debug('Storing the DF ' ) \n self.data[k]['dataframe'] = df\n \n logging.debug(' PD dataframe created !!! ')", "def AsPandasDF(self, columns=[]):\n\n # We do this by first generating a simple ROOT file, then loading it into a dataframe with\n # uproot.\n return ObjectStream(ResultPandasDF(self._ast, columns))", "def to_frame(self) -> DataFrame:\n if not self.is_initialized:\n _logger.info(\"Grid has not been initialized. Ensure to run DataGrid.initialize()\")\n return DataFrame()\n\n return self._post_process()", "def load() -> DataFrame:\n return load_file(__file__, \"default.csv.gz\")", "def to_dataframe(self, include_metadata: bool = True) -> pd.DataFrame:\n # Get all our data first with async\n # Note that all our pandas work will tax CPU so we wouldn't expect any\n # performance gains from doing the data parsing as a callback\n records = self.to_dict()\n data = []\n for series in records:\n df = pd.DataFrame(series.pop(\"data\"), columns=[\"period\", \"value\"])\n if include_metadata:\n df = df.assign(**series)\n data.append(df)\n return pd.concat(data, ignore_index=True)", "def get_pandas(location: str='') -> 'pandas.core.frame.DataFrame':\n db = CarsDb() # pylint: disable=invalid-name\n results = db.get_pandas(location)\n db.commit()\n db.close()\n return results.set_index('id')", "def get_df(self):\n return self.df", "def get_dataframe(self):\n self.logger.info('Fetching movie records...')\n session = connect()\n\n cols = [\n Movie.movie_id,\n Movie.title,\n Movie.start_year,\n Movie.genres,\n Movie.description,\n Movie.kind,\n ]\n\n filters = [\n Movie.description.isnot(None),\n Movie.genres.isnot(None),\n ]\n\n query = session.query(*cols).filter(*filters).order_by(Movie.start_year.desc())\n\n try:\n return pd.read_sql(query.statement, session.bind)\n finally:\n session.close()", "def get_main_dataset(self) -> pd.DataFrame:\n pass", "def __create_data_frame(self, soup):\n self.__data_frame = pd.read_html(str(soup))[0]\n timestamp = self.__navigate_rows(soup)\n # rename dataframe columns by columns name in sqlite\n self.__data_frame = self.__data_frame.rename(\n columns=self.__columns_name)\n self.__data_frame['time'] = pd.Series(timestamp)\n self.__data_frame['chg_perc'] = self.__data_frame['chg_perc'].\\\n str.replace('%', '')\n self.__data_frame['created_date'] = datetime.now()\n # save_file(self.__name_file, self.__data_frame.to_string())", "def df(self):\n return self._df", "def _get_df_from_csv(self, filename):\n df = pd.read_csv(filename)\n df.set_index('Date', drop=True, inplace=True)\n df.index = pd.to_datetime(df.index)\n return df", "def read_dataset():\n\n df = pd.read_csv('fake_job_postings.csv', index_col='job_id')\n return df", "def df_from_table(query, carto_sql_client, index=None):\n resp 
= carto_sql_client.send(query)\n schema = transform_schema(resp['fields'])\n if index:\n return pd.DataFrame(resp['rows']).set_index('cartodb_id').astype(schema)\n else:\n return pd.DataFrame(resp['rows']).astype(schema)", "def to_pandas(self):\n # TODO Add type translation.\n # Skipping analyzing 'pandas': found module but no type hints or library stubs\n import pandas as pd # type: ignore\n\n map = {}\n for n, c in self._field_data.items():\n map[n] = c.to_pandas()\n return pd.DataFrame(map)", "def get_all_projects(engine): \n # Query db\n# sql = (\"SELECT a.project_id, \"\n# \" b.o_number, \"\n# \" a.project_name, \"\n# \" a.project_description \"\n# \"FROM nivadatabase.projects a, \"\n# \" nivadatabase.projects_o_numbers b \"\n# \"WHERE a.project_id = b.project_id \"\n# \"ORDER BY a.project_id\")\n sql = (\"SELECT project_id, \"\n \" project_name, \"\n \" project_description \"\n \"FROM nivadatabase.projects \"\n \"ORDER BY project_id\")\n df = pd.read_sql(sql, engine)\n\n return df", "def save_to_dataframe(self):\n titles, years, months, days, authors = list(), list(), list(), list(), list()\n for doc in self.results[\"documents\"]:\n titles.append(doc['title'])\n years.append(doc['year'])\n months.append(doc['month'])\n days.append(doc['day'])\n authors.append(doc['authors'])\n return pd.DataFrame({\"title\": titles, \"years\": years, \"months\": months, \"days\": days, \"author\": authors})", "def loadDfResults(self, filename=None, trajectoryName=None):\n # chose HDF file to load\n filename = filename or self.HDF_FILE\n self.pypetTrajectory = pu.loadPypetTrajectory(filename, trajectoryName)\n self.nResults = len(self.pypetTrajectory.f_get_run_names())\n\n exploredParameters = self.pypetTrajectory.f_get_explored_parameters()\n\n # create pandas dataframe of all runs with parameters as keys\n logging.info(\"Creating `dfResults` dataframe ...\")\n niceParKeys = [p[11:] for p in exploredParameters.keys()]\n if not self.parameterSpace:\n niceParKeys = [p.split(\".\")[-1] for p in niceParKeys]\n self.dfResults = pd.DataFrame(columns=niceParKeys, dtype=object)\n for nicep, p in zip(niceParKeys, exploredParameters.keys()):\n self.dfResults[nicep] = exploredParameters[p].f_get_range()", "def get_gs_as_dataset(self, fname):\n return pd.read_csv(f\"{self.gs_base_url}/{fname}\", sep=\"\\t\")", "def get_df(*, refresh_local_data: bool) -> pd.DataFrame:\n\n df = _get_data(from_web=refresh_local_data)\n df = clean_up(df)\n return df", "def test_dataframe(self):\n\n url=\"http://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data\"\n readerobject=requester.url_to_df(url)\n self.assertIsInstance(readerobject,pd.DataFrame)", "def _to_dask(self):\n import dask.dataframe as dd\n urlpath = self._get_cache(self._urlpath)[0]\n self._df = dd.read_parquet(urlpath,\n storage_options=self._storage_options, **self._kwargs)\n self._load_metadata()\n return self._df", "def get_dataframe(q):\n cnx = create_engine(postgres_str)\n query = q\n return pd.read_sql_query(query, cnx)", "def get_dataframe(config: ModelSettings):\n df = pd.read_excel(config.df_data_source_path, engine=\"openpyxl\")\n\n # only use volumes with more than 30 slices\n if \"z\" in df.columns:\n ddf = df[(df[\"z\"] >= 30)]\n return df", "def load(cls, table_name: str, index_col: str = \"operator\"):\n # df = Operator_Table.df\n # df.operator = df.operator.apply(sp.normalize)\n # df.operator_alias = df.operator_alias.apply(sp.normalize)\n # df = df.rename(columns={\"operator_alias\": \"alias\"})\n try:\n import models\n\n cnxn = 
models.connect_db()\n cnxn[\"Base\"].prepare(Base.metadata.bind)\n op = Operator\n op.cnames()\n # TODO: Connect this up\n\n except KeyError:\n raise KeyError(\n f\"Backend has no column named '{index_col}'. Try passing 'index_col = column_name' to the backend constructor. Available columns are: {df.columns.tolist()}\"\n )\n return df", "def reader(self):\n df = pd.read_csv(self.path)\n return df", "def from_pandas(self, obj, index=True):\n return Reader(_from_pandas(obj, index=index))", "def loader():\n bucket = data_load_variables[\"bucket\"]\n\n if data_load_variables[\"use_lite_dataset\"]:\n dataset_name = data_load_variables[\"lite_dataset_name\"]\n else:\n dataset_name = data_load_variables[\"dataset_name\"]\n\n s3 = boto3.client('s3')\n\n obj = s3.get_object(Bucket=bucket, Key=dataset_name)\n # get object and file (key) from bucket\n\n df = pd.read_csv(obj['Body'])\n return df", "def dataframe(self, *args, **kwargs):\n\n try:\n return self.url.generator.dataframe(*args, **kwargs)\n except AttributeError:\n pass\n\n try:\n return self.url.dataframe(*args, **kwargs)\n except AttributeError:\n pass\n\n raise NotImplementedError(\"Url '{}' of type '{}' can't generate a dataframe \".format(self.url, type(self.url)))", "def get_as_pandas_dataframe(self):\n pd_df = pd.DataFrame()\n for name in self.dict_colname_to_index:\n pd_df[name] = np.copy(self[name])\n return pd_df", "def h5ToDf(filename):\n log.info(f\"Import data from: {filename}\")\n with h5py.File(filename, \"r\") as hf :\n d = {}\n for name in list(hf.keys()):\n d[name] = np.array(hf[name][:])\n df = pd.DataFrame(data=d)\n return(df)", "def h5ToDf(filename):\n log.info(f\"Import data from: {filename}\")\n with h5py.File(filename, \"r\") as hf :\n d = {}\n for name in list(hf.keys()):\n d[name] = np.array(hf[name][:])\n df = pd.DataFrame(data=d)\n return(df)", "def get_dataframe(self):\n # Using a list here appears faster than using a generator expression\n df = pd.DataFrame.from_records(\n [{'event_id' : x.event_id,\n 'time_delta' : x.time_delta,\n 'src_id' : x.src_id,\n 't' : x.cur_time,\n 'sink_id' : y}\n for x in self.events\n for y in x.sink_ids]\n )\n return df", "def _get_data(self):\n \n print(\"Getting Data...\")\n self.data = sgs.dataframe(self.serie_name, \n start = self.start_date, \n end = self.end_date)\n\n print(f\"Done! {self.data.shape[0]} rows were collected\")\n \n self.data.reset_index(inplace=True)\n self.data.columns = ['date', 'cdi']\n\n return self.data", "def get_dataframe(start_date=INITIAL_DATE, end_date=None) -> pd.DataFrame:\n\n end_date = (\n end_date\n if end_date is not None\n else dt.datetime.utcnow() - dt.timedelta(days=1)\n ).date()\n\n dates = pd.date_range(start_date, end_date)\n\n with futures.ThreadPoolExecutor() as ex:\n\n df = pd.concat(ex.map(get_dataframe_for_date, dates))\n\n df.rename(columns=str.lower, inplace=True)\n\n df.drop(columns=[c for c in df.columns if \"/\" in c], inplace=True)\n\n df[\"datetime\"] = pd.to_datetime(df[\"last_update\"])\n\n df[\"date\"] = df.datetime.map(lambda d: d.date())\n\n # df[\"county\"] = df.admin2\n renames = {\n \"country_region\": \"country\",\n \"province_state\": \"state\",\n \"admin2\": \"county\",\n }\n\n df.rename(columns=renames, inplace=True)\n\n df.drop(\n columns=[\"last update\", \"last_update\", \"lat\", \"long_\", \"combined_key\"],\n inplace=True,\n )\n\n return df" ]
[ "0.6643909", "0.6490127", "0.6398309", "0.63942033", "0.6390223", "0.6387757", "0.6322727", "0.6301943", "0.62897563", "0.62897563", "0.6250521", "0.62502694", "0.6227685", "0.62009954", "0.6197437", "0.6136766", "0.6132035", "0.60477877", "0.60312194", "0.6027194", "0.60256135", "0.59922755", "0.59810674", "0.5979885", "0.5968698", "0.59667194", "0.594673", "0.59356964", "0.59330183", "0.59130406", "0.5894691", "0.5877925", "0.58733386", "0.5873199", "0.5869431", "0.58605176", "0.58589983", "0.5858665", "0.5854277", "0.5844191", "0.58390373", "0.5835322", "0.5832735", "0.58215284", "0.5816", "0.5803874", "0.57994914", "0.57946557", "0.57930386", "0.579124", "0.57909834", "0.57831705", "0.5778069", "0.577127", "0.57706565", "0.5767673", "0.5762699", "0.576106", "0.57585263", "0.5757647", "0.5751433", "0.57510024", "0.5749889", "0.5746473", "0.5745049", "0.5744417", "0.5739633", "0.5736901", "0.57242876", "0.5707627", "0.5696649", "0.56815755", "0.56632394", "0.566227", "0.5659766", "0.56577206", "0.5657093", "0.5645726", "0.5640007", "0.5639524", "0.5638961", "0.5634919", "0.5626728", "0.5620365", "0.5612889", "0.5603653", "0.5602671", "0.55888677", "0.55886364", "0.55806017", "0.55775887", "0.55755633", "0.5574073", "0.5573257", "0.5566708", "0.5566354", "0.5566354", "0.5558708", "0.555746", "0.55556124" ]
0.60417974
18
Add the elements in ref_gen to an existing index.
def update_index(self, ref_gen): testing = True logging.warning('Updating index') es_insert.index(es, ref_gen, self.index_name, testing, action="update") logging.warning('Finished updating')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _add_to_index( env, meta_dict, file_str, logger ):\n global adapter_glob\n if adapter_glob is not None:\n adapter = adapter_glob\n else:\n logger.warning( u\"Connecting to index...\" )\n adapter = adapter_file.adapter(env)\n adapter_glob = adapter\n doc = document(\n env[\"metadata\"][\"known_keys\"].keys(),\n meta_dict,\n env,\n )\n return adapter.add(doc, boosts=env[\"metadata\"][\"boosts\"])\n #logger.info(u\"Added to index [%s]\", file_str)", "def build_index(self):\n self.rebuild_index()", "def insert_index(self):\n pass", "def index_add(all_index, this_index, samples, caller):\n for key, record in this_index.iteritems():\n if key not in all_index:\n all_index[key] = {}\n for sample_id in samples:\n if sample_id not in all_index[key]:\n all_index[key][sample_id] = {caller: []}\n elif caller not in all_index[key][sample_id]:\n all_index[key][sample_id][caller] = []\n # NB: If caller was run twice, will have 2 records here\n all_index[key][sample_id][caller].append(record)", "def build_index():\n pass", "def create_index():", "def add_index(self, index):\n self.add_index_sig(IndexSignature.from_index(index))", "def add_index(self, index):\n self.add_index_sig(IndexSignature.from_index(index))", "def _es_push_indexes(self, content):\n for c in self.es_clients:\n c.create_index(content)", "def append(self):\n target_index = get_index_from_alias(self.alias_name)\n if not target_index:\n self.replace()\n else:\n self.index_all(target_index)", "def add_index(self, idx, subproblem_shape):\n self.indices.append(int(idx))\n self.subproblem_shapes.append(subproblem_shape)", "def add_index_sig(self, index_sig):\n self.index_sigs.append(index_sig)", "def add_index_sig(self, index_sig):\n self.index_sigs.append(index_sig)", "def store_index(self, index, doc_type, source_list, init_id):\n\n bulk_actions = []\n doc_id = init_id\n\n for source in source_list:\n data_body = ElasticSearchUtility.__index_data_body(index, doc_type, doc_id, source[\"_source\"])\n bulk_actions.append(data_body)\n doc_id += 1\n\n print 'inserting - ', len(bulk_actions)\n helpers.bulk(self.es, bulk_actions)", "def index(self, index):\n index.column_protein[self.column].add((self.protein,self.protein_res))\n index.protein_domain[(self.protein.id,self.protein_res)] = (self.domain,self.domain_res)\n index.domain_structure[(self.domain.id,self.domain_res)].add((self.structure,self.structure_res))\n index.structure[(self.structure.index, self.structure_res)] = self", "def typesense_index_referral(ref, client=None):\n if not client:\n client = typesense_client()\n\n ref_document = {\n 'id': str(ref.pk),\n 'created': ref.created.timestamp(),\n 'type': ref.type.name,\n 'referring_org': ref.referring_org.name,\n 'regions': [i.name for i in ref.regions.all()],\n 'reference': ref.reference if ref.reference else '',\n 'description': ref.description if ref.description else '',\n 'address': ref.address if ref.address else '',\n 'lga': ref.lga.name if ref.lga else '',\n 'dop_triggers': [i.name for i in ref.dop_triggers.all()],\n }\n if ref.point:\n ref_document['point'] = [ref.point.x, ref.point.y]\n client.collections['referrals'].documents.upsert(ref_document)", "def build(self):\n\t\tself.documents = self.get_items_to_index()\n\t\tself.build_index()", "def reindex(self):", "def reindex(self):", "def gene(self, idx, value):\r\n self.genes[idx] = value", "def add_to_index(self, term_, doc_id_):\n\n if(term_ not in self.inverted_index.keys()):\n postingsList=LinkedList()\n postingsList.insert_at_end(doc_id_)\n #Doc freq\n 
postingsList.length=postingsList.length+1\n self.inverted_index[term_]=postingsList\n# self.inverted_index[term_].start_node.term_frequency += 1\n elif(not self.is_doc_id_in_posting_list(self.inverted_index[term_],doc_id_,term_)):\n self.inverted_index[term_].insert_at_end(doc_id_)\n self.inverted_index[term_].length=self.inverted_index[term_].length+1", "def add(self, name, index = None):\n if index is None:\n while self.indexDict.has_key(self.count):\n self.count += 1\n index = self.count\n self.fieldDict[name] = index\n self.indexDict[index] = name", "def add_read_to_vec_using_ref(self, read):\n\t\ti = read.offset\n\t\tfor p in self.refmap.gap_map[read.ref_seq_id][read.offset:(read.offset+len(read.seq))]:\n\t\t\ts = self.refmap.fasta[read.ref_seq_id].seq[i]\n\t\t\tif s=='U': s='T'\n\t\t\tif s not in ('A','T','C','G'): s='N'\n\t\t\tDF.add_to_vec(self, nt=s, positions=[p], counts=[read.copy])\n\t\t\ti += 1", "def create_reference_index(target, sclass):\n # Retrieve reference & store in FileStoreID\n ref_path = sclass.unavoidable_download_method(target, 'ref.fasta')\n\n # Tool call\n command = 'samtools faidx {}'.format(sclass.docker_path(ref_path))\n sclass.docker_call(command, tool_name='samtools')\n\n # Update FileStoreID of output\n target.updateGlobalFile(sclass.ids['ref.fai'], ref_path + '.fai')", "def build_index(self):\n \n \n geoids = self.partitions.find_or_new(table='facilities_geoids')\n addresses = self.partitions.find_or_new(table='facilities_addresses')\n facilities = self.partitions.find(table='facilities')\n \n facilities.attach(addresses,'addresses')\n facilities.attach(geoids,'geoids')\n \n q = \"\"\"\n SELECT year, type, oshpd_id, facility_name, dba_city, dba_zip_code, blockgroup_gvid, tract_gvid, county_gvid\n FROM facilities\n JOIN geoids.facilities_geoids AS geoids ON geoids.facilities_id = facilities.id\n JOIN addresses.facilities_addresses AS addresses ON addresses.facilities_id = facilities.id\n \"\"\"\n \n p = self.partitions.find_or_new(table='facilities_index')\n p.clean()\n lr = self.init_log_rate()\n \n with p.inserter() as ins:\n for row in facilities.query(q):\n ins.insert(row)\n lr(str(p.identity))", "def rebuild_index():\n print('Building indexes...')\n print(data_fldr)\n ndx = []\n for root, _, files in os.walk(data_fldr):\n for f in files:\n if f[-3:].upper() in ['CSV','TXT']:\n ndx.extend(get_index_terms(root + os.sep + f))\n with open(ndx_file, 'w') as fio:\n for i in ndx:\n fio.write(i + '\\n')", "def add_target_and_index(self, name, sig, signode):\n key = normalize_object_name(name)\n if key in self.state.document.ids:\n return\n\n signode['names'].append(name)\n signode['ids'].append(key)\n signode['first'] = not self.names\n self.indexnode['entries'].append(\n ('single', 'JSON Objects; {}'.format(name), key, '', None))", "def store(self, doc):\n if doc is None:\n return\n assert isinstance(doc, Document)\n idx = doc.features.get(self.idxfeatname())\n if idx is None:\n raise Exception(\"Cannot append document, no __idx_ID feature\")\n self.__setitem__(idx, doc)", "def add_ref(self, irsb_addr, stmt_idx, insn_addr):\n\n ref = (irsb_addr, stmt_idx, insn_addr)\n if ref not in self.refs:\n self.refs.add(ref)", "def addIndex(self, index):\r\n assert type(index)==int\r\n assert 0<=index and index < self._dataset.getSize()\r\n\r\n if not (index in self._indices):\r\n self._indices.append(index)", "def build_index(self):\n\t\tix = self.create_index()\n\t\twriter = AsyncWriter(ix)\n\n\t\tfor i, document in enumerate(self.documents):\n\t\t\tif 
document:\n\t\t\t\twriter.add_document(**document)\n\t\t\tupdate_progress_bar(\"Building Index\", i, len(self.documents))\n\n\t\twriter.commit(optimize=True)", "def generate_reverse_index(self):", "def _add_to_index_operations(self, which, reconstrained, what, warning):\n if warning and reconstrained.size > 0:\n # TODO: figure out which parameters have changed and only print those\n print(\"WARNING: reconstraining parameters {}\".format(self.hierarchy_name() or self.name))\n index = self._raveled_index()\n which.add(what, index)\n return index", "def _add_to_index_operations(self, which, reconstrained, what, warning):\n if warning and reconstrained.size > 0:\n # TODO: figure out which parameters have changed and only print those\n print(\"WARNING: reconstraining parameters {}\".format(self.hierarchy_name() or self.name))\n index = self._raveled_index()\n which.add(what, index)\n return index", "def contribute_to_class(self, cls):\n if self.db_index:\n new_index = (self.name,)\n if new_index not in cls._meta.indexes:\n cls._meta.indexes = tuple(list(cls._meta.indexes) + [new_index])", "def AddIndex(self, target):\n if \"w\" not in self.mode:\n raise IOError(\"FileStoreImage %s is not in write mode.\", self.urn)\n predicate = (\"index:target:%s\" % target).lower()\n data_store.DB.MultiSet(self.urn, {predicate: target}, token=self.token,\n replace=True, sync=False)", "def _update_index(self, descriptors):\n with self._model_lock:\n if self.read_only:\n raise ReadOnlyError(\"Cannot modify container attributes due \"\n \"to being in read-only mode.\")\n # tee out iterable for use in adding to index as well as hash code\n # generation.\n d_for_index, d_for_hashing = itertools.tee(descriptors, 2)\n\n self._log.debug(\"Updating descriptor index.\")\n self.descriptor_index.add_many_descriptors(d_for_index)\n\n self._log.debug(\"Generating hash codes for new descriptors\")\n prog_reporter = ProgressReporter(self._log.debug, 1.0).start()\n #: :type: collections.deque[numpy.ndarray[bool]]\n hash_vectors = collections.deque() # for updating hash_index\n for d in d_for_hashing:\n h_vec = self.lsh_functor.get_hash(d.vector())\n hash_vectors.append(h_vec)\n h_int = bit_vector_to_int_large(h_vec)\n # Get, update and reinsert hash UUID set object\n #: :type: set\n hash_uuid_set = self.hash2uuids_kvstore.get(h_int, set())\n hash_uuid_set.add(d.uuid())\n self.hash2uuids_kvstore.add(h_int, hash_uuid_set)\n prog_reporter.increment_report()\n prog_reporter.report()\n\n if self.hash_index is not None:\n self._log.debug(\"Updating hash index structure.\")\n self.hash_index.update_index(hash_vectors)", "def add(self, index):\n index_bytes = int(index).to_bytes(self._index_size,\n byteorder=\"little\", signed=False)\n self._fout.write(index_bytes)", "def addEdgeToIndex(self, edge):\n self.idx.add(self.edgecounter, (edge.getMinX(), edge.getMinY(), edge.getMaxX(), edge.getMaxY()),obj=edge)\n # print \"%d/%d -> %d/%d\" % (edge.getMinX(), edge.getMinY(), edge.getMaxX(), edge.getMaxY())\n self.edgeindex_edge[self.edgecounter] = edge\n self.edgecounter = self.edgecounter + 1", "def create_index(self):\r\n\r\n #### Begin functionality here\r\n\r\n return()", "def add_inds(self, var, src, dest, dest_shift = 0):\n func = self.add_inds_func[var]\n inds = self.dest_inds[var]\n func.prepared_async_call(\n func.grid, func.block, None,\n dest, int(dest_shift), inds.gpudata, src, inds.size)", "def _do_index_fields(self, doc, generator, obj, obj_weight):\n for field in self.fields + self.tags:\n # Trying to resolve field value or 
skip it\n # Отладочка:\n # print(field, field.resolve(obj))\n try:\n value = field.resolve(obj)\n if value is None:\n continue\n except AttributeError:\n continue\n if field.prefix:\n fvalue = field.convert(value)\n doc.add_value(field.number, fvalue)\n prefix = smart_text(field.get_tag())\n value = smart_text(value)\n generator.index_text_without_positions(value, field.weight*obj_weight, prefix)\n if prefix: # if prefixed then also index without prefix\n generator.index_text_without_positions(value, field.weight*obj_weight)", "def build_index(self, dict_pg_info, list_insert):\n flag_exit = True\n if flag_exit is False:\n self.create_new_index(dict_pg_info)\n self.insert_index(dict_pg_info, list_insert)", "def __checkFeatureIndex__(self, index, indexes):\n if index is not False:\n indexes.append(index)", "def set_index(self, idx, rel, attrs):\n\n query = 'CREATE INDEX {} ON {} ({})'.format(idx, rel, ','.join(attrs))\n\n with self.tpch_cxn.cursor() as curs:\n try:\n curs.execute(query)\n except pg.ProgrammingError as e:\n print(e)", "def _extend_index_dim(input_index, new_index, new_index_max):\n # Construct an iterator from new_index\n if isinstance(new_index, (int, np.integer)):\n it = [new_index]\n else:\n if isinstance(new_index, slice):\n # slices don't work very well with multi-dimensional circular mappings.\n it = _conv_slice_to_list(slice_obj=new_index, stop_def=new_index_max)\n else:\n it = new_index\n # Index extension\n if input_index is None:\n output = []\n for i in it:\n output.append(tuple([i]))\n return output\n else:\n output = []\n for _i in input_index:\n output_row = []\n for i in it:\n output_row.append(tuple(list(_i) + [i]))\n output.append(output_row)\n return output", "def create_index_item(doc, destination_index):\n\n action = { 'index' : { '_index' : destination_index, '_type' : doc['_type'] } }\n data = doc['_source']\n return action, data", "def _build_index(self, descriptors):\n with self._model_lock:\n if self.read_only:\n raise ReadOnlyError(\"Cannot modify container attributes due to \"\n \"being in read-only mode.\")\n\n self._log.debug(\"Clearing and adding new descriptor elements\")\n self.descriptor_index.clear()\n self.descriptor_index.add_many_descriptors(descriptors)\n\n self._log.debug(\"Generating hash codes\")\n #: :type: collections.deque[numpy.ndarray[bool]]\n hash_vectors = collections.deque()\n self.hash2uuids_kvstore.clear()\n prog_reporter = ProgressReporter(self._log.debug, 1.0).start()\n for d in self.descriptor_index:\n h_vec = self.lsh_functor.get_hash(d.vector())\n hash_vectors.append(h_vec)\n\n h_int = bit_vector_to_int_large(h_vec)\n\n # Get, update and reinsert hash UUID set object\n #: :type: set\n hash_uuid_set = self.hash2uuids_kvstore.get(h_int, set())\n hash_uuid_set.add(d.uuid())\n self.hash2uuids_kvstore.add(h_int, hash_uuid_set)\n\n prog_reporter.increment_report()\n prog_reporter.report()\n\n if self.hash_index is not None:\n self._log.debug(\"Clearing and building hash index of type %s\",\n type(self.hash_index))\n # a build is supposed to clear previous state.\n self.hash_index.build_index(hash_vectors)", "def init_index(self):\n raise NotImplementedError", "def index(self, bytes_gen: Iterator[bytes] = None, **kwargs):\n self._call_client(bytes_gen, mode='index', **kwargs)", "def generate_inverted_index(self, doc_id, tokenized_document):\n self.unique_doc_ids.add(doc_id)\n for t in tokenized_document:\n self.add_to_index(t, doc_id)", "def __call__(self, doc: Doc) -> Doc:\n for val in self._get_vals(doc):\n key = 
self._get_key(val)\n try:\n self._table.get(key).append(val)\n except AttributeError:\n self._table.set(key, [val])\n self._size += 1\n return super().__call__(doc)", "def setReference(self, updatedIndices):\n # self.colors[:] = [self.colors[i] for i in updatedIndices]\n self.cellData[:] = [self.cellData[i] for i in updatedIndices]", "def build_index(self):\n self.create_index()\n logger.debug(f\"Building index with {self.n_trees} trees.\")\n\n for i in range(len(self.corpus_embeddings)):\n self.index.add_item(i, self.corpus_embeddings[i])\n self.index.build(self.n_trees)", "def _replace_or_append_index(altered_index):\n for index, existing in enumerate(course_indexes):\n if all(existing[attr] == altered_index[attr] for attr in ['org', 'course', 'run']):\n course_indexes[index] = altered_index\n return\n course_indexes.append(altered_index)", "def setup(self):\n collection = self._get_collection()\n\n indices = copy(self.params[\"indices\"])\n\n if \"when\" not in indices:\n indices[\"when\"] = {}\n\n for index in indices:\n self.log(DEBUG, \"Ensuring we have index for {}\".format(index))\n\n options = indices[index]\n collection.create_index(index, *options)\n self.log(DEBUG, \"Done.\")", "def generate_inv_index(people):\n pass", "def update_idx(self):\n self.idx = (self.F * self.FMUL +\n self.E * self.EMUL +\n self.Z * self.ZMUL +\n self.A * self.AMUL +\n self.B * self.BMUL )", "def rebuild_all_indexes():\n response = _get_lambda_client().invoke(\n FunctionName=indexer_function_name,\n InvocationType=\"Event\",\n )", "def build_index(self):\r\n date_time('Building indexes in citations table')\r\n self.cursor.execute('DROP INDEX IF EXISTS IDX_citations ;')\r\n self.cursor.execute('CREATE INDEX IDX_citations ON citations (citation);')\r\n self.conn.commit()\r\n gc.collect()", "def add_value(self, value, attr):\n self.index[value] = attr", "def index_object(idxs=None):", "def assign_index(self):\n\n i = 0\n for word in self.words:\n self.index[word] = i\n i += 1", "def reindex(self):\n raise NotImplementedError()", "def _make_index(self, fname, sents, words):\n for w in words:\n # word index for this file only\n findex = []\n\n for ixS, s in enumerate(sents):\n # iterate over each word in the sentencep\n for ixT, token in enumerate(s):\n # could use regex for substring matching instead\n if w == token.lower():\n findex.append((ixS, ixT))\n # keep track of word use frequency\n self._freq[w] += 1\n\n # grow the main index \n self._index[w][fname]= findex", "def _add_related(related, dep, all_related, index, connector=None):\n doc = {}\n doc[\"relationForm\"] = dep\n doc[\"rawName\"] = related\n doc[\"tokenIndex\"] = int(index)\n doc[\"offsetStart\"] = A.lookup[int(index)][\"start\"]\n doc[\"offsetEnd\"] = A.lookup[int(index)][\"end\"]\n doc[\"connector\"] = \"\" if connector is None else connector\n if not doc in all_related:\n all_related.append(doc)\n return all_related", "def instantiate_indexor(prefix, width):\n stdlib = py_ast.Stdlib()\n name = py_ast.CompVar(NAME_SCHEME[\"index name\"].format(prefix=prefix))\n add_name = py_ast.CompVar(f\"{prefix}_add\")\n cells = [\n py_ast.Cell(name, stdlib.register(width)),\n py_ast.Cell(add_name, stdlib.op(\"add\", width, signed=False)),\n ]\n\n init_name = py_ast.CompVar(NAME_SCHEME[\"index init\"].format(prefix=prefix))\n init_group = py_ast.Group(\n init_name,\n connections=[\n py_ast.Connect(\n py_ast.ConstantPort(width, 2 ** width - 1), py_ast.CompPort(name, \"in\")\n ),\n py_ast.Connect(\n py_ast.ConstantPort(1, 1), py_ast.CompPort(name, 
\"write_en\")\n ),\n py_ast.Connect(\n py_ast.CompPort(name, \"done\"), py_ast.HolePort(init_name, \"done\")\n ),\n ],\n )\n\n upd_name = py_ast.CompVar(NAME_SCHEME[\"index update\"].format(prefix=prefix))\n upd_group = py_ast.Group(\n upd_name,\n connections=[\n py_ast.Connect(\n py_ast.ConstantPort(width, 1), py_ast.CompPort(add_name, \"left\")\n ),\n py_ast.Connect(\n py_ast.CompPort(name, \"out\"), py_ast.CompPort(add_name, \"right\")\n ),\n py_ast.Connect(\n py_ast.CompPort(add_name, \"out\"), py_ast.CompPort(name, \"in\")\n ),\n py_ast.Connect(\n py_ast.ConstantPort(1, 1), py_ast.CompPort(name, \"write_en\")\n ),\n py_ast.Connect(\n py_ast.CompPort(name, \"done\"), py_ast.HolePort(upd_name, \"done\")\n ),\n ],\n )\n\n return (cells, [init_group, upd_group])", "def add_index(self, name, func):\n assert name not in self.indices\n info_name = 'index:%s:%s' % (self.info['name'], name)\n info = self.store._get_info(info_name, index_for=self.info['name'])\n index = Index(self, info, func)\n self.indices[name] = index\n if IndexKeyBuilder:\n self._index_keys = IndexKeyBuilder(self.indices.values()).build\n return index", "def index_together(self, new_value):\n self._index_together = self._normalize_together(new_value)", "def index_update(tensor, indices, values):\n tensor[indices] = values\n return tensor", "def solr_index(serializer, instances):\n connection = __solr_prepare(instances)\n serialized = serializer(instances, many=True)\n data = serialized.data\n connection.add(data)\n connection.commit()", "def new_inv_ind(doc_inds, documents, inv_ind_func):\n temp = dict()\n\n # msgs here is the item dict \n for item in documents:\n # print(item)\n temp[item['id']] = item\n\n new_docs = np.array([])\n for i in doc_inds:\n new_docs = np.append(new_docs, temp[i])\n\n new_inv_ind = inv_ind_func(new_docs)\n return new_inv_ind", "def create_index(self):\n self.send_robust(self.es_index, data=self.es_meta)\n self.set_index_normal_settings()", "def indirectobject(self, index, io):\n if self.indices != '':\n self.indices += ' '\n self.indices += '%d %d' % (index, len(self.ios))\n self.ios += io\n self.objects.append(index)", "def add_doc_to_get(self,action,update_spec,action_buffer_index):\n doc = {'_index' : action['_index'],\n '_type' : action['_type'],\n '_id' : action['_id']}\n self.doc_to_get.append((doc,update_spec,action_buffer_index))", "def addAtIndex(self, index, val):\n if 0 <= index < len(self.nums):\n self.nums.insert(index, val)\n elif index == len(self.nums):\n self.nums.append(val)", "def add_read_to_vec(self, read, copy=None):\n\t\tfor i,s in enumerate(read.seq):\n\t\t\t# the i-th non-gapped position for ref_seq_id starting at offset read.offset\n\t\t\tgapped_pos = self.refmap.ungapped_to_gapped(read.ref_seq_id, read.offset + i)\n\t\t\tDF.add_to_vec(self, nt=s, positions=[gapped_pos], counts=[read.copy if copy is None else copy])", "def add(self, batch_size=10000):\n if self.N <= batch_size:\n self.index.add(self.database)\n else:\n [self.index.add(self.database[i:i + batch_size])\n for i in tqdm(range(0, len(self.database), batch_size),\n desc='[index] add')]", "def setIndex(self,index):\n if isinstance(index,str):\n index = MaterialIndex(index)\n self[0].refractiveindex = index", "def _feed(self, SeqSeqToken, token_to_index, index_to_token):\n for SeqToken in SeqSeqToken:\n for Token in SeqToken:\n if not token_to_index.has_key(Token):\n i = len(index_to_token)\n token_to_index[Token] = i\n index_to_token.insert(i, Token)", "def index(self, index):\n\n self._index = index", "def 
_key_generated(self, key, index):\n self.keys[self.get_address(key)] = key\n self.last_generated_index = index", "def write_index(self):\n self.Lock = True\n self.file_out.seek(self.index_offset)\n for identifier, offset in self.index.items():\n self._write_identifier(identifier)\n self._write_offset(offset)", "def document_add(index_name, doc_type, doc, doc_id=None):\n resp = es.index(index=index_name, doc_type=doc_type, body=doc, id=doc_id)\n print(resp)", "def add(self, idx):\n self.g += graph[self.visited[-1], self.not_visited[idx]]\n self.visited.append(self.not_visited.pop(idx))\n if len(self.not_visited) > 0:\n self.h = minimum_spanning_arborescence(self)\n else:\n self.h = 0", "def __generate_features_index__(self, feature_names, dictionaries):\n keys = []\n for name, dictionary in zip(feature_names, dictionaries):\n features = []\n for feature in dictionary.keys():\n if dictionary.get(feature) > self._cutoff:\n features.append((name, feature))\n self.feature_freq[name] += 1\n keys.extend(features)\n for i in range(len(keys)):\n self._features_index[keys[i]] = i\n self.features_list = tuple(keys)\n self._features_vector_length = len(keys)", "def index(self, index):\n \"\"\"\n if index is None:\n raise ValueError(\"Invalid value for `index`, must not be `None`\")\n \"\"\"\n\n self.container['index'] = index", "def index_batch(self,batch):\n pass", "def add_to_index(index,keyword,url):\n\tif keyword in index:\n\t\tif url not in index[keyword]:\n\t\t\tindex[keyword].append(url)\n\telse:\n\t\tindex[keyword] = [url]", "def index(self):\n for block_dir_relative in sorted(next(os.walk(self.data_dir))[1]):\n td_pairs = self.parse_block(block_dir_relative)\n index_id = 'index_'+block_dir_relative\n self.intermediate_indices.append(index_id)\n with ii.InvertedIndexWriter(index_id, directory=self.output_dir, \n postings_encoding=\n self.postings_encoding) as index:\n self.invert_write(td_pairs, index)\n td_pairs = None\n self.save()\n with ii.InvertedIndexWriter(self.index_name, directory=self.output_dir, \n postings_encoding=\n self.postings_encoding) as merged_index:\n with contextlib.ExitStack() as stack:\n indices = [stack.enter_context(\n ii.InvertedIndexIterator(index_id, \n directory=self.output_dir, \n postings_encoding=\n self.postings_encoding)) \n for index_id in self.intermediate_indices]\n self.merge(indices, merged_index)", "def init_index(clear=False):\n return _run_indexer_func(\"init_index\", clear)", "def add_mode_index(self) -> None:", "def addNodeToIndex(self, node):\n # self.nodeidx.add(self.nodecounter, (node.getPoint()[0], node.getPoint()[1]), obj=node)\n self.nodeidx.add(self.nodecounter, (node.getPoint()[0], node.getPoint()[1], node.getPoint()[0], node.getPoint()[1]))\n\n self.node_counter__node[self.nodecounter] = node", "def write_genre_index(self):\n for giEntry in self.genreIndex:\n # Write to file\n self.db_file.write(giEntry.get_representation())", "def install(self, index_set):\n index_set.indices = map(self.stem, index_set.indices)\n index_set.required_indices = map(self.stem, index_set.required_indices)\n self.unique_target_concepts[index_set.target_concept] = True\n for index in index_set.indices:\n if not index in self.target_concepts.get(index, []):\n self.target_concepts[index] = ([index_set.target_concept] +\n self.target_concepts.get(index, []))\n if not index_set in self.index_sets.get(index, []):\n self.index_sets[index] = [index_set] + self.index_sets.get(index, [])", "def offset_index(self, offset):\n if self.has_index:\n self.index += offset", "def 
handle(self, *args, **options):\n self.create_indices()\n self.bulk()", "def update_index(signum):\n cdx = redis_cli.zrange('ipfs:cdxj', 0, -1)\n cdx = ''.join(cdx)\n buff = BytesIO(cdx)\n\n # Add New Index\n res = ipfs_api.add(CustomNameStream(buff, 'index.cdxj'))\n print('Updating Index: ' + str(res))\n\n # Register with IPNS\n res = ipfs_api.name_publish(res['Hash'])\n print res", "def index(self, *index):\n # .index() resets\n s = self._clone()\n if not index:\n s._index = None\n else:\n s._index = (self._index or []) + list(index)\n return s", "def insert(self, index, p_object): # real signature unknown; restored from __doc__\n pass" ]
[ "0.6223327", "0.6165989", "0.6162208", "0.6124569", "0.5892641", "0.5827929", "0.5818835", "0.5818835", "0.5806102", "0.57331675", "0.5731806", "0.57138515", "0.57138515", "0.569752", "0.5685238", "0.5673912", "0.56449544", "0.5584928", "0.5584928", "0.55495024", "0.5501107", "0.54773134", "0.54759943", "0.54496276", "0.5429104", "0.5428337", "0.54098165", "0.5406296", "0.5398407", "0.5395309", "0.5389927", "0.5368321", "0.5368143", "0.5368143", "0.5348377", "0.5346112", "0.5334828", "0.533262", "0.53320146", "0.532851", "0.53253615", "0.5321226", "0.52903575", "0.5289711", "0.52796054", "0.52586514", "0.52477473", "0.52447665", "0.5236339", "0.5235498", "0.52281815", "0.52257967", "0.5219444", "0.5213583", "0.52013814", "0.51773065", "0.51755226", "0.5172997", "0.51660633", "0.5162047", "0.5141804", "0.51362854", "0.5125586", "0.51213986", "0.5113202", "0.5102267", "0.50999504", "0.5097956", "0.50976896", "0.5097634", "0.509544", "0.50721383", "0.5068376", "0.5061008", "0.5052031", "0.5047461", "0.5040505", "0.5033261", "0.50239795", "0.5021728", "0.50198215", "0.5014421", "0.50143445", "0.50126487", "0.50091857", "0.5005727", "0.50037116", "0.49963626", "0.49957916", "0.49899316", "0.49862984", "0.49669495", "0.4965515", "0.49586642", "0.49580833", "0.49559072", "0.4950784", "0.49498785", "0.49452364", "0.4938816" ]
0.7487727
0
Create new CSP crossword generate.
def __init__(self, crossword): self.crossword = crossword self.domains = { var: self.crossword.words.copy() for var in self.crossword.variables }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate():", "def create_word(self):\r\n\r\n template = self.word_constructions.get()\r\n word = \"\"\r\n for c in template:\r\n if c == \"v\":\r\n letter = self.get_letter(100)\r\n else:\r\n letter = self.get_letter(0)\r\n word += letter\r\n\r\n while not any(letter in self.vowels for letter in word):\r\n length = len(word)\r\n if length == 1:\r\n index = 0\r\n elif length == 2:\r\n index = random.randrange(0, 2)\r\n else:\r\n a = len(word) / 2\r\n index = a + random.randrange(-a / 2, a / 2)\r\n word = word[:index] + self.get_letter(100) + word[index + 1:]\r\n\r\n if random.random() > self.capital_chance:\r\n word = word.capitalize()\r\n self.words.append(word)\r\n self.word_count += 1\r\n return word", "def generate(name, domain, country, state, locale, email,\n keytype, keylength):\n if not domain:\n logger.error(\n \"ctl:info:generate\", \"Choose a fully-qualified domain name of the \"\n \"certificate. Must match a domain present on the system\"\n )\n domain = click.prompt(\"Domain name\")\n if not country:\n logger.info(\n \"ctl:cert:generate\",\n \"Two-character country code (ex.: 'US' or 'CA')\"\n )\n country = click.prompt(\"Country code\")\n if not state:\n state = click.prompt(\"State/Province\")\n if not locale:\n locale = click.prompt(\"City/Town/Locale\")\n if not email:\n email = click.prompt(\"Contact email [optional]\")\n try:\n cmd = client().certificates.generate\n job, data = cmd(\n name, domain, country, state, locale, email, keytype, keylength)\n handle_job(job)\n except Exception as e:\n raise CLIException(str(e))", "def Generar_Claves():\n salida=Keypp()\n savekey(salida)\n savecomp(salida)", "def generate(self):", "def create_word(self):\n return self.random.choice(CONSONANTS) + self.random.choice(VOWELS)", "def generate_for_construct(self, construct):\n\t\treturn", "def chipseq_cross_correlation():\n\n mkdir(CROSS_CORRELATION_DIR)\n \n template = \"\"\"Rscript {run_spp} -c={input_bam} -savp={srr}.pdf -out={srr}.txt\"\"\"\n\n printp(\"\"\"\\n#\\n# ChIP-seq QC\\n#\\n\"\"\")\n printp(\"\"\"# drmr:label cross-correlation\\n\"\"\")\n printp(\"\"\"\\n# drmr:job nodes=1 processors=1 memory=15g working_directory={} time_limit=4h\"\"\".format(CROSS_CORRELATION_DIR))\n\n run_spp = os.getenv(\"RUN_SPP_PATH\")\n\n for sample, info in DATA.items():\n for x in ['treatment', 'control']:\n input_bam = get_pruned_bam(sample, control = False) if x == 'treatment' else get_pruned_bam(sample, control = True)\n srr = get_srr(sample) if x == 'treatment' else get_input_control_srr(sample)\n printp(template.format(**locals()))\n\n printp(\"\"\"\\n# drmr:wait\"\"\")", "def create(ctx):\n pass", "def make(self, *args, **kwargs):\n return _TestA_swig.cleanslate_sptr_make(self, *args, **kwargs)", "def generateByWord(model, voc, maxlen=20, diversity=0.5, numwords=42):\n\n text, sym_indices, indices_sym = voc\n syms = set(text)\n start_index = random.randint(0, len(text) - maxlen - 1) \n generated = ''\n sentence = text[start_index: start_index + maxlen]\n \n #generated += sentence\n generated += ' '.join(sentence)\n print('----- Generating with seed: \"' + ' '.join(sentence) + '\"')\n sys.stdout.write(generated)\n\n for i in range(numwords):\n x = np.zeros((1, maxlen, len(syms)))\n for t, sym in enumerate(sentence):\n x[0, t, sym_indices[sym]] = 1.\n \n preds = model.predict(x, verbose=0)[0]\n next_index = sample(preds, diversity)\n next_sym = indices_sym[next_index]\n generated += ' '+next_sym\n sentence.append(next_sym)\n tmpsentence = sentence[1:]\n sentence = tmpsentence\n 
sys.stdout.write(next_sym+' ')\n sys.stdout.flush()\n print()", "def CreateWord(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def generateWord2(self, parameters=None):\n\n\t\t##Initial set-up\n\t\t#A syllable consists of an optional onset, a nucleus, and an optional coda\n\t\t#Sources:\n\t\t# http://en.wikipedia.org/wiki/English_phonology#Phonotactics\n\t\t# http://en.wiktionary.org/wiki/Appendix:English_pronunciation\n\t\tonsets = [\"ch\", \"pl\", \"bl\", \"cl\", \"gl\", \"pr\", \"br\", \"tr\", \"dr\", \"cr\", \"gr\", \"tw\", \"dw\", \"qu\", \"pu\",\n\t\t\t\t \"fl\", \"sl\", \"fr\", \"thr\", \"shr\", \"wh\", \"sw\",\n\t\t\t\t \"sp\", \"st\", \"sk\", \"sm\", \"sn\", \"sph\", \"spl\", \"spr\", \"str\", \"scr\", \"squ\", \"sm\"] #Plus the normal consonants\n\t\tnuclei = [\"ai\", \"ay\", \"ea\", \"ee\", \"y\", \"oa\", \"au\", \"oi\", \"oo\", \"ou\"] #Plus the normal vowels\n\t\tcodas = [\"ch\", \"lp\", \"lb\", \"lt\", \"ld\", \"lch\", \"lg\", \"lk\", \"rp\", \"rb\", \"rt\", \"rd\", \"rch\", \"rk\", \"lf\", \"lth\",\n\t\t\t\t \"lsh\", \"rf\", \"rth\", \"rs\", \"rsh\", \"lm\", \"ln\", \"rm\", \"rn\", \"rl\", \"mp\", \"nt\", \"nd\", \"nch\", \"nk\", \"mph\",\n\t\t\t\t \"mth\", \"nth\", \"ngth\", \"ft\", \"sp\", \"st\", \"sk\", \"fth\", \"pt\", \"ct\", \"kt\", \"pth\", \"ghth\", \"tz\", \"dth\",\n\t\t\t\t \"ks\", \"lpt\", \"lfth\", \"ltz\", \"lst\", \"lct\", \"lx\",\"rmth\", \"rpt\", \"rtz\", \"rst\", \"rct\",\"mpt\", \"dth\",\n\t\t\t\t \"nct\", \"nx\", \"xth\", \"xt\"] #Plus normal consonants\n\n\t\tsimpleLetterChance = 65 #percent, whether a single letter is chosen instead of an onset/nucleus/coda\n\t\tbasicLetterChance = 75 #percent, whether a simple consonant/vowel is chosen over a more rare one\n\n\t\t#Prevent unnecessary and ugly code repetition\n\n\t\t#Start the word\n\t\trepeats = 1\n\t\tif parameters and len(parameters) > 0:\n\t\t\trepeats = SharedFunctions.parseInt(parameters[0], 1, 1, 25)\n\n\t\twords = []\n\t\tfor i in xrange(0, repeats):\n\t\t\tsyllableCount = 2\n\t\t\tif random.randint(1, 100) <= 50:\n\t\t\t\tsyllableCount -= 1\n\t\t\tif random.randint(1, 100) <= 35:\n\t\t\t\tsyllableCount += 1\n\n\t\t\tword = u\"\"\n\t\t\tfor j in range(0, syllableCount):\n\t\t\t\t#In most cases, add an onset\n\t\t\t\tif random.randint(1, 100) <= 75:\n\t\t\t\t\tif random.randint(1, 100) <= simpleLetterChance:\n\t\t\t\t\t\tword += self.getBasicOrSpecialLetter(\"consonant\", basicLetterChance)\n\t\t\t\t\telse:\n\t\t\t\t\t\tword += random.choice(onsets)\n\n\t\t\t\t#Nucleus!\n\t\t\t\tif random.randint(1, 100) <= simpleLetterChance:\n\t\t\t\t\tword += self.getBasicOrSpecialLetter(\"vowel\", basicLetterChance)\n\t\t\t\telse:\n\t\t\t\t\tword += random.choice(nuclei)\n\n\t\t\t\t#Add a coda in most cases (Always add it if this is the last syllable of the word and it'd be too short otherwise)\n\t\t\t\tif (j == syllableCount - 1 and len(word) < 3) or random.randint(1, 100) <= 75:\n\t\t\t\t\tif random.randint(1, 100) <= simpleLetterChance:\n\t\t\t\t\t\tword += self.getBasicOrSpecialLetter(\"consonant\", basicLetterChance)\n\t\t\t\t\telse:\n\t\t\t\t\t\tword += random.choice(codas)\n\n\t\t\tword = word[0].upper() + word[1:]\n\t\t\twords.append(word)\n\n\t\treturn u\", \".join(words)", "def generate():\n PackCommandExecutor().pack()\n GenerateCommandExecutor().generate()", "def crossword_words(crossword: list) -> list:\n pass", "def make(self):\n return 
_spacegrant_swig.G3RUH_descramble_sptr_make(self)", "def generate(model, voc, maxlen=20, diversity=0.5, numchars=100):\n\n text, char_indices, indices_char = voc\n chars = set(text)\n start_index = random.randint(0, len(text) - maxlen - 1) \n generated = ''\n sentence = text[start_index: start_index + maxlen]\n #print(\"Insert text to start from [min 20 chars]:\")\n #sentence = str(raw_input())\n #sentence = sentence[:maxlen]\n generated += sentence\n print('----- Generating with seed: \"' + sentence + '\"')\n sys.stdout.write(generated)\n\n for i in range(numchars):\n x = np.zeros((1, maxlen, len(chars)))\n for t, char in enumerate(sentence):\n x[0, t, char_indices[char]] = 1.\n \n preds = model.predict(x, verbose=0)[0]\n next_index = sample(preds, diversity)\n next_char = indices_char[next_index]\n generated += next_char\n sentence = sentence[1:] + next_char\n sys.stdout.write(next_char)\n sys.stdout.flush()\n print()", "def generate(self):\n pass", "def cmd_generate(argv):\n description = inspect.getdoc(cmd_generate)\n parser = ArgumentParser(description=description)\n parser.add_argument(\"-o\",\"--output\", action=\"store\", dest=\"output\",\n default=\"dhall_key\", help=\"outpuf filename\")\n args = parser.parse_args(argv)\n\n import dhall.util\n dhall.util.generate_keys(key=args.output)", "def _create_generate_input(self):\n self.keep_prob = 1.", "def vector_cross(v, w):\n res = np.cross(v, w)\n\n if len(v) == 3:\n return Vector(*res)\n else:\n return res", "def generate(self):\n pass", "def generate(self):\n pass", "def generate(self):\n pass", "def generate(self, text):\n return self.generate_from_text(text)", "def cbow(currentWord, C, contextWords, tokens, inputVectors, outputVectors,\n dataset, word2vecCostAndGradient = softmaxCostAndGradient):\n\n # Implement the continuous bag-of-words model in this function.\n # Input/Output specifications: same as the skip-gram model\n # We will not provide starter code for this function, but feel\n # free to reference the code you previously wrote for this\n # assignment!\n\n #################################################################\n # IMPLEMENTING CBOW IS EXTRA CREDIT, DERIVATIONS IN THE WRIITEN #\n # ASSIGNMENT ARE NOT! 
#\n #################################################################\n\n ### YOUR CODE HERE\n (N, D) = inputVectors.shape\n\n target_index = tokens[currentWord]\n\n assert len(contextWords) == 2*C\n context_indices = np.zeros(2*C, dtype = np.uint32)\n for i, w in enumerate(contextWords): context_indices[i] = tokens[w]\n\n v = inputVectors[context_indices, :]\n\n vhat = np.mean(v, axis = 0)\n\n (cost, gin, gradOut) = word2vecCostAndGradient(vhat, target_index, outputVectors, dataset)\n\n gradIn = np.zeros_like(inputVectors)\n np.add.at(gradIn, context_indices, gin / (2*C))\n\n ### END YOUR CODE\n\n return cost, gradIn, gradOut", "def create_word(char_list):", "def make(self):\n return _spacegrant_swig.binary_sink_sptr_make(self)", "def build_csp(puzzle):\n # Enter your code here and remove the pass statement below\n variables = [(a,b) for a in range(0,9) for b in range(0,9)]\n domain = {}\n for x in variables:\n if x in puzzle:\n domain[x] = {puzzle[x]}\n else:\n domain[x] = {1, 2, 3, 4, 5, 6, 7, 8, 9}\n neighbors = get_neighbors(variables)\n constraint = get_constrains(neighbors)\n\n mySudoku = csp.CSP(domain, neighbors, constraint)\n return mySudoku", "def pp_gen(source, target, env, indent):\n action = []\n nenv = env.Clone()\n cccom = nenv.subst(\"$CCCOM\").replace(\" -o \", \" \")\n for src, tgt in zip(source, target):\n action.append(\"%s -E -P %s | %s > %s\" % (cccom, src, indent, tgt))\n return action", "def create_new_doc(self, doc: Doc, min_prob: float = 0.25) -> Doc:\n\n # print(\"running on\", doc[:10])\n\n if not self.form_frequencies:\n raise RuntimeError(\n \"Cannot truecase without a dictionary of form frequencies\")\n\n tokens = []\n spaces = []\n doctext = doc.text\n for tok in doc:\n toktext = tok.text\n\n # We only change casing for words in Title or UPPER\n if tok.is_alpha and toktext[0].isupper():\n cond1 = tok.is_upper and len(toktext) > 2 # word in uppercase\n cond2 = toktext[0].isupper(\n ) and not tok.is_sent_start # titled word\n if cond1 or cond2:\n token_lc = toktext.lower()\n if token_lc in self.form_frequencies:\n frequencies = self.form_frequencies[token_lc]\n if frequencies.get(toktext, 0) < min_prob:\n alternative = sorted(\n frequencies.keys(), key=lambda x: frequencies[x])[-1]\n\n # We do not change from Title to to UPPER\n if not tok.is_title or not alternative.isupper():\n toktext = alternative\n\n tokens.append(toktext)\n\n # Spacy needs to know whether the token is followed by a space\n if tok.i < len(doc)-1:\n spaces.append(doctext[tok.idx+len(tok)].isspace())\n else:\n spaces.append(False)\n\n # Creates a new document with the tokenised words and space information\n doc2 = Doc(self.model.vocab, words=tokens, spaces=spaces) #type: ignore\n # print(\"finished with doc\", doc2[:10])\n return doc2", "def create_sudoku_csp(filename):\n csp = CSP()\n board = map(lambda x: x.strip(), open(filename, 'r'))\n\n for row in range(9):\n for col in range(9):\n if board[row][col] == '0':\n csp.add_variable('%d-%d' % (row, col), map(str, range(1, 10)))\n else:\n csp.add_variable('%d-%d' % (row, col), [ board[row][col] ])\n\n for row in range(9):\n csp.add_all_different_constraint([ '%d-%d' % (row, col) for col in range(9) ])\n for col in range(9):\n csp.add_all_different_constraint([ '%d-%d' % (row, col) for row in range(9) ])\n for box_row in range(3):\n for box_col in range(3):\n cells = []\n for row in range(box_row * 3, (box_row + 1) * 3):\n for col in range(box_col * 3, (box_col + 1) * 3):\n cells.append('%d-%d' % (row, col))\n 
csp.add_all_different_constraint(cells)\n\n return csp", "def newKeyGenerate():\n generate()\n return '', 204", "def cross(vec1, vec2):\n result = np.zeros(3)\n return cross_(vec1, vec2, result)", "def generate(self):\r\n raise NotImplementedError", "def generate_command(ctx, template):\n config_extension = '.py'\n template_extension = '.html'\n output_extension = '.pdf'\n\n context = Context(\n config=path.join(\n ctx.obj.get(\"config_path\"),\n template + config_extension\n ),\n template_path=path.join(\n ctx.obj.get(\"template_path\"),\n template + template_extension\n ),\n variables=ctx.obj.get(\"varibales\"),\n output_path=path.join(\n ctx.obj.get(\"output_path\"),\n template, output_extension),\n )\n\n generator = Generator(context)\n generator.execute()\n exit(0)", "def make(*args, **kwargs):\n return _TestA_swig.cleanslate_make(*args, **kwargs)", "def dctCreate(pdct, msgBufSize):\n return _dctmcc.dctCreate(pdct, msgBufSize)", "def generate(cls, **kwargs) -> str:\n pass", "def _make_cpp_trade(id, timestamp, maker_orders=None, taker_order=None):\n return TradeCpp(id, timestamp, maker_orders or deque(), taker_order)", "def crossover(v1, v2):\n idx1 = np.random.choice(v1.size, size=int(v1.size/2))\n idx2 = np.random.choice(v2.size, size=int(v2.size/2))\n data = np.array([v1.data[i] for i in idx1] +\n [v2.data[i] for i in idx2])\n idx = np.array([v1.indices[i] for i in idx1] +\n [v2.indices[i] for i in idx2])\n v3 = sp.sparse.csc_matrix((data, (idx, np.zeros(idx.shape, dtype=int))),\n shape=v1.shape)\n return v3", "def generate(self, num_words):\n\n if self.save_training_data:\n data = open(self.save_filename, 'r').read()\n self.main_hash = json.loads(data)\n\n first_word = self.__random_word(self.main_hash)\n second_word = self.__random_word(self.main_hash[first_word]) if self.main_hash[first_word] else self.__random_word(self.main_hash)\n new_book = first_word + \" \" + second_word\n\n while num_words > 0:\n third_word = self.__generate_word(first_word, second_word)\n first_word = second_word\n second_word = third_word\n\n new_book += \" \" + third_word\n num_words -= 1\n\n return new_book", "def gen_data(self,do_print=True,force_gen_inputs=False):\n\n \n if do_print:\n print\n print 'Generating corr space data, id = %s'%self.id\n \n self.post_init(force_gen_inputs=force_gen_inputs)\n self.run()\n self.post_run()", "def generate(v, vendors):\n return vendors[v].new_card()", "def make_text(markov_chains):\n\n random_num = generate_random_number(markov_chains.keys())\n\n random_text = []\n\n start_words = generate_start_words(random_num, markov_chains.keys())\n \n random_text.extend(start_words)\n\n\n for i in range(500):\n word_tuple = (random_text[-2],random_text[-1])\n next_word = add_next_word(word_tuple, markov_chains)\n random_text.append(next_word)\n\n return random_text", "def generate_cross_experiment_key(self):\n parameters = dict(\n metrics_params=self.metrics_params,\n cv_params=self.cv_params,\n target_column=self.target_column,\n id_column=self.id_column,\n do_predict_proba=self.do_predict_proba,\n prediction_formatter=self.prediction_formatter,\n train_dataset=self.train_dataset,\n test_dataset=self.test_dataset,\n holdout_dataset=self.holdout_dataset,\n cross_experiment_params=self.cross_experiment_params.copy(),\n to_csv_params=self.to_csv_params,\n )\n\n #################### Revert Aliases for Compatibility ####################\n # If any aliases were used during call to `Environment.__init__`, replace the default names\n # in `parameters` with the alias used. 
This ensures compatibility with Environment keys\n # made in earlier versions\n aliases_used = getattr(self, \"__hh_aliases_used\", {})\n\n # noinspection PyUnusedLocal\n def _visit(path, key, value):\n if key in aliases_used:\n key = aliases_used.pop(key)\n return (key, value)\n\n if aliases_used:\n parameters = remap(parameters, visit=_visit)\n\n #################### Make `cross_experiment_key` ####################\n self.cross_experiment_key = CrossExperimentKeyMaker(parameters)", "async def generate(self):\n lastword = \"\"\n previous = \"\"\n out = []\n\n while True:\n key = self._make_key(self.separator.join([previous, lastword]))\n word = await self.db.random(key)\n if not word or word == self.stop_word:\n break\n out.append(word)\n previous = lastword\n lastword = word\n return \" \".join(out)", "def make_word(self, offset):\n self.ret = idaapi.create_word(offset, 2)\n return self.ret", "def card_generator(mw, prob_dist_dict, gensim_model):\n\n # First things first: make sure that the word is actually in the word2vec vocab.\n # word_vectors = gensim_model.wv\n if mw not in gensim_model.wv.vocab:\n return False\n\n # Generate five categories with the weighted probabilities based on their frequency in the gold standard data.\n five_semrels_list = select_five_categories(prob_dist_dict)\n five_semrels = pd.Series(five_semrels_list)\n\n # Count the number of instances of each semrel category in that list.\n semrels_counts = dict( five_semrels.value_counts() )\n\n # Generate the semantic relations dictionary.\n srdict = sr.make_semrel_dict(mw)\n\n # Rejig five_semrels_list, if need be, to one whose labels are compatible with the cardinality of the sets available\n # in srdict.\n good_five_labels = get_good_label_distrib(srdict, semrels_counts)\n\n # Now we just populate a list with the required number of each kind of word!\n # First, initialise list to contain the five final Taboo words (yay!)\n tws = []\n\n # Go through good_five_labels and, for the labels that aren't 'collocation', access their list in the dictionary and\n # randomly select however many out of it.\n for label, count in good_five_labels.items():\n if label != 'collocation':\n tws.extend( rd.sample( tuple( srdict[label] ), count ) )\n\n # Now, take the number of collocations needed and return the most similar words according to gensim, removing the\n # words that are forbidden (i.e. the main word and also the other words that are already in tws)\n forbidden_words = set(tws + [mw])\n num_coll = good_five_labels['collocation']\n collocates = sr.get_collocations(mw, forbidden_words, gensim_model, num_collocates = num_coll)\n\n # If there are more collocates than needed, randomly select num_coll of them and add to tws. 
Else just add list to tws.\n if len(collocates) > num_coll:\n tws.extend( rd.sample( tuple(collocates), num_coll ) )\n else:\n tws.extend(collocates)\n\n return {mw: tws}", "def generate():\n s = random_data.random_bytes(100)\n return generate_from_string(s)", "def create(cls):\n ret_val = gxapi_cy.WrapDMPPLY._create(GXContext._get_tls_geo())\n return GXDMPPLY(ret_val)", "def generate(env):\n\n indent = find_indent()\n\n generator = lambda source, target, env, for_signature: pp_gen(source,\n target,\n env, indent)\n\n # Only handle C for now\n preprocess = Builder(generator=generator, suffix=\"_pp.c\",\n emitter=preprocess_emitter, src_suffix=\".c\")\n\n env.Append(BUILDERS={\"Preprocess\":preprocess})", "def cbow(currentWord, C, contextWords, tokens, inputVectors, outputVectors,\n dataset, word2vecCostAndGradient=softmaxCostAndGradient):\n\n cost = 0.0\n gradIn = np.zeros(inputVectors.shape)\n gradOut = np.zeros(outputVectors.shape)\n\n ### YOUR CODE HERE\n target = tokens[currentWord]\n\n contextWordVector = np.zeros(inputVectors.shape[1])\n contextWordTokens =[]\n for word in contextWords:\n contextWordTokens.append(tokens[word])\n contextWordVector += inputVectors[tokens[word],:]\n contextWordVector = contextWordVector\n \n #use the context words to predict the target/current word\n singleCost, singleGradIn, singleGradOut = word2vecCostAndGradient(contextWordVector,target,outputVectors,dataset)\n \n ### END YOUR CODE\n\n cost = singleCost\n gradOut = singleGradOut\n\n #should be able to deal with repeated context words\n for inToken in contextWordTokens:\n gradIn[inToken,:] += singleGradIn\n \n return cost, gradIn, gradOut", "def createCasaTool(mytool):\n if (type(casac.Quantity) != type): # casa 4.x\n myt = mytool()\n else: # casa 3.x\n myt = mytool.create()\n return(myt)", "def generate_text_owc(model: Dict[str, Set[str]], n: int) -> str:\n # ACCUMULATOR: a list of the randomly-generated words so far\n words_so_far = []\n # We've provided this template as a starting point; you may modify it as necessary.\n words_so_far.append(generate_new_word(model))\n for x in range(0, n-1):\n key = words_so_far[x]\n new_word = generate_next_word(model,key)\n if new_word == \".\":\n words_so_far[x] = words_so_far[x]+'.'\n new_word= generate_new_word(model)\n elif new_word == {}:\n new_word = generate_new_word(model)\n words_so_far.append(new_word)\n\n return str.join(' ', words_so_far)", "def make_words(self,lm):\n if \" \" in self.corpus[0] and \" \" in self.corpus[1]: \n print \"assuming BLICK\"\n self.corpus = [convert_to_disc(i) for i in self.corpus]\n else:\n self.disc = 1\n print \"assuming Disc\" \n if not os.path.isfile(self.f): ##check if it already exists\n print \"generating 10 million words\"\n outfile = open(self.f, \"w\")\n outfile.write(\"word,blick,ngram,Real,T,disc\\n\")\n for word in self.corpus:\n write_row_of_bigmatch(word, self.disc, outfile, lm, \"Real\", \"1\")\n while len(self.wordlist)<10000000: \n words = lm.generate(100)\n for word in words:\n if word not in self.wordlist and len(word) < 9: #keep only words less than len9\n write_row_of_bigmatch(word, self.disc, outfile, lm, \"Simulated\", \"0\")\n self.wordlist[word] = 0\n return", "async def generate(self) -> result.GeneratedCircuit:\n # TODO: There something distorted with regards to the singleton and the configuration. 
Also, the need to pass\n # conf here and not in init is weird.\n wrapper = api_wrapper.ApiWrapper()\n generation_result = await wrapper.call_generation_task(self._constraints)\n\n if generation_result.status != result.GenerationStatus.SUCCESS:\n raise Exception(f\"Generation failed: {generation_result.details}\")\n\n return generation_result.details", "def generate_sentence():\n markov_chain = makeMarkovDict(\"text.txt\")\n\n # Pick a random word to begin with.\n first_word = random.choice(markov_chain.keys()) # Illegall\n\n # print first_word\n # random_choice = random.randint(0, len(markov_chain.keys()))\n # index = 0\n # first_word = \"\"\n # for word in markov_chain:\n # print word\n # if index == random_choice:\n # first_word = word\n # break\n # index += 1\n\n # Based on that word, call function to chose the next word.\n # print markov_chain[first_word]\n # print word_selection(markov_chain[first_word])\n\n lenght_of_sentence = 10\n sentence = [first_word] # First word already in there\n for i in range(lenght_of_sentence):\n sentence.append(word_selection(markov_chain[sentence[i]]))\n # Sentence after loop: ['fish', 'red', 'fish', 'two', 'fish', 'red', 'fish', 'red', 'fish', 'two', 'fish']\n\n # Cap with letter and add period at the end.\n final_sentece = \" \".join(sentence) + \".\"\n return final_sentece.capitalize()", "def _create_crosslist(document_id: int, categories: List[str], paper_id: str,\n version: int, submission: Submission,\n created: datetime) -> models.Submission:\n dbs = models.Submission(type=models.Submission.CROSS_LIST,\n document_id=document_id,\n version=version)\n dbs.update_cross(submission, categories, paper_id, version, created)\n return dbs", "def crss(nds):\n cross_product = []\n nds = nodes[:, array(nds) - 1]\n for i, j, k, l in zip([2, 3, 1, 4, 6, 7, 5, 8], [4, 1, 3, 2, 8, 5, 7, 6], \n [5, 6, 8, 7, 1, 2, 4, 3], [1, 2, 4, 3, 5, 6, 8, 7]):\n p1 = nds[:, i - 1]\n p2 = nds[:, j - 1]\n p3 = nds[:, k - 1]\n p0 = nds[:, l - 1] \n u = p1 - p0\n v = p2 - p0\n w = p3 - p0 \n c = array([u[1]*v[2] - u[2]*v[1],\n u[2]*v[0] - u[0]*v[2],\n u[0]*v[1] - u[1]*v[0]])\n cross_product.append(dot(w, c)) \n \n for i in range(4, 8):\n cross_product[i] = - cross_product[i]\n \n return cross_product", "def create_embedding(self):\n self.embedding = []\n\n for index in range(1,self.args.window_size+1):\n print(\"\\nOptimization round: \" +str(index)+\"/\"+str(self.args.window_size)+\".\")\n print(\"Creating documents.\")\n clean_documents = self.walk_extracts(index)\n print(\"Fitting model.\")\n model = Word2Vec(clean_documents,\n size = self.args.dimensions,\n window = 1,\n min_count = self.args.min_count,\n sg = 1,\n workers = self.args.workers)\n\n new_embedding = self.get_embedding(model)\n self.embedding = self.embedding +[new_embedding]\n self.embedding = np.concatenate(self.embedding, axis = 1)", "def CreateCutFlowSvc( svcName=\"CutFlowSvc\", seq=None, addMetaDataToAllOutputFiles=True ):\n # Create a message logger\n from AthenaCommon.Logging import logging\n msg = logging.getLogger( \"Create\"+svcName )\n\n # Get the service manager\n from AthenaCommon.AppMgr import ServiceMgr as svcMgr\n\n # Determine current input stream name\n inputStreamName = GetCurrentStreamName( msg=msg )\n msg.debug(\"CreateCutFlowSvc: Have inputStreamName = %s\" % (inputStreamName) )\n\n # Create the CutFlowSvc instance\n import AthenaCommon.CfgMgr as CfgMgr\n if not hasattr(svcMgr,\"CutFlowSvc\"): svcMgr += CfgMgr.CutFlowSvc()\n svcMgr.CutFlowSvc.InputStream = inputStreamName\n\n # Make sure 
MetaDataSvc is ready\n if not hasattr(svcMgr,'MetaDataSvc'):\n from AthenaServices.AthenaServicesConf import MetaDataSvc\n svcMgr += MetaDataSvc( \"MetaDataSvc\" )\n\n # Add BookkeeperTools\n from EventBookkeeperTools.EventBookkeeperToolsConf import BookkeeperTool\n\n # Standard event bookkeepers\n primary_name = \"CutBookkeepers\"\n cutflowtool = BookkeeperTool(primary_name + \"Tool\",\n InputCollName = primary_name,\n OutputCollName= primary_name)\n svcMgr.ToolSvc += cutflowtool\n\n # Add tool to MetaDataSvc\n svcMgr.MetaDataSvc.MetaDataTools += [cutflowtool]\n\n # Check if we have a sequence given\n if not seq :\n # Fetch the AthAlgSeq, i.e., one of the existing master sequences where one should attach all algorithms\n seq = CfgMgr.AthSequencer(\"AthAlgSeq\")\n pass\n\n # First of all, schedule EventCounterAlg\n if not hasattr(seq,\"AllExecutedEvents\"):\n if not seq.isLocked():\n # Need to schedule it after the xAODMaker::EventInfoCnvAlg such that xAOD::EventInfo is present\n index = 0\n if hasattr( seq, \"xAODMaker::EventInfoCnvAlg\" ):\n for alg in seq:\n index += 1\n if alg.getName() == \"xAODMaker::EventInfoCnvAlg\": break\n pass\n pass\n msg.debug(\"Adding EventCounterAlg with name AllExecutedEvents to sequence with name %s at position %i\" % (seq.getName(),index))\n seq.insert( index, CfgMgr.EventCounterAlg(\"AllExecutedEvents\") )\n pass\n else :\n msg.info(\"Could NOT add EventCounterAlg with name AllExecutedEvents to locked sequence with name %s\" % seq.getName())\n pass\n pass\n\n # If wanted, add the meta-data to all output files\n if addMetaDataToAllOutputFiles:\n msg.debug(\"Adding CutBookkeepers the the output meta data of all output streams\")\n from OutputStreamAthenaPool.MultipleStreamManager import MSMgr\n # Explicitely add file metadata from input and from transient store,\n # but only the ones that we always create.\n MSMgr.AddMetaDataItemToAllStreams( \"xAOD::CutBookkeeperContainer#\"+primary_name )\n MSMgr.AddMetaDataItemToAllStreams( \"xAOD::CutBookkeeperAuxContainer#\"+primary_name+\"Aux.*\" )\n MSMgr.AddMetaDataItemToAllStreams( \"xAOD::CutBookkeeperContainer#Incomplete\"+primary_name )\n MSMgr.AddMetaDataItemToAllStreams( \"xAOD::CutBookkeeperAuxContainer#Incomplete\"+primary_name+\"Aux.*\" )\n pass\n\n return", "def generate_new_word(model: Dict[str, Set[str]]) -> str:\n all_keys = list(dict.keys(model))\n return random.choice(all_keys)", "def generate(env) -> None:\n c_file, cxx_file = SCons.Tool.createCFileBuilders(env)\n\n # C\n c_file.add_action(\".l\", LexAction)\n c_file.add_emitter(\".l\", lexEmitter)\n\n c_file.add_action(\".lex\", LexAction)\n c_file.add_emitter(\".lex\", lexEmitter)\n\n # Objective-C\n cxx_file.add_action(\".lm\", LexAction)\n cxx_file.add_emitter(\".lm\", lexEmitter)\n\n # C++\n cxx_file.add_action(\".ll\", LexAction)\n cxx_file.add_emitter(\".ll\", lexEmitter)\n\n if sys.platform == 'win32':\n # ignore the return, all we need is for the path to be added\n _ = get_lex_path(env, append_paths=True)\n\n env.SetDefault(\n LEX=env.Detect(BINS),\n LEXFLAGS=CLVar(\"\"),\n LEX_HEADER_FILE=\"\",\n LEX_TABLES_FILE=\"\",\n )\n\n if sys.platform == 'win32':\n env.SetDefault(LEXUNISTD=CLVar(\"\"))\n env[\"LEXCOM\"] = \"$LEX $LEXUNISTD $LEXFLAGS $_LEX_HEADER $_LEX_TABLES -t $SOURCES > $TARGET\"\n else:\n env[\"LEXCOM\"] = \"$LEX $LEXFLAGS $_LEX_HEADER $_LEX_TABLES -t $SOURCES > $TARGET\"\n\n env['_LEX_HEADER'] = '${LEX_HEADER_FILE and \"--header-file=\" + str(LEX_HEADER_FILE)}'\n env['_LEX_TABLES'] = '${LEX_TABLES_FILE and \"--tables-file=\" + 
str(LEX_TABLES_FILE)}'", "def cross(a, b):\n c1 = a[1]*b[2] - a[2]*b[1]\n c2 = a[2]*b[0] - a[0]*b[2]\n c3 = a[0]*b[1] - a[1]*b[0]\n return sp.array([c1,c2,c3])", "def calculate_construction(self, word):\r\n \r\n construction = \"\"\r\n for c in word.lower():\r\n if c in self.vowels:\r\n construction += \"v\"\r\n elif c in letters:\r\n construction += \"c\"\r\n return construction", "def create():", "def create():", "def initializeFixationCross(win):\n fixationCross = visual.TextStim(win=win, ori=0, name='fixationCross',\n text='+', font='Arial',\n pos=[0, 0], height=0.1, wrapWidth=None,\n color='white', colorSpace='rgb', opacity=1,\n depth=0.0, units = \"norm\")\n return(fixationCross)", "def generate(self):\n lastword = \"\"\n previous = \"\"\n out = []\n\n while True:\n key = self._make_key(self.separator.join([previous, lastword]))\n word = self.db.random(key)\n if not word or word == self.stop_word:\n break\n out.append(word)\n previous = lastword\n lastword = word\n return \" \".join(out)", "def create_character(c: Character) -> Character:\n c.create_character(c.dna_generator)\n return c", "def generate(env):\n\n gcc.generate(env)\n\n # Set up standard folder locations\n env.SetDefault(SDK_TOOLS = env['TOOLS_ROOT'] + '/tools')\n env.SetDefault(KCC_DIR = env['SDK_TOOLS'] + '/kcc/bin')\n\n env['KCC'] = _detect(env)\n env['AS'] = '$KCC'\n env['CC'] = '$KCC'\n env['OBJSUFFIX'] = '.o'\n env['BUILDERS']['AsmObject'] = _kccAsmBuilder", "def __init__(self):\n super().__init__(\"ccx\", 3, [])", "def genSCID():\n scid_hex = getRandomBytes(8)\n scid_hex = getSHA256Hex(scid_hex)\n scid_hex = scid_hex[0:8]\n return scid_hex", "def command_create(self):\n command = []\n for macro in self.my_xml.tool_data[self.shell_dict['short_name']]['pre_tmpls']:\n command.append(self.my_xml.chth_tmpl.substitute(macro=macro))\n command.extend(self.pre_chth)\n command.append(Template('@CMD_BEGIN@ $short_name').substitute(self.shell_dict))\n command.extend(self.tool_chth)\n for macro in self.my_xml.tool_data[self.shell_dict['short_name']]['post_tmpls']:\n command.append(self.my_xml.chth_tmpl.substitute(macro=macro))\n\n return '\\n'.join(command)", "def generate(self):\n raise NotImplementedError", "def generateWord(self, parameters=None):\n\t\t# Initial set-up\n\t\tvowels = ['a', 'e', 'i', 'o', 'u']\n\t\tspecialVowels = ['y']\n\n\t\tconsonants = ['b', 'c', 'd', 'f', 'g', 'h', 'k', 'l', 'm', 'n', 'p', 'r', 's', 't']\n\t\tspecialConsonants = ['j', 'q', 'v', 'w', 'x', 'z']\n\n\t\tnewLetterFraction = 5\n\t\tvowelChance = 50 #percent\n\n\t\t#Determine how many words we're going to have to generate\n\t\trepeats = 1\n\t\tif parameters and len(parameters) > 0:\n\t\t\trepeats = SharedFunctions.parseInt(parameters[0], 1, 1, 25)\n\n\t\twords = []\n\t\tfor i in xrange(0, repeats):\n\t\t\tword = u\"\"\n\t\t\tcurrentVowelChance = vowelChance\n\t\t\tcurrentNewLetterFraction = newLetterFraction\n\t\t\tconsonantCount = 0\n\t\t\twhile random.randint(0, currentNewLetterFraction) <= 6:\n\t\t\t\tif random.randint(1, 100) <= currentVowelChance:\n\t\t\t\t\tconsonantCount = 0\n\t\t\t\t\t#vowel. 
Check if we're going to add a special or normal vowel\n\t\t\t\t\tif random.randint(1, 100) <= 10:\n\t\t\t\t\t\tword += random.choice(specialVowels)\n\t\t\t\t\t\tcurrentVowelChance -= 30\n\t\t\t\t\telse:\n\t\t\t\t\t\tword += random.choice(vowels)\n\t\t\t\t\t\tcurrentVowelChance -= 20\n\t\t\t\telse:\n\t\t\t\t\tconsonantCount += 1\n\t\t\t\t\t#consonant, same deal\n\t\t\t\t\tif random.randint(1, 100) <= 25:\n\t\t\t\t\t\tword += random.choice(specialConsonants)\n\t\t\t\t\t\tcurrentVowelChance += 30\n\t\t\t\t\telse:\n\t\t\t\t\t\tword += random.choice(consonants)\n\t\t\t\t\t\tcurrentVowelChance += 20\n\t\t\t\t\tif consonantCount > 3:\n\t\t\t\t\t\tcurrentVowelChance = 100\n\t\t\t\tcurrentNewLetterFraction += 1\n\n\t\t\t#Enough letters added. Finish up\n\t\t\tword = word[0].upper() + word[1:]\n\t\t\twords.append(word)\n\n\t\t#Enough words generated, let's return the result\n\t\treturn u\", \".join(words)", "def __init__(self, dense_weight=1.0, cls_weight = 1.0, mixup_active=True, smoothing=0.1,\n classes = 1000):\n super(RelabelPooledCrossEntropy, self).__init__()\n\n\n self.CE = SoftTargetCrossEntropy()\n\n self.dense_weight = dense_weight\n self.smoothing = smoothing\n self.mixup_active = mixup_active\n self.classes = classes\n self.cls_weight = cls_weight\n assert dense_weight+cls_weight>0", "def crossword_solution() -> int:\n import os\n input_f = os.path.join(os.path.dirname(os.path.realpath(__file__)),\n 'downloadable_input.txt')\n with open(input_f, 'r') as f:\n lines = f.readlines()\n n, m = (int(x) for x in lines[0].split(' '))\n r = [int(x) for x in lines[1].split(' ')]\n c = [int(x) for x in lines[2].split(' ')]\n k = int(lines[3])\n return solution(n, m, r, c, k)", "def generate(cls):\n raise NotImplementedError()", "def generate(self, src_fname: str):\n fname, _ = os.path.splitext(src_fname)\n graph_name, _ = os.path.splitext(os.path.basename(self.pb_file))\n header_fname = '{}.hpp'.format(fname)\n header_snippet = Snippet(\"get_ctx.hpp\")\n header_snippet.template_vars[\"header_guard\"] = \"_{}_H\".format(fname.upper())\n header_snippet.template_vars[\"graph_name\"] = graph_name\n header_snippet.template_vars[\"placeholders\"] = []\n\n composer = Composer()\n container = SnippetContainer(\"get_ctx.cpp\")\n container.template_vars[\"graph_name\"] = graph_name\n container.template_vars[\"placeholders\"] = []\n container.add_header('\"{}\"'.format(header_fname))\n\n print(\"Parsing {}\".format(self.pb_file))\n graph_info, layers = parse_pb(self.pb_file)\n\n # TODO better snippet construction abstraction\n for layer_id, layer in enumerate(layers, 1):\n for op_name in layer:\n op_info = graph_info[op_name]\n op_type = op_info[\"op_type\"]\n if op_type == \"Placeholder\":\n out_tname, _, _ = op_info[\"output_tensor\"][0]\n container.template_vars[\"placeholders\"].append(out_tname)\n header_snippet.template_vars[\"placeholders\"].append(out_tname)\n elif op_type == 'Const':\n for out_tname, out_dtype, _ in op_info[\"output_tensor\"]:\n pre_tname = self._prepare_tensor_name(out_tname)\n idx_fname = \"{}.idx\".format(pre_tname)\n snippet = CreateTensorIdxSnippet(self.embed_data_dir, out_tname,\n idx_fname=idx_fname,\n tf_dtype=out_dtype)\n container.add_snippet(snippet)\n idx_path = os.path.join(self.idx_dir, idx_fname)\n value = op_info[\"output_content\"][out_tname]\n self._save_data(idx_path, value, out_dtype)\n elif op_type == \"Add\":\n inputs = [tname for tname, _, _ in op_info[\"input_tensor\"]]\n output, _, _ = op_info[\"output_tensor\"][0]\n tf_dtype = 
op_info[\"input_tensor\"][0][1]\n snippet = AddOpSnippet(inputs, output, tf_dtype=tf_dtype)\n container.add_snippet(snippet)\n elif op_type == \"ArgMax\":\n inputs = [tname for tname, _, _ in op_info[\"input_tensor\"]]\n output, out_dtype, _ = op_info[\"output_tensor\"][0]\n _, in_dtype, _ = op_info[\"input_tensor\"][0]\n snippet = ArgMaxOpSnippet(inputs, output, in_dtype, out_dtype)\n container.add_snippet(snippet)\n elif op_type == \"Dequantize\":\n inputs = [tname for tname, _, _ in op_info[\"input_tensor\"]]\n output, out_dtype, _ = op_info[\"output_tensor\"][0]\n snippet = DequantizeOpSnippet(inputs, output, out_dtype)\n container.add_snippet(snippet)\n elif op_type == \"Max\":\n inputs = [tname for tname, _, _ in op_info[\"input_tensor\"]]\n output, out_dtype, out_shape = op_info[\"output_tensor\"][0]\n if len(out_shape) == 0: # dirty hack for uTensor\n out_shape = [1]\n snippet = MaxOpSnippet(inputs, output, out_dtype, out_shape)\n container.add_snippet(snippet)\n elif op_type == \"Min\":\n inputs = [tname for tname, _, _ in op_info[\"input_tensor\"]]\n output, out_dtype, out_shape = op_info[\"output_tensor\"][0]\n if len(out_shape) == 0: # dirty hack for uTensor\n out_shape = [1]\n snippet = MinOpSnippet(inputs, output, out_dtype, out_shape)\n container.add_snippet(snippet)\n elif op_type == \"QuantizeV2\":\n inputs = [tname for tname, _, _ in op_info[\"input_tensor\"]]\n outputs = [tname for tname, _, _ in op_info[\"output_tensor\"]]\n out_dtype = op_info[\"output_tensor\"][0][1]\n snippet = QuantizeV2OpSnippet(inputs, outputs, out_dtype)\n container.add_snippet(snippet)\n elif op_type == \"QuantizedMatMul\":\n inputs = [tname for tname, _, _ in op_info[\"input_tensor\"]]\n outputs = [tname for tname, _, _ in op_info[\"output_tensor\"]]\n x_dtype = op_info[\"input_tensor\"][0][1]\n w_dtype = op_info[\"input_tensor\"][1][1]\n out_dtype = op_info[\"output_tensor\"][0][1]\n snippet = QuantizedMatMulOpSnippet(inputs, outputs, x_dtype, w_dtype, out_dtype)\n container.add_snippet(snippet)\n elif op_type == \"QuantizedRelu\":\n inputs = [tname for tname, _, _ in op_info[\"input_tensor\"]]\n outputs = [tname for tname, _, _ in op_info[\"output_tensor\"]]\n _, in_dtype, _ = op_info[\"input_tensor\"][0]\n _, qout_dtype, _ = op_info[\"output_tensor\"][0]\n out_dtypes = [t[1] for t in op_info[\"output_tensor\"][1:]]\n snippet = QuantizedReluOpSnippet(inputs, outputs, in_dtype, out_dtypes, qout_dtype)\n container.add_snippet(snippet)\n elif op_type == \"RequantizationRange\":\n inputs = [tname for tname, _, _ in op_info[\"input_tensor\"]]\n outputs = [tname for tname, _, _ in op_info[\"output_tensor\"]]\n _, out_dtype, _ = op_info[\"output_tensor\"][0]\n snippet = RequantizationRangeOpSnippet(inputs, outputs, out_dtype)\n container.add_snippet(snippet)\n elif op_type == \"Requantize\":\n inputs = [tname for tname, _, _ in op_info[\"input_tensor\"]]\n outputs = [tname for tname, _, _ in op_info[\"output_tensor\"]]\n _, qout_dtype, _ = op_info[\"output_tensor\"][0]\n _, range_dtype, _ = op_info[\"output_tensor\"][1]\n snippet = RequantizeOpSnippet(inputs, outputs, qout_dtype, range_dtype)\n container.add_snippet(snippet)\n elif op_type == \"Reshape\":\n inputs = [tname for tname, _, _ in op_info[\"input_tensor\"]]\n output, _, _ = op_info[\"output_tensor\"][0]\n snippet = ReshapeOpSnippet(inputs, output)\n container.add_snippet(snippet)\n else:\n raise ValueError(\"unsupported op type in uTensor: {}, try quantizing your graph\".format(op_type))\n if self.debug_cmt:\n comments = [\"<<< Graph 
Layer {}\".format(layer_id), \n \">>> Graph Layer {}\".format(layer_id+1)]\n cmt_snippet = CommentSnippet(comments)\n container.add_snippet(cmt_snippet)\n composer.add_snippet(container)\n\n print(\"Generate header file: {}\".format(header_fname))\n with open(header_fname, \"w\") as wf:\n wf.write(header_snippet.render())\n print(\"Generate source file: {}\".format(src_fname))\n with open(src_fname, \"w\") as wf:\n wf.write(composer.compose())", "def make(self):\n return _spacegrant_swig.message_debug_sptr_make(self)", "def _generate_string_seq():\n input_word_num = random.randint(1, config.MAX_INPUT_WORD_NUMBER)\n return ' '.join(resources.get_random_words(input_word_num))", "def create_program(template, func, loc=None):\n\n k_args = []\n\n func.set_cl_kernel_args()\n k_args.extend(func.cl_args_name)\n\n # Build the kernel args string.\n kernel_args = ',\\n '.join(k_args)\n \n # Get the kernel workgroup code\n workgroup_code = func.get_cl_workgroup_code()\n \n # Construct the neighbor loop code.\n neighbor_loop_code = \"for (int src_id=0; src_id<nbrs; ++src_id)\"\n\n return template%(locals())", "def cross(length = 10, width = 3, layer = 0):\n D = Device(name = 'cross')\n R = rectangle(size = (width, length), layer = layer)\n r1 = D.add_ref(R).rotate(90)\n r2 = D.add_ref(R)\n r1.center = (0,0)\n r2.center = (0,0)\n return D", "def generate_wordcloud_from_probabilities_and_words(prob, words, return_image=True, wordcloud_instance=None,\n **wordcloud_kwargs):\n\n if len(prob) != len(words):\n raise ValueError('`prob` and `words` must have the name length')\n if hasattr(prob, 'ndim') and prob.ndim != 1:\n raise ValueError('`prob` must be a 1D array or sequence')\n if hasattr(words, 'ndim') and words.ndim != 1:\n raise ValueError('`words` must be a 1D array or sequence')\n\n weights = dict(zip(words, prob))\n\n return generate_wordcloud_from_weights(weights, return_image=return_image,\n wordcloud_instance=wordcloud_instance, **wordcloud_kwargs)", "async def gpt2_generate(self, ctx, *, arg=''):\n print('Command gpt2_generate triggered')\n if gpt2.is_gpt2_downloaded(model_name=self.config['model_name']):\n generate_args = parse_generate_arguments(self.config)\n await ctx.send(\"Generating...\")\n sample = gpt2.generate(self.sess, prefix=arg, return_as_list=True, **generate_args)[0]\n await ctx.send(sample)\n else:\n await ctx.send(f\"ERROR: Model {self.config['model_name']} is not downloaded\")", "def generate_codewords(data: str, version: int, ec: EC_LEVEL) -> str:\n\n data_mode = optimal_data_mode(data)\n encoded_data = encode(data, data_mode)\n binary_data = _convert_to_binary(encoded_data, data_mode, version, ec)\n\n # Check if data is too long for specified version and EC level\n max_length = STREAM_LENGTH.get(str(version) + ec.name)\n if len(binary_data) > max_length:\n raise ValueError(\"The message to encode is too large for the specified version and EC level.\")\n\n # Retrieve constants for version and EC level\n ec_short = EC_SHORT.get(str(version) + ec.name)\n ec_long = EC_LONG.get(str(version) + ec.name)\n total_blocks = ec_short + ec_long\n\n blocks: List[str] = []\n ec_blocks: List[str] = []\n\n # Calculate block length\n short_length = len(binary_data) // total_blocks\n long_length = 0\n if ec_long > 0:\n # If there are long blocks, round short_length down to nearest multiply of 8 and add 1 byte to long_length\n short_length = short_length - (short_length % 8)\n long_length = short_length + 8\n\n # Divide the total amount of ecc codewords needed by the number of blocks to get # of 
ecc codewords per block\n ecc_amount = NUMBER_OF_ECC.get(str(version) + ec.name) // total_blocks\n\n for i in range(ec_short):\n # Short blocks\n # Generates the Reed-Solomon EC codewords and adds them to the ec_blocks list\n block = binary_data[short_length * i:short_length * (i + 1)] # get sublist that is 'short_length' long\n blocks.append(block)\n\n codewords = [block[j:j+8] for j in range(0, len(block), 8)]\n ec_blocks.append(rs.create_ecc_block(codewords, ecc_amount))\n\n offset = ec_short * short_length # create an offset to start reading after the data already read in short blocks\n for i in range(ec_long):\n # Long blocks\n block = binary_data[offset + long_length * i:offset + long_length * (i + 1)]\n blocks.append(block)\n\n codewords = [block[j:j + 8] for j in range(0, len(block), 8)]\n ec_blocks.append(rs.create_ecc_block(codewords, ecc_amount))\n\n final_stream = \"\"\n\n i = 0\n # The data from different blocks needs to be interleaved, meaning if we had 4 blocks, stream would look like:\n # A1, B1, C1, D1, A2, B2, C2, D2, ...\n while i < len(blocks[0]): # interleave up to length of short block (first block is always short block)\n for block in blocks:\n final_stream += block[i:i+8] # append byte, not bit\n i += 8\n\n # if there are long blocks, finish the interleaving using only data from long blocks\n # ie: A1, B1, C1, D1, C2, D2 if A, B are short and C, D are long\n while i < len(blocks[-1]): # last block is either short and we don't loop, or it is long and we loop until end.\n for block in blocks[ec_short:]: # get sublist of blocks list to only use long blocks\n final_stream += block[i:i+8]\n i += 8\n\n # Interleave the same way but with ECCs. Loop is simpler since EC blocks all have same length\n for i in range(0, ecc_amount * 8, 8): # increase i by 8 to add data byte by byte instead of bit by bit\n for block in ec_blocks:\n final_stream += block[i:i+8]\n\n # add remainder bits if needed\n final_stream += \"0\" * REMAINDER_BITS[version]\n\n return final_stream", "def generate(self) -> List[str]:\n\n self._reset()\n\n res = self._get_interactions(\n random.randint(self._min_seq_len, self._max_seq_len))\n\n self._add_guarded_first_named_alloc(res)\n\n if random.randint(0, 1):\n # Add some noise between source and destination\n # Is this helpful? Why?\n noise = self._get_interactions(\n random.randint(self._min_intervening_len,\n self._max_intervening_len))\n res.extend(noise)\n\n res.append(self._get_second_named_alloc())\n\n return stringify_sequence(res)", "def generate(self):\n\t\traise BuilderException(\"You can not use this class directly!\")", "def cross(v1, v2):\n return np.cross(v1, v2)", "def d_cross(a, b):\n d_cross = np.zeros((3, 3), dtype=float)\n for i in range(3):\n ei = np.zeros(3, dtype=float)\n ei[i] = 1.0\n d_cross[i] = np.cross(ei, b)\n return d_cross", "def generate(self):\n return self.gen.generate()", "def generate(env):\n\n # FIXME: this is already too late\n #if env.get('quiet', False):\n # quietCommandLines(env)\n\n # shortcuts\n debug = env['debug']\n machine = env['machine']\n platform = env['platform']\n x86 = env['machine'] == 'x86'\n gcc = env['platform'] in ('linux', 'freebsd', 'darwin')\n msvc = env['platform'] in ('windows', 'winddk', 'wince')\n\n # Tool\n if platform == 'winddk':\n env.Tool('winddk')\n elif platform == 'wince':\n env.Tool('wcesdk')\n else:\n env.Tool('default')\n\n # Put build output in a separate dir, which depends on the current\n # configuration. 
See also http://www.scons.org/wiki/AdvancedBuildExample\n build_topdir = 'build'\n build_subdir = env['platform']\n if env['dri']:\n build_subdir += \"-dri\"\n if env['llvm']:\n build_subdir += \"-llvm\"\n if env['machine'] != 'generic':\n build_subdir += '-' + env['machine']\n if env['debug']:\n build_subdir += \"-debug\"\n if env['profile']:\n build_subdir += \"-profile\"\n build_dir = os.path.join(build_topdir, build_subdir)\n # Place the .sconsign file in the build dir too, to avoid issues with\n # different scons versions building the same source file\n env['build'] = build_dir\n env.SConsignFile(os.path.join(build_dir, '.sconsign'))\n\n # C preprocessor options\n cppdefines = []\n if debug:\n cppdefines += ['DEBUG']\n else:\n cppdefines += ['NDEBUG']\n if env['profile']:\n cppdefines += ['PROFILE']\n if platform == 'windows':\n cppdefines += [\n 'WIN32',\n '_WINDOWS',\n '_UNICODE',\n 'UNICODE',\n # http://msdn2.microsoft.com/en-us/library/6dwk3a1z.aspx,\n 'WIN32_LEAN_AND_MEAN',\n 'VC_EXTRALEAN',\n '_CRT_SECURE_NO_DEPRECATE',\n ]\n if debug:\n cppdefines += ['_DEBUG']\n if platform == 'winddk':\n # Mimic WINDDK's builtin flags. See also:\n # - WINDDK's bin/makefile.new i386mk.inc for more info.\n # - buildchk_wxp_x86.log files, generated by the WINDDK's build\n # - http://alter.org.ua/docs/nt_kernel/vc8_proj/\n cppdefines += [\n ('_X86_', '1'),\n ('i386', '1'),\n 'STD_CALL',\n ('CONDITION_HANDLING', '1'),\n ('NT_INST', '0'),\n ('WIN32', '100'),\n ('_NT1X_', '100'),\n ('WINNT', '1'),\n ('_WIN32_WINNT', '0x0501'), # minimum required OS version\n ('WINVER', '0x0501'),\n ('_WIN32_IE', '0x0603'),\n ('WIN32_LEAN_AND_MEAN', '1'),\n ('DEVL', '1'),\n ('__BUILDMACHINE__', 'WinDDK'),\n ('FPO', '0'),\n ]\n if debug:\n cppdefines += [('DBG', 1)]\n if platform == 'wince':\n cppdefines += [\n '_CRT_SECURE_NO_DEPRECATE',\n '_USE_32BIT_TIME_T',\n 'UNICODE',\n '_UNICODE',\n ('UNDER_CE', '600'),\n ('_WIN32_WCE', '0x600'),\n 'WINCEOEM',\n 'WINCEINTERNAL',\n 'WIN32',\n 'STRICT',\n 'x86',\n '_X86_',\n 'INTERNATIONAL',\n ('INTLMSG_CODEPAGE', '1252'),\n ]\n if platform == 'windows':\n cppdefines += ['PIPE_SUBSYSTEM_WINDOWS_USER']\n if platform == 'winddk':\n cppdefines += ['PIPE_SUBSYSTEM_WINDOWS_DISPLAY']\n if platform == 'wince':\n cppdefines += ['PIPE_SUBSYSTEM_WINDOWS_CE']\n env.Append(CPPDEFINES = cppdefines)\n\n # C preprocessor includes\n if platform == 'winddk':\n env.Append(CPPPATH = [\n env['SDK_INC_PATH'],\n env['DDK_INC_PATH'],\n env['WDM_INC_PATH'],\n env['CRT_INC_PATH'],\n ])\n\n # C compiler options\n cflags = []\n if gcc:\n if debug:\n cflags += ['-O0', '-g3']\n else:\n cflags += ['-O3', '-g3']\n if env['profile']:\n cflags += ['-pg']\n if env['machine'] == 'x86':\n cflags += [\n '-m32',\n #'-march=pentium4',\n '-mmmx', '-msse', '-msse2', # enable SIMD intrinsics\n #'-mfpmath=sse',\n ]\n if env['machine'] == 'x86_64':\n cflags += ['-m64']\n cflags += [\n '-Wall',\n '-Wmissing-prototypes',\n '-Wno-long-long',\n '-ffast-math',\n '-pedantic',\n '-fmessage-length=0', # be nice to Eclipse\n ]\n if msvc:\n # See also:\n # - http://msdn.microsoft.com/en-us/library/19z1t1wy.aspx\n # - cl /?\n if debug:\n cflags += [\n '/Od', # disable optimizations\n '/Oi', # enable intrinsic functions\n '/Oy-', # disable frame pointer omission\n ]\n else:\n cflags += [\n '/Ox', # maximum optimizations\n '/Oi', # enable intrinsic functions\n '/Ot', # favor code speed\n #'/fp:fast', # fast floating point \n ]\n if env['profile']:\n cflags += [\n '/Gh', # enable _penter hook function\n '/GH', # enable _pexit hook 
function\n ]\n cflags += [\n '/W3', # warning level\n #'/Wp64', # enable 64 bit porting warnings\n ]\n if env['machine'] == 'x86':\n cflags += [\n #'/QIfist', # Suppress _ftol\n #'/arch:SSE2', # use the SSE2 instructions\n ]\n if platform == 'windows':\n cflags += [\n # TODO\n ]\n if platform == 'winddk':\n cflags += [\n '/Zl', # omit default library name in .OBJ\n '/Zp8', # 8bytes struct member alignment\n '/Gy', # separate functions for linker\n '/Gm-', # disable minimal rebuild\n '/WX', # treat warnings as errors\n '/Gz', # __stdcall Calling convention\n '/GX-', # disable C++ EH\n '/GR-', # disable C++ RTTI\n '/GF', # enable read-only string pooling\n '/G6', # optimize for PPro, P-II, P-III\n '/Ze', # enable extensions\n '/Gi-', # disable incremental compilation\n '/QIfdiv-', # disable Pentium FDIV fix\n '/hotpatch', # prepares an image for hotpatching.\n #'/Z7', #enable old-style debug info\n ]\n if platform == 'wince':\n # See also C:\\WINCE600\\public\\common\\oak\\misc\\makefile.def\n cflags += [\n '/Zl', # omit default library name in .OBJ\n '/GF', # enable read-only string pooling\n '/GR-', # disable C++ RTTI\n '/GS', # enable security checks\n # Allow disabling language conformance to maintain backward compat\n #'/Zc:wchar_t-', # don't force wchar_t as native type, instead of typedef\n #'/Zc:forScope-', # don't enforce Standard C++ for scoping rules\n #'/wd4867',\n #'/wd4430',\n #'/MT',\n #'/U_MT',\n ]\n # Automatic pdb generation\n # See http://scons.tigris.org/issues/show_bug.cgi?id=1656\n env.EnsureSConsVersion(0, 98, 0)\n env['PDB'] = '${TARGET.base}.pdb'\n env.Append(CFLAGS = cflags)\n env.Append(CXXFLAGS = cflags)\n\n # Assembler options\n if gcc:\n if env['machine'] == 'x86':\n env.Append(ASFLAGS = ['-m32'])\n if env['machine'] == 'x86_64':\n env.Append(ASFLAGS = ['-m64'])\n\n # Linker options\n linkflags = []\n if gcc:\n if env['machine'] == 'x86':\n linkflags += ['-m32']\n if env['machine'] == 'x86_64':\n linkflags += ['-m64']\n if platform == 'winddk':\n # See also:\n # - http://msdn2.microsoft.com/en-us/library/y0zzbyt4.aspx\n linkflags += [\n '/merge:_PAGE=PAGE',\n '/merge:_TEXT=.text',\n '/section:INIT,d',\n '/opt:ref',\n '/opt:icf',\n '/ignore:4198,4010,4037,4039,4065,4070,4078,4087,4089,4221',\n '/incremental:no',\n '/fullbuild',\n '/release',\n '/nodefaultlib',\n '/wx',\n '/debug',\n '/debugtype:cv',\n '/version:5.1',\n '/osversion:5.1',\n '/functionpadmin:5',\n '/safeseh',\n '/pdbcompress',\n '/stack:0x40000,0x1000',\n '/driver',\n '/align:0x80',\n '/subsystem:native,5.01',\n '/base:0x10000',\n\n '/entry:DrvEnableDriver',\n ]\n if env['profile']:\n linkflags += [\n '/MAP', # http://msdn.microsoft.com/en-us/library/k7xkk3e2.aspx\n ]\n if platform == 'wince':\n linkflags += [\n '/nodefaultlib',\n #'/incremental:no',\n #'/fullbuild',\n '/entry:_DllMainCRTStartup',\n ]\n env.Append(LINKFLAGS = linkflags)\n\n # Default libs\n env.Append(LIBS = [])\n\n # Custom builders and methods\n createConvenienceLibBuilder(env)\n createCodeGenerateMethod(env)\n createInstallMethods(env)\n\n # for debugging\n #print env.Dump()", "def New(*args, **kargs):\n obj = itkCostFunction.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def make(self, width=1500.0, height=1000.0):\n return self._meta.template1(width, height).encode('utf-8')", "def CreateCrtFile(keyfile, csrfile):\n crtfile = tempfile.mkstemp()[1]\n cmd = [\n 'openssl',\n 'x509',\n '-req',\n '-days', '1',\n '-in', csrfile,\n '-signkey', keyfile,\n '-out', crtfile\n ]\n 
_RunCommand(cmd)\n return crtfile", "def do_new(argv):\n\n global PRIVATE_KEY\n\n if not PRIVATE_KEY:\n PRIVATE_KEY = wallet.get_private_key()\n else:\n get_new = yes_or_no(\"Private key already exist, do you want generate new one ?\")\n if get_new:\n PRIVATE_KEY = wallet.get_private_key()\n print(\"Private Key: '\" + PRIVATE_KEY + \"'\")\n cmpr_pub_key = wallet.get_compressed_public_key(PRIVATE_KEY, 1)\n addr = wallet.public_key_to_address(cmpr_pub_key, 0)\n open(\"data/address\", \"w\").write(addr)\n print(\"Public key was saved to 'data/cmpr_pub_key'\")" ]
[ "0.61295784", "0.57595176", "0.57502925", "0.5485153", "0.5463034", "0.5440735", "0.54096925", "0.5384929", "0.5324468", "0.52978784", "0.52702993", "0.5268702", "0.52018815", "0.51922834", "0.51729023", "0.51533055", "0.5123092", "0.5111853", "0.5078365", "0.5064279", "0.50641847", "0.5062572", "0.5062572", "0.5062572", "0.5043473", "0.49886894", "0.49865717", "0.49646503", "0.49582782", "0.49571937", "0.49483067", "0.49439582", "0.4941816", "0.49405336", "0.4920811", "0.4917935", "0.4917513", "0.4900723", "0.48898372", "0.4863611", "0.48618612", "0.48502958", "0.48488766", "0.48471552", "0.48465097", "0.48413977", "0.48380074", "0.48304406", "0.48274294", "0.48202944", "0.48153973", "0.4808604", "0.48042613", "0.47967288", "0.47934753", "0.4787032", "0.47862813", "0.47831985", "0.47820148", "0.47814682", "0.47806147", "0.4774054", "0.4765354", "0.4759443", "0.47590065", "0.47543553", "0.47528005", "0.47528005", "0.47474876", "0.4745046", "0.4737392", "0.47333512", "0.47203085", "0.47109905", "0.47078225", "0.47063226", "0.47045758", "0.47028708", "0.4697711", "0.46941003", "0.46870002", "0.4684607", "0.46845302", "0.46844363", "0.4681211", "0.4679926", "0.46788636", "0.46739182", "0.46667504", "0.46625108", "0.46567845", "0.46540433", "0.4648322", "0.4643733", "0.46331066", "0.4632202", "0.46305114", "0.4630374" ]
0.5178984
16
Return 2D array representing a given assignment.
def letter_grid(self, assignment):
    letters = [
        [None for _ in range(self.crossword.width)]
        for _ in range(self.crossword.height)
    ]
    for variable, word in assignment.items():
        direction = variable.direction
        for k in range(len(word)):
            i = variable.i + (k if direction == Variable.DOWN else 0)
            j = variable.j + (k if direction == Variable.ACROSS else 0)
            letters[i][j] = word[k]
    return letters
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getArray2d(self):\n\t\treturn self.array2d", "def _make_2x2(self, A11, A12, A21, A22, dtype=float):\n array = np.empty((2,2), dtype=dtype)\n array[0,0] = A11\n array[0,1] = A12\n array[1,0] = A21\n array[1,1] = A22\n return array", "def to_2d_array(self):\n return reshape_fns.to_2d(self._obj, raw=True)", "def _build(self):\n ary = np.zeros( (3,3,3), float )\n ary[0,0,0] = ary[1,1,1] = ary[0,1,2] = ary[1,0,2] = 1.\n ary[0,2,0] = ary[0,2,2] = ary[2,0,0] = ary[2,0,2] = 0.5\n ary[1,2,1] = ary[1,2,2] = ary[2,1,1] = ary[2,1,2] = 0.5\n ary[2,2,0] = ary[2,2,1] = 0.25\n ary[2,2,2] = 0.5\n return ary", "def Solution(self):\n self.solver.check()\n m = self.solver.model()\n answer = [[0] * self.width for i in range(self.height)]\n for y in range(self.height):\n for x in range(self.width):\n answer[y][x] = int(str(m[self.grid[(x, y)]]))\n return answer", "def as_numpy_array_2D(self):\n wx = []\n wy = []\n for wp in self.waypoints:\n wx.append(wp.location.x)\n wy.append(wp.location.y)\n return np.array([wx, wy])", "def get_field_array(self):\n array_j = []\n array_i = []\n n = 3\n i = self.square_size_y / 2\n while i <= self.field_height:\n if n % 2 == 1:\n j = self.square_size_x / 2\n while j <= self.field_width:\n array_j.append((j, i))\n j += self.square_size_x\n array_i.append(array_j)\n array_j = []\n n += 1\n else:\n j = 0\n while j <= self.field_width:\n array_j.append((j, i))\n j += self.square_size_x\n array_i.append(array_j)\n array_j = []\n n += 1\n i += self.square_size_y\n self.array = array_i\n return array_i", "def getIntArray2D(self) -> typing.List[typing.List[int]]:\n ...", "def create_numpy(board, my_board, cars):\n \n count = 2\n for i in range(len(board.board)):\n for n in range(len(board.board)):\n my_board[i][n] = cars[board.board[n][i]]\n return my_board", "def make_2d(x):\n return x.reshape((1, len(x)))", "def algi(C):\n return np.array([ C[0,2], C[1,2], C[1,0] ])", "def create2d(row_count, col_count, value=None):\n a = [None] * row_count\n for row in range(row_count):\n a[row] = [value] * col_count\n return a", "def to_array(self) -> np.ndarray:\n return self.A", "def toarray(self, order=None, out=None):\n d = self._process_toarray_args(order, out)\n for i, row in enumerate(self.rows):\n for pos, j in enumerate(row):\n d[i, j] = self.data[i][pos]\n return d", "def ndarray(self):\n if self._coord_format != constants.MatrixCoordinateDefault:\n self._logger.error(\"invalid coordinate format\")\n raise NotImplementedError(\"invalid coordinate format\")\n\n data = self.clear().data.collect()\n\n result = np.zeros(self._shape, dtype=self._dtype)\n\n for e in data:\n result[e[0], e[1]] = e[2]\n\n return result", "def createarray(m,n):\n return( np.ones((m,2,n)) )", "def get_ndarray(name, arr_shape, arr_type):\n arr_str = get_from_db(key=name)\n return np.fromstring(arr_str, dtype=np.dtype(arr_type)) \\\n .reshape(arr_shape)", "def array(self):\n return array(self.get_values())", "def get_2Darray(file,cols='all',nrows='all',verbose='no'):\n if cols=='all':\n #Get the number of columns in the file\n for line in open(file).readlines():\n pieces=split(line)\n if len(pieces)==0: continue\n if line[0]=='#':continue\n nc=len(pieces)\n cols=list(range(nc))\n if verbose=='yes': print('cols=',cols)\n break\n else:\n nc=len(cols)\n \n lista=get_data(file,cols,nrows)\n nl=len(lista[0])\n x=zeros((nl,nc),float)\n for i in range(nc):x[:,i]=lista[i]\n return x", "def __array__(self):\n return pa.column(\"dummy\", self.data).to_pandas().values", "def get_A3():\n\n return 
array([[0.68557183+0.46550108j, 0.12934765-0.1622676j,\n 0.24409518+0.25335939j],\n [0.1531015 + 0.66678983j, 0.45112492+0.18206976j,\n -0.02633966+0.43477693j],\n [-0.10817164-1.16879196j, -0.18446849+0.03755672j,\n 0.06430325-0.44757084j]])", "def as_matrix(self) -> types.Matrix:", "def getShortArray2D(self) -> typing.List[typing.List[int]]:\n ...", "def gen_flow_array(self, set_choice):\r\n chosen_set = self.split_flow[set_choice]\r\n all_poi_indices = chosen_set['poi_index'].drop_duplicates().tolist()\r\n flow_array = chosen_set['flow'].to_numpy().reshape(len(all_poi_indices), -1) # (num_loc, total_len)\r\n return all_poi_indices, flow_array", "def make_matrix():\n row, col = [int(x) for x in input().split()]\n island = [[int(x) for x in input().split()] for _ in range(row)]\n return row, col, island", "def to_array(self):\n return np.array(self.to_image())", "def to_matrix(self):\n return self.to_operator().data", "def get_array(self):\n \n if self.array == None:\n self.array = self.HN_algorithm(self._string2matrix(self.string))\n \n return self.array", "def np(self):\n return np.array([self.x, self.y])", "def board_array(self, board_fen=None):\n\n if board_fen is None:\n board_fen = self.board_fen()\n\n # initialize 8*8=64 1D numpy array\n arr = np.zeros(64)\n\n # iterate over FEN string and save the integer id of pieces.\n i = 0\n for char in board_fen:\n if char.isdigit():\n i += int(char)\n continue\n elif char == \"/\":\n continue\n else:\n arr[i] = FIGDICT.get(char)\n i += 1\n\n # reshape 1D (64) array to 2D (8,8) array\n return arr.reshape(8, 8)", "def array (self, length, width):\n\t\treturn [[0 for i in range(width)] for j in range(length)] #List comprehensions (Works like two for loops)", "def asarray(self):\n from numpy import asarray\n return asarray(self)", "def _parameter_study_to_numpy(self, data_type):\n data = []\n for set_hash, data_row in self.parameter_study.sel(data_type=data_type).groupby(_hash_coordinate_key):\n data.append(data_row.squeeze().to_array().to_numpy())\n return numpy.array(data, dtype=object)", "def __array__(self):\n return np.asarray(self.data)", "def _make_2D_array(df, data_col='Sample DNA Concentration',\n well_col='Well', rows=8, cols=12):\n # initialize empty Cp array\n cp_array = np.empty((rows, cols), dtype=object)\n\n # fill Cp array with the post-cleaned values from the right half of the\n # plate\n for record in df.iterrows():\n row = ord(str.upper(record[1][well_col][0])) - ord('A')\n col = int(record[1][well_col][1:]) - 1\n cp_array[row, col] = record[1][data_col]\n\n return cp_array", "def _file_to_matrix(self, input_field: str, depth: int) -> np.char.array:\n maze_rows = input_field.splitlines()\n maze_lists = [list(row) for row in maze_rows]\n maze = np.array(maze_lists)\n maze = np.pad(maze, pad_width=1, constant_values=EMPTY)\n\n multidim_maze = np.char.array([np.char.array(maze, itemsize=2)\n for _ in range(depth)])\n return multidim_maze", "def array_form(self):\n return tuple(self)", "def get_array(self) -> numpy.array:\r\n \r\n return self.pic_array", "def __array__(self):\n return self.to_array()", "def array_form(self):\n if self._array_form is not None:\n return self._array_form\n if not isinstance(self.args[0][0], list):\n self._array_form = self.args[0]\n return self._array_form\n size = 0\n cycles = self.args[0]\n for c in cycles:\n size += len(c)\n perm = [None]*size\n for c in cycles:\n for i in range(len(c)-1):\n perm[c[i]] = c[i+1]\n perm[c[-1]] = c[0]\n self._array_form = perm\n return perm", "def 
get_constraint_array(self, x):\n return np.array(self.constraint.get_g(x)).reshape((-1, 1))", "def ToMatrix(lines):\r\n #print lines\r\n arr = np.zeros([4, 4])\r\n for j in xrange(4):\r\n arr[j, :] = np.array([int(num) for num in lines[j].split(\" \")])\r\n #print np.array([int(num) for num in lines[j].split(\" \")])\r\n return arr", "def create_array( n ):", "def array(self):\n return np.array([self.w, self.x, self.y, self.z])", "def array(self):\n return np.asarray(self)", "def build(xaxis, yaxis, zaxis):\n matrix = []\n for floor in range(zaxis):\n roomnum = 1\n matrix.append([])\n for row in range(yaxis):\n matrix[floor].append([])\n for column in range(xaxis):\n matrix[floor][row].append(str(roomnum))\n roomnum += 1\n return matrix", "def as_matrix(self):\n return self._data", "def to_numpy(self):\n return numpy.vstack((self.mz, self.intensities)).T", "def to_array(self):\n return self.dungeon_map", "def cells(self):\n return ((row, col) for row in self.rows for col in self.cols)", "def config_to_array(data):\n return np.array(data[\"data\"]).reshape(data[\"rows\"], data[\"cols\"])", "def model(self):\n return numpy.array([\n [self.x1, self.y1],\n [self.x2, self.y2],\n [self.x3, self.x3]\n ])", "def string_to_array(self):\n temp_map = copy.deepcopy(self.map.replace(\" \", \"\"))\n map_list = [[a for a in row] for row in temp_map.splitlines()]\n\n # Checks that all lines are of equal length\n for line in map_list:\n for index in range(len(map_list)):\n if len(map_list[index]) == len(line):\n continue\n else:\n raise SyntaxError(\"Island geography multi-line string \"\n \"must have lines of same length.\")\n map_arr = np.array(map_list)\n\n # Checks that there are only 'O's at the edges.\n edge = []\n edge += list(map_arr[0, :])\n edge += list(map_arr[-1, :])\n edge += list(map_arr[1:-1, 0])\n edge += list(map_arr[1:-1, -1])\n if set(edge) == {'O'}:\n pass\n else:\n raise SyntaxError(\"Island geography multi-line string \"\n \"must have 'O' around the edges. 
\")\n\n return map_arr", "def create_nodes_arr(row,col):\r\n arr = np.ones((row, col))\r\n for i in range(row):\r\n for j in range(col):\r\n arr[i,j] = arr[i,j]*(col*i+1)+j\r\n return arr", "def get_array(self,vname):\n v=self.f.variables[vname]\n v=pylab.flipud(v)\n return v", "def get_array(self,vname):\n v=self.f.variables[vname]\n v=v[self.istep,:,:]\n v=pylab.flipud(v)\n return v", "def getLongArray2D(self) -> typing.List[typing.List[int]]:\n ...", "def transform(self, x: Array2D) -> Array2D:", "def column(self):\n return self.reshape((self.size, 1))", "def c_():\r\n c = np.array([[0, 0], [0, 100], [100, 100], [100, 80], [20, 80],\r\n [20, 20], [100, 20], [100, 0], [0, 0]])\r\n return c", "def prepare_arrays(series: pd.Series) -> np.array:\n\n series = series.map(string_to_array)\n\n # transform the array of array into a 2d-array\n return np.stack(np.array(series.array))", "def np_image_matrix(self):\n return np.array(self.crop_image())", "def compute(self, node, input_vals):\r\n assert len(input_vals) == 2\r\n #assert len(input_vals[1].shape) ==1\r\n return input_vals[0].reshape(tuple(input_vals[1]) )", "def copy_grid (grid):\r\n c=[]\r\n for i in range(len(grid)):\r\n c.append(grid[i])\r\n return eval(str(c))", "def __array__(self):\n return self.array", "def alloc2d(x,y,iv=0):\n return [[iv for j in range(int(x))] for i in range(int(y))]", "def getStringArray2D(self) -> typing.List[typing.List[str]]:\n ...", "def get_array(self):\n return numpy.array(self._ar)", "def getArray(self, varName):\n objNames = varName.split('.')\n\n objList = self.stars\n for name in objNames:\n #objList = [obj.__getattribute__(name) for obj in objList]\n # returns NaN by default if the attribute doesn't exist\n objList = [getattr(obj,name,np.nan) for obj in objList]\n\n if (type(objList[0]) == type('')):\n # Strings shouldn't be numpy arrays\n objArray = objList\n else:\n objArray = np.array(objList)\n\n return objArray", "def get_puzzle(self):\n return [[str(self.puzzle[i][j]) for j in range(len(self.puzzle[0]))] for i in range(len(self.puzzle))]", "def assure_2d(array):\n array = np.array(array, copy=False, subok=True, ndmin=1)\n if array.ndim == 2:\n return array\n elif array.ndim == 1:\n return array[:, np.newaxis]\n else:\n raise RuntimeError(\"Array must be 1 or 2 dimensional.\")", "def readArray(input):\n data = gdal.Open(input)\n band = data.GetRasterBand(1)\n \n return band.ReadAsArray()", "def as_matrix(self, *, ink=1, paper=0):\n return tuple(\n tuple(ink if _c == self._1 else paper for _c in _row)\n for _row in self._pixels\n )", "def returnArray(self, dataSheet, lowRange, highRange):\n cells = dataSheet[lowRange:highRange]\n cells = np.transpose(cells)\n cells = np.reshape(cells, cells.size)\n values = [cell.value for cell in cells]\n return values", "def get_results_array(self):\n print('Making results array')\n r_a_grid = Grid(origin=self.array_grid_origin, far_corner=self.array_grid_far_corner, spacing=self.spacing)\n\n nx, ny, nz = r_a_grid.nsteps\n results_array = np.zeros((nx, ny, nz), dtype=tuple)\n rec_spacing = 1 / self.spacing\n\n def set_value(x, y, z):\n r_x = x + int(d_coor[0])\n r_y = y + int(d_coor[1])\n r_z = z + int(d_coor[2])\n\n if isinstance(results_array[r_x][r_y][r_z], tuple):\n results_array[r_x][r_y][r_z] += (ar[x][y][z],)\n else:\n results_array[r_x][r_y][r_z] = (ar[x][y][z],)\n\n vset_value = np.vectorize(set_value, otypes=[tuple])\n\n for i in range(self.tup_max_length):\n g = Grid.from_file(self.grid_list[i])\n d_coor = [(g.bounding_box[0][b] - 
r_a_grid.bounding_box[0][b])*rec_spacing for b in range(3)]\n print('D_coor:',d_coor)\n ar = fxn.grid_to_numpy(g)\n ind_ar = np.where(ar > 0)\n vset_value(ind_ar[0], ind_ar[1], ind_ar[2])\n\n self.results_array = results_array", "def AsNumpy(self):\n return np.array([\n self._center[0], self._center[1], self._width, self._length, self._angle\n ])", "def getByteArray2D(self) -> typing.List[typing.List[int]]:\n ...", "def data_for_grouping():\n return RaggedArray(\n [[1, 0], [1, 0], [], [], [0, 0], [0, 0], [1, 0], [2, 0]])", "def asMatrix(self):\n output = np.zeros((self.size[0],self.size[1]))\n for pos in self.matrixDict:\n output[pos[0]][pos[1]] = self.matrixDict[pos]\n return output", "def get_array(input_data, modes):\n\n shares = []\n for mode in modes:\n shares.append([float(i) for i in input_data[mode]])\n\n shares_array = np.asarray(shares.pop(0))\n for share in shares:\n shares_array = np.vstack((shares_array, share))\n\n return shares_array", "def get_assignment(self):\n assignment = Assignment()\n for effect in self._sub_effects:\n if not effect._negated:\n assignment.add_pair(effect.get_variable() + \"'\", effect.get_value())\n else:\n assignment.add_pair(effect.get_variable() + \"'\", ValueFactory.none())\n return assignment", "def read(self, *args):\n return_values = [[] for _ in range(len(args)+2)]\n for row in self.iter_rows(*args):\n for return_array, value in zip(return_values, row):\n return_array.append(value)\n\n return [np.array(x) for x in return_values]", "def array(self):", "def grid(self) -> aa.Grid2D:\r\n return self.analysis.dataset.grid", "def makearray(self, *args, **kwargs):\n return _image.image_makearray(self, *args, **kwargs)", "def pyplot_to_numpy(pyplot_figure):\n pyplot_figure.canvas.draw()\n x = np.fromstring(pyplot_figure.canvas.tostring_rgb(), dtype=np.uint8, sep='')\n x = x.reshape(pyplot_figure.canvas.get_width_height()[::-1] + (3,))\n return x", "def make_game_grid(self):\n return numpy.array([[random.choice(string.ascii_uppercase) for breath in range(self.grid_size)] for depth in\n range(self.grid_size)])", "def to_2dnp_array(X):\r\n if isinstance(X, np.ndarray):\r\n if X.ndim == 1:\r\n return X.reshape((-1, 1))\r\n if X.ndim == 2:\r\n return X\r\n if isinstance(X, Number):\r\n X = [X]\r\n X = np.array(X)\r\n X = X.reshape([-1, np.prod(X.shape) // X.shape[0]])\r\n return X", "def array(self):\n return self.get_array()", "def to_ndarray(self):\n # Create an ndarray of the right shape, filled with self.defval.\n ndshape = type(self).flatten_shape(self.shape)\n res = np.full(ndshape, self.defval, dtype=self.dtype)\n if 0 in ndshape:\n return res\n shp, qhp = type(self)._sorted_shape_qhape(tensor=self)\n # ranges is like shape, but every number d is replaced by a tuple\n # (a, a+d) where a is the sum of all the previous entries in the same\n # dim.\n ranges = []\n for dim in shp:\n prv = dim[0]\n r = [(0, prv)]\n for d in dim[1:]:\n nxt = prv + d\n r.append((prv, nxt))\n prv = nxt\n ranges.append(r)\n # Copy the elements of each sector to the right place in the result.\n for k, v in self.sects.items():\n slc = ()\n for i, qnum in enumerate(k):\n r = ranges[i][qhp[i].index(qnum)]\n slc += (slice(r[0], r[1]),)\n res[slc] = v\n return res", "def init_two_d_array(dimens, val):\n w, x = dimens\n return [[val for j in range(x)] for i in range(w)]", "def get_2d_train(self, jnts=14):\n\n to_select, to_sort = dataset_indices(self.dataset_name, jnts)\n\n return self._data_train['2d'][:, to_select, :][:, to_sort, :]", "def init_probability_2d(self):\n retval = 
[]\n for a in range(0,28):\n retval.append([])\n for b in range(0,28):\n #first element: # of '+', second element: # of '#', third element: # of ' ', fourth element: total test elements\n retval[a].append([0,0,0,0])\n return retval", "def reshape_exercises(args):\n # Create dataframe object here\n # args contain alpha, bravo, charlie, delta, echo\n\n # exercise_array = np.empty(\n # [0, len(Worker.columns) *\n # len(args) *\n # Worker.frames_per_excersise])\n\n exercise_array = np.array([])\n\n patientgroup = args[0].catagorie\n for exercise in args: # Alpha Bravo Charlie Delta Echo\n row_selection = Worker.select_rows(\n exercise.dataframe_size(), Worker.frames_per_excersise)\n\n np_exercise = exercise.dataframe[Worker.columns].to_numpy()\n for rowindex in row_selection:\n exercise_array = np.append(\n exercise_array, np_exercise[rowindex])\n return [exercise_array, patientgroup]", "def values(self):\n arr = self.view(np.ndarray).copy()\n return arr", "def generateNumbersArr(mines, num_rows, num_cols):\n minesIn2D = [convertFrom1Dto2D(mine, num_cols) for mine in mines] \n minesSet = set(minesIn2D) #set of tuples\n\n #initialize a 2D array\n #newArr = new Array(rows).fill(0).map(() => new Array(cols).fill(0));\n newArr = np.zeros(shape=(num_rows, num_cols))\n for i in range(num_rows):\n for j in range(num_cols):\n if ((i, j) in minesSet):\n newArr[i][j]=9\n else:\n newArr[i][j]=numNeighbors(minesSet, i, j, num_cols, num_rows)\n return newArr", "def get_2d_array(logger, hdf5_data, path_attribute, expected_dim=-1):\n path = getattr(structure, path_attribute)\n dset = hdf5_data.get(path)\n default_name = {\n \"description\": path\n }\n name = str(getattr(structure, path + \"_ATTR\", default_name)[\"description\"])\n check_dataset_type(dset, name=name, location=path)\n\n check_array_ndim(dset, name=name, expected_ndim=2)\n\n if expected_dim > -1:\n check_array_dim(logger, dset, name=name, expected_dim=expected_dim, dim_idx=1)\n\n return dset", "def GetEdgeArray(self, p_int):\n ..." ]
[ "0.606624", "0.5862392", "0.55966055", "0.5584392", "0.54055727", "0.5379813", "0.5306427", "0.52944845", "0.52937496", "0.5269656", "0.52278423", "0.52027124", "0.51681507", "0.5129247", "0.5119349", "0.5079877", "0.50548553", "0.50341636", "0.50252825", "0.5023556", "0.49972185", "0.49965763", "0.49824563", "0.49816155", "0.4978913", "0.4969045", "0.49618694", "0.4954092", "0.49513388", "0.4943173", "0.49348763", "0.4934476", "0.49259582", "0.49240047", "0.49197635", "0.49016538", "0.49008855", "0.48977908", "0.48902836", "0.48852175", "0.48838323", "0.48771062", "0.48760793", "0.4871182", "0.48697287", "0.48666254", "0.48452103", "0.4841156", "0.48262298", "0.4825553", "0.48216176", "0.4821333", "0.48202023", "0.4803088", "0.47853658", "0.4784078", "0.47796878", "0.4778054", "0.47756153", "0.47720695", "0.47665414", "0.4766271", "0.47652486", "0.4761948", "0.47534427", "0.47512934", "0.47495353", "0.47477055", "0.47472382", "0.47448927", "0.47445008", "0.47383592", "0.4735684", "0.4733497", "0.4721197", "0.4719179", "0.47183076", "0.47101402", "0.4708729", "0.47069377", "0.4704041", "0.4703785", "0.47023278", "0.47015175", "0.46985385", "0.46985087", "0.46872324", "0.46792722", "0.46775812", "0.46774822", "0.46761438", "0.46736166", "0.4672285", "0.4671858", "0.46713093", "0.46690467", "0.46660912", "0.46642348" ]
0.5062015
17
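Illustration (not part of the dataset): a minimal, self-contained sketch of how the `letter_grid` document in the preceding record could be exercised. The simplified `Variable` and `Crossword` stand-ins, the 4x4 dimensions, and the words "FOUR"/"FARM" are assumptions introduced only for this example.

class Variable:
    ACROSS = "across"
    DOWN = "down"

    def __init__(self, i, j, direction, length):
        # Starting cell (i, j), orientation, and word length -- assumed minimal stand-in
        self.i = i
        self.j = j
        self.direction = direction
        self.length = length


class Crossword:
    def __init__(self, height, width):
        # Assumed minimal stand-in: only the dimensions that letter_grid reads
        self.height = height
        self.width = width


def letter_grid(crossword, assignment):
    # Same logic as the record's document, written as a free function for this sketch
    letters = [[None for _ in range(crossword.width)] for _ in range(crossword.height)]
    for variable, word in assignment.items():
        direction = variable.direction
        for k in range(len(word)):
            i = variable.i + (k if direction == Variable.DOWN else 0)
            j = variable.j + (k if direction == Variable.ACROSS else 0)
            letters[i][j] = word[k]
    return letters


crossword = Crossword(height=4, width=4)
across = Variable(0, 0, Variable.ACROSS, 4)
down = Variable(0, 0, Variable.DOWN, 4)
grid = letter_grid(crossword, {across: "FOUR", down: "FARM"})
for row in grid:
    print(row)
# First row is ['F', 'O', 'U', 'R']; the first column spells FARM; all other cells stay None.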
Print crossword assignment to the terminal.
def print(self, assignment):
    letters = self.letter_grid(assignment)
    for i in range(self.crossword.height):
        for j in range(self.crossword.width):
            if self.crossword.structure[i][j]:
                print(letters[i][j] or " ", end="")
            else:
                print("█", end="")
        print()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw_word_scheme(self) -> None:\n print(\"\".join(self.word2))", "def print(self):\n\n def format_guessed_word(word):\n return ' '.join(list(word))\n\n def format_blank_word(word):\n return ' '.join(list('_' * len(word)))\n\n print('\\n' + \"Board\" + '=' * 75)\n for word in self._words:\n word_str = format_guessed_word(word) \\\n if word in self._words_guessed \\\n else format_blank_word(word)\n print(word_str)\n print(\"{}/{} words remaining\".format(self._num_words - len(self._words_guessed),self._num_words))\n print('=' * 80 + '\\n')", "def print(self):\n self.print_avec_separateur(\" \")", "def show(self):\n print('\\n'+'\\n'.join([' '.join([['.', 'O', 'X'][self.board[3*j + i]]\n for i in range(3)]) for j in range(3)]))", "def printBoard(self):\n\t\tkey = [' ', 'X', 'O']\n\t\tprint(' | |')\n\t\tprint(' ' + key[self.state[0][0]] + ' | ' + key[self.state[0][1]] + ' | ' + key[self.state[0][2]])\n\t\tprint(' | |')\n\t\tprint('-----------')\n\t\tprint(' | |')\n\t\tprint(' ' + key[self.state[1][0]] + ' | ' + key[self.state[1][1]] + ' | ' + key[self.state[1][2]])\n\t\tprint(' | |')\n\t\tprint('-----------')\n\t\tprint(' | |')\n\t\tprint(' ' + key[self.state[2][0]] + ' | ' + key[self.state[2][1]] + ' | ' + key[self.state[2][2]])\n\t\tprint(' | |')", "def text_output(self):\n print(self.board)\n print()", "def print_colored(word):\n for char in word:\n print(c.rc() + char + c.x, end='')", "def __str__(self):\n s = 'word chain: ' + '\\n'\n for word in self._used_words[:-1]:\n s += word + ' -> '\n s += self._used_words[-1] + '\\ntarget word: ' + self._target\n return s", "def printBoard(self):", "def printBoard(self):\n print(\"\"\"\nSpace 1 Space 2 Space 3 Space 4 Space 5 Space 6\n------- ------- ------- ------- ------- -------\"\"\")\n print(\"{:>4}{:>10}{:>10}{:>10}{:>10}{:>10}\".format(str(self.space1), str(self.space2), str(self.space3), str(self.space4), str(self.space5), str(self.space6)))\n print()", "def print_board(self):\n \n # How to show empty/p1/p2\n VALS = \".XO\"\n\n print(\"\\n a b c d e f g\")\n print(\" /--+-+-+-+-+-+--\\\\\")\n for r in range(_HEIGHT - 1, -1, -1):\n s = \"%s |\" % r\n for c in range(_WIDTH):\n # Print mark next to most recent move\n mark = \">\" if self.last_play_rc == (r, c) else \" \"\n s += mark + VALS[self.board[r * 7 + c]]\n print(s + \" |\")\n print(\" \\\\--+-+-+-+-+-+--/\")\n print(\" a b c d e f g\\n\")", "def verse_2():\n print(\"Old MacDonald had a farm\")\n print(\"E-I-E-I-O\")", "def shout(word):\n print(word+\"!\")", "def print_line():\n print('+ - - - - + - - - - +'),", "def print(self):\n for word in self.words:\n print(word)", "def show( self):\n def symbol( i):\n return i<0 and (i==-2 and ' ' or '0') or chr(ord('a') + i)\n \n X, Y = np.max( self.board.positions, 0)\n # -2 to indicate outside board.\n display = np.zeros( (X+1,Y+1), dtype=int) - 2 \n for x, y in self.board.positions:\n display[x, y] = -1 # -1 to indicate unoccupied\n for p, i in self.occupation.items():\n x, y = self.board.positions[p]\n display[x, y] = i\n for x in xrange(X+1):\n s = ''.join( [ symbol( display[x, y]) for y in xrange(Y+1) ])\n print s", "def print_full_phrase(self):\n print(\"The phrase was...\")\n print(\"\\t\", end=\"\")\n for i in self.active_phrase:\n print(\"*\", end=\"\")\n print(f\"\\n\\t{self.active_phrase}\")\n print(\"\\t\", end=\"\")\n for i in self.active_phrase:\n print(\"*\", end=\"\")\n print()", "def 
print_instructions(self):\n\t\tprint('\\n\\n==========================================================================')\n\t\tprint('==========================================================================\\n')\n\t\tprint('Welcome to Tic Tac Toe, the came you know and love. \\nThe rules are the same ones you know and love. \\nTo make a move just type the coordinates of the spot like so - row,column. \\nNo spaces please! Lets go ahead and start! Here is a picuter of the board with some coordinates just in case!\\n')\n\t\tprint('=====================')\n\t\tprint('|| 0,0 | 0,1 | 0,2 ||')\n\t\tprint(' -----------------')\n\t\tprint('|| 1,0 | 1,1 | 1,2 ||')\n\t\tprint(' -----------------')\n\t\tprint('|| 2,0 | 2,1 | 2,2 ||')\n\t\tprint('=====================')\n\t\tprint('\\n==========================================================================')\n\t\tprint('==========================================================================\\n\\n')", "def print(self):\r\n self.print_avec_separateur()", "def _print(txt):\n\n # Fore: BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, RESET.\n # Back: BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, RESET.\n # Style: DIM, NORMAL, BRIGHT, RESET_ALL\n print('{0}{1}'.format(Style.BRIGHT + txt, Fore.RESET + Back.RESET + Style.RESET_ALL))", "def print_post():\n print('| | |'),", "def print_chars(self):\n for v in voc.split('\\n'):\n pair = v.split(',')\n print(pair[0], pair[1], '\\t', self.epi.xsampa_list(pair[0]))", "def print(self):\n print(\" a b c d e f g h \")\n print(\" ┼───┼───┼───┼───┼───┼───┼───┼───┼\")\n for row in range(8, 0, -1):\n pieces = \" │ \".join(self.state[row - 1])\n print(f\"{row} │ {pieces} │ {row}\")\n print(\" ┼───┼───┼───┼───┼───┼───┼───┼───┼\")\n print(\" a b c d e f g h \")", "def show_word(self):\n self.display_word = len(self.chosen_word) * \"_ \"\n Donatello.draw_word(self.display_word)\n return self.display_word", "def print(self):\r\n base = 8 * self.width\r\n print(base * \"-\")\r\n for x in range(self.height):\r\n output = \"\"\r\n for y in range(self.width):\r\n output = output + self.board[x][y] + \"|\"\r\n print(\"|\" + output)\r\n print(base * \"-\")", "def print(self) -> str:\n if self.is_unoccupied():\n return \"\"\n return str(\"%s-%s\" % (self.piece.color.name, self.piece.name.name))", "def print_trail(word):\n if len(word) == 0:\n return\n print(word, end = ' ')\n t = is_reducible(word, word_dict)\n print_trail(t[0])", "def space():\n print(' ', end='')", "def print_board(self):\n print(\n self.BOARD_TEMPLATE.format(\n *[self.COUNTER_REPRESENTATION[counter] for counter in self.board])\n )", "def print_possibility_space():\n\n print(\"Possibility space:\")\n print(\" {} unique sword images\".format(calculate_image_possibilities()))", "def print_confirmation(action):\n\tprint(Fore.YELLOW + Style.BRIGHT + action + Style.RESET_ALL + \"\\n\")", "def print_prompt(self):\n clear_term()\n\n print('Press \"w\", \"a\", \"s\", or \"d\" to move Up, Left, Down or Right',\n 'respectively.\\nEnter \"p\" or \"Q\" to quit.\\n')\n self.grid.draw_grid()\n print('\\nScore: ' + str(self.grid.score))", "def print(self):\n \n for i in range(self.height):\n print(\"--\" * self.width + \"-\")\n \n for j in range(self.width):\n \n if self.board[i][j]:\n print(\"|X\", end=\"\")\n \n else:\n print(\"| \", end=\"\")\n print(\"|\")\n \n print(\"--\" * self.width + \"-\")", "def __str__(self):\n s = \" 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15\\n\"\n board = initial_board()\n count = 1\n for i in self.occupied:\n board[i[0]][i[1]] = 
self.occupied[i]\n space = ''\n for i in range(0, 16):\n space += ' '\n start = '---'.join(space)\n s += start+'\\n|'\n for row in range(1,16):\n for col in range(1,16):\n if use_color and (row, col) == self.action:\n s += '\\033[91m'\n if board[row][col] == 0:\n s += ' |'\n elif board[row][col] == 1:\n s += ' O |'\n else:\n s += ' X |'\n if use_color and (row, col) == self.action:\n s += '\\033[0m'\n s += '\\033[0m'\n s+=str(count)+'\\n'+start+'\\n|'\n count += 1\n\n s = s[:len(s)-1]\n s += \"\\n*****************************************************************************\"\n return s[:len(s)-1]", "def print_out():\n pass", "def print_answer(answer):\n print(\"-\" * 40)\n print(u\"Answer: \" + answer)\n print(\"-\" * 40)", "def print(self):\n for i in range(self.height):\n print(\"--\" * self.width + \"-\")\n for j in range(self.width):\n if self.board[i][j]:\n print(\"|X\", end=\"\")\n else:\n print(\"| \", end=\"\")\n print(\"|\")\n print(\"--\" * self.width + \"-\")", "def print(self):\n for i in range(self.height):\n print(\"--\" * self.width + \"-\")\n for j in range(self.width):\n if self.board[i][j]:\n print(\"|X\", end=\"\")\n else:\n print(\"| \", end=\"\")\n print(\"|\")\n print(\"--\" * self.width + \"-\")", "def print(self):\n for i in range(self.height):\n print(\"--\" * self.width + \"-\")\n for j in range(self.width):\n if self.board[i][j]:\n print(\"|X\", end=\"\")\n else:\n print(\"| \", end=\"\")\n print(\"|\")\n print(\"--\" * self.width + \"-\")", "def print(self):\n for i in range(self.height):\n print(\"--\" * self.width + \"-\")\n for j in range(self.width):\n if self.board[i][j]:\n print(\"|X\", end=\"\")\n else:\n print(\"| \", end=\"\")\n print(\"|\")\n print(\"--\" * self.width + \"-\")", "def print(self):\n for i in range(self.height):\n print(\"--\" * self.width + \"-\")\n for j in range(self.width):\n if self.board[i][j]:\n print(\"|X\", end=\"\")\n else:\n print(\"| \", end=\"\")\n print(\"|\")\n print(\"--\" * self.width + \"-\")", "def print(self):\n for i in range(self.height):\n print(\"--\" * self.width + \"-\")\n for j in range(self.width):\n if self.board[i][j]:\n print(\"|X\", end=\"\")\n else:\n print(\"| \", end=\"\")\n print(\"|\")\n print(\"--\" * self.width + \"-\")", "def print_board(self):\n\n print\n\n for row in xrange(8):\n for column in xrange(8):\n if self.squares[row][column]:\n print self.squares[row][column],; sys.stdout.write(u'')\n else:\n if self.dark_square((row, column)):\n print u' __ ',; sys.stdout.write(u'')\n else:\n print u' . 
',; sys.stdout.write(u'')\n print\n print", "def print_phrase(self):\r\n for letter in self.phrase:\r\n if letter == ' ':\r\n print(letter, end=' ')\r\n else:\r\n print(letter.show(), end=' ')\r\n print('\\n')", "def review_word(word):\n\n print \"===== NEXT WORD ======\"\n print \"Is this word \"", "def print_board(self):\n self.board.print()", "def print(self):\n print(\" 0 1 2 3 4 5 6 7 8 \")\n print(\" -------------------------\")\n for x in range(0, 9):\n print(f\"{x} | \", end=\"\")\n for y in range(0, 9):\n if self.field[x][y] == -1:\n print(\"* \", end=\"\")\n else:\n print(f\"{self.field[x][y]} \", end=\"\")\n if y % 3 == 2:\n print(\"| \", end=\"\")\n print(\"\")\n if x % 3 == 2:\n print(\" -------------------------\")", "def show(self):\n for y in range(3):\n if y > 0:\n print(\"--+---+--\")\n for x in range(3):\n if x > 0:\n print('|',)\n\n # Print a space for empty (0), an O for player 1, or an X for player 2\n print(\" OX\"[self.get_square(x, y)],)\n print", "def print_column():\n print('+----+----+')", "def _print_board(board):\r\n pass", "def print_nice(self):\n print(\"- \" + str(self.__node_a.name) + \" (\" + self.__node_a.get_value_string() +\n \") -> \" + str(self.__node_b.name) + \" (\" + self.__node_b.get_value_string() + \")\")", "def _repr_term(self, c):\n return self.prefix()+str(c)", "def __str__(self):\n return \"{}\".format(self.word)", "def printc(txt):\n sys.stdout.write(txt)\n sys.stdout.write('\\n')", "def diag(self):\n print(\"This car is\", self.color, \"and has wheels\", self.wheels)", "def display(self): \n print ' ' \n print 'Connect ', NWIN, ' Board '\n print ' ' \n for r in reversed(range(self.getHeight())):\n for c in range(self.getWidth()):\n if self.cell[c][r] == BLACK:\n print '+',\n elif self.cell[c][r] == WHITE:\n print '-',\n else:\n print '.',\n print ' '\n for c in range(self.getWidth()):\n print c,\n print ' '\n print ' '", "def c_print(printme):\n print(f\"\\n\" + printme.center(80, \" \") + \"\\n\")", "def show_variables(self):\r\n\r\n variablelist = [(x_temp,self.variables[x_temp]) for x_temp in sorted(self.variables.keys())]\r\n display.noteprint(('/C/ '+labels.VARIABLES.upper(), EOL.join([x_temp[0]+BLANK\r\n +COLON+BLANK\r\n +abridge(str(x_temp[1]),40)\r\n for x_temp in variablelist])))", "def print_column():\n print('+----+----+----+----+')", "def main():\n\n args = get_args()\n words = args.phrase\n\n words = codify_phrase(words)\n display = ' '.join(words)\n\n print(display)", "def print_board(self):\n print_sp = functools.partial(print, end=' ')\n print_sp(' ')\n for i in range(BOARD_SIZE):\n print_sp(i)\n print()\n for i in range(BOARD_SIZE):\n print_sp(i)\n for j in range(BOARD_SIZE):\n e = self.board[j][i]\n print_sp('●') if e == BLACK else print_sp('○') if e == WHITE else print_sp('·')\n print()", "def _delete_print(number=1):\n\tif slogviz.config.interactive and not platform.system() == 'Windows':#Windows does not fully implement ANSI Control Characters, see README\n\t\tprint('\\x1b[2K\\x1b[1A'*number)", "def paste_to_stdout(self, text):\n _builtin_print(text)\n return self", "def print_cmd(cmd):\n padding = \" \" * 80\n sys.stdout.write(\"\\r\"+padding)\n sys.stdout.write(\"\\r\"+prompt+cmd)\n sys.stdout.flush()", "def triple_hash():\r\n board_format_1 = ' # #'\r\n print(board_format_1,'\\n',board_format_1,'\\n',board_format_1,'\\n',board_format_1,sep='')", "def emu_print(text):\n print \"%s %s\" % (EMU_PRINT_PREFIX, text)", "def print_concordance(self, word, width=80, lines=25):\n concordance_list = 
self.find_concordance(word, width=width)\n\n if not concordance_list:\n print(\"no matches\")\n else:\n lines = min(lines, len(concordance_list))\n print(f\"Displaying {lines} of {len(concordance_list)} matches:\")\n for i, concordance_line in enumerate(concordance_list[:lines]):\n print(concordance_line.line)", "def printStep(self):\n\n\t\tprint '\\nConfiguracao da fita: ',\n\n\t\tcount = 0\n\t\twhile count < len(self.tape):\n\t\t\tif count == self.currentPos:\n\t\t\t\tprint '_',\n\n\t\t\tprint self.tape[count],\n\t\t\tcount += 1\n\n\t\tprint '\\nEstado atual: ', self.currentState", "def shout():\n # Use echo_word in nonlocal scope\n nonlocal echo_word\n\n # Change echo_word to echo_word concatenated with '!!!'\n echo_word = echo_word + '!!!'", "def display():\n screen.addch(head[0],head[1],'x')", "def print_substep(text, style=\"\"):\n console.print(text, style=style)", "def verse_1():\n print(\"Old MACDONALD had a farm\")\n print(\"E-I-E-I-O\")", "def shout(): \n # Use echo_word in nonlocal scope\n nonlocal echo_word\n \n # Change echo_word to echo_word concatenated with '!!!'\n echo_word = echo_word+'!!!'", "def shout(word):\n # Concatenate the strings: shout_word\n shout_word=word + '!!!'\n\n # Replace print with return\n return (shout_word)", "def print(self):\n board_string = ''\n for y in range(self.size):\n if y == 0:\n board_string += '+ '\n for x in range(self.size):\n board_string += str(x+1) + ' '\n board_string += '\\n'\n board_string += (1+3*self.size)*'-'\n board_string += '\\n'\n board_string += str(y+1)+'|'+y*' '\n \n for x in range(self.size):\n board_string += ' '\n if self.board[y,x] == HexBoard.BLUE:\n board_string += self.char_player1\n elif self.board[y,x] == HexBoard.RED:\n board_string += self.char_player2\n else: \n board_string += self.char_empty\n board_string += '\\n'\n board_string = board_string.strip()\n\n print(board_string)", "def prow(x, y=2):\n for i in range(y):\n print(\"+\", \"- \" * x, end=\"\")\n print(\"+\")", "def foo_printer(self):\n print(\"\\nHi I'm {}\".format(self.foo))", "def print_board(self):\n print(*self._board, sep=\"\\n\")", "def outro():\n print('Tento remake vytvoril mirek sko súčasť svojich školení v rokoch 2022-2023.')\n print('Originálnu hru vytvoril v roku 1986 František Fuka aka Fuxoft.')\n print('See you soon.')", "def PrintAt(self,x=0,y=0,text=''):\n self.SetCursor(x,y)\n self.Print(text)", "def print_board(self):\n\n print(\"=\" * 10)\n for row in self._board_matrix:\n for entry in row:\n if entry is None:\n print(\"_\", end=\"\")\n else:\n print(entry.length, end=\"\")\n print(\"\")\n print(\"=\" * 10)", "def crossword_words(crossword: list) -> list:\n pass", "def print_cmd_line(s, target, src, env):\n sys.stdout.write(\" Making %s...\\n\"% (' and '.join([str(x) for x in target])))", "def print_board(self):\n print(self.board)", "def print_moves(self):\n print self._current_moves\n self._current_moves = \"\"", "def print_board(self):\n to_join = [\"-\" * self.DIMENSIONS[0]]\n for row in self.grid:\n to_join.append(\"\".join([ch.letter if ch is not None else \" \" for ch in row]))\n\n print(\"\\n\".join(to_join))", "def shout(word):\n # Concatenate the strings: shout_word\n shout_word = word + '!!!'\n\n # Replace print with return\n return shout_word", "def shout(word):\n # Concatenate the strings: shout_word\n shout_word = word + '!!!'\n\n # Replace print with return\n return shout_word", "def display(self, assignment):\r\n # Subclasses can print in a prettier way, or display with a GUI\r\n print(assignment)", "def 
shout():\n # Use echo_word in nonlocal scope\n nonlocal echo_word\n\n echo_word = echo_word + '!!!'", "def printer(self, dictionary_db):\n tf = \"tf : %s\" % self.tf\n # insert the tf line before the idf\n s = self.word_info(dictionary_db).__str__().split(\"\\n\")\n r = s[:3] + [tf] + [s[-1]]\n return \"\\n\".join(r)", "def print_decomposition(self):\n if self.my_rank != 0:\n return\n\n print()\n for i in range(self.box_space.i-1, 0, -1):\n for j in range(self.box_space.k-1, 0, -1):\n print(\" \"*j, end=\"\")\n for k in range(self.box_space.k):\n print(\"{:4d}\".format(self.rank_of_box[(i, j, k)]), end=\"\")\n print()\n print()\n print()", "def draw(self):\n base_x = self.term.width // 2\n base_y = (self.term.height - len(self.OPTIONS)) // 2\n print(end=self.term.home + self.term.clear)\n print(\n self.term.move_xy(base_x - 2, base_y - 2)\n + self.term.green_bold\n + \"SNEK\"\n + self.term.normal\n )\n for index, (label, _action) in enumerate(self.OPTIONS):\n x = base_x - len(label) // 2\n y = base_y + index\n if index == self.selection_index:\n style = self.term.bold_red_reverse\n else:\n style = self.term.red\n print(self.term.move_xy(x, y) + style + label + self.term.normal)", "def display_board(self, board):\r\n print(\" 0 1 2\")\r\n for x, row in enumerate(board):\r\n sys.stdout.write(str(x))\r\n for val in row:\r\n if val == 1:\r\n sys.stdout.write(\"|X\")\r\n elif val == -1:\r\n sys.stdout.write(\"|O\")\r\n else:\r\n sys.stdout.write(\"| \")\r\n print(\"|\")", "def print_board(self):\n print('Board:')\n print('\\n'.join([''.join(['{:4}'.format(item) for item in row]) for row in self.board]))", "def printState(self,board):\n self.printBoard(board.getBoard())\n self.printScore(board,board.getScore())", "def shout(word):\n # Concatenate the strings: shout_word\n shout_word = word + '!!!'\n\n # Print shout_word\n print(shout_word)", "def shout(word):\n # Concatenate the strings: shout_word\n shout_word = word + '!!!'\n\n # Print shout_word\n print(shout_word)" ]
[ "0.6418134", "0.6091784", "0.60144675", "0.5868591", "0.58374834", "0.5763381", "0.5702663", "0.56456727", "0.56032985", "0.5602622", "0.5599726", "0.55596346", "0.5540012", "0.55338246", "0.5492764", "0.54739493", "0.5469291", "0.5463655", "0.54530674", "0.54021096", "0.54003537", "0.5392449", "0.53673506", "0.53634274", "0.5343367", "0.5339937", "0.5328883", "0.53235555", "0.5316245", "0.5312369", "0.53079814", "0.5304423", "0.5298113", "0.5291038", "0.52867806", "0.5282067", "0.5279835", "0.5279835", "0.5279835", "0.5279835", "0.5279835", "0.5279835", "0.527525", "0.52726996", "0.5253468", "0.5244345", "0.52408725", "0.5239788", "0.5219941", "0.5218585", "0.52172285", "0.52034616", "0.52013814", "0.5199693", "0.5193839", "0.5192833", "0.51903236", "0.518718", "0.5175286", "0.5175001", "0.5167117", "0.5160816", "0.51549757", "0.51481366", "0.51479423", "0.51396674", "0.5137272", "0.51335055", "0.51323885", "0.51306164", "0.5121025", "0.5118443", "0.51162195", "0.51129246", "0.51041615", "0.51035184", "0.51021194", "0.50914305", "0.50909597", "0.50878537", "0.50845814", "0.507151", "0.50710166", "0.506771", "0.5065688", "0.50647116", "0.50632393", "0.50632393", "0.5058219", "0.50527954", "0.50516313", "0.504937", "0.50490284", "0.5044581", "0.50423086", "0.5042134", "0.50420535", "0.50420535" ]
0.72022206
1
Save crossword assignment to an image file.
def save(self, assignment, filename):
    from PIL import Image, ImageDraw, ImageFont
    cell_size = 100
    cell_border = 2
    interior_size = cell_size - 2 * cell_border
    letters = self.letter_grid(assignment)

    # Create a blank canvas
    img = Image.new(
        "RGBA",
        (self.crossword.width * cell_size,
         self.crossword.height * cell_size),
        "black"
    )
    font = ImageFont.truetype("assets/fonts/OpenSans-Regular.ttf", 80)
    draw = ImageDraw.Draw(img)

    for i in range(self.crossword.height):
        for j in range(self.crossword.width):
            rect = [
                (j * cell_size + cell_border,
                 i * cell_size + cell_border),
                ((j + 1) * cell_size - cell_border,
                 (i + 1) * cell_size - cell_border)
            ]
            if self.crossword.structure[i][j]:
                draw.rectangle(rect, fill="white")
                if letters[i][j]:
                    w, h = draw.textsize(letters[i][j], font=font)
                    draw.text(
                        (rect[0][0] + ((interior_size - w) / 2),
                         rect[0][1] + ((interior_size - h) / 2) - 10),
                        letters[i][j], fill="black", font=font
                    )

    img.save(filename)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save(self, filename):\n print(\"Saving...\", end=\"\\r\")\n canvas = self.canvas[self.N:self.S,self.W:self.E]\n cv2.imwrite(\"./Output/\"+filename, canvas)\n print(\"Saved:\",filename)", "def save_detection(self, image):\n\t\timg = self.visualize_detection(image)\n\t\timg = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n\t\tcv2.imwrite(f'{SAVE_PATH}{self.clip}{self.num_save}.jpg', img)", "def save_as(self, filename):\n opencv.imwrite(filename, self.img)", "def save(self):\n\n self.image.save(\"./output/\" + self.name + \" pg\" + str(self._page) + \".png\")", "def save_groudtruth(im, coords, filename):\n print 'Saving ground truth ......{0}'.format(filename)\n img_draw = Image.fromarray(cv2.cvtColor(im, cv2.COLOR_BGR2RGB))\n draw = ImageDraw.Draw(img_draw)\n for coord in coords:\n draw.polygon([(float(coord[0]), float(coord[1])), (float(coord[2]), float(coord[3])),\n (float(coord[4]), float(coord[5])), (float(coord[6]), float(coord[7]))],\n outline=\"red\", fill=\"blue\")\n img_draw = np.array(img_draw)\n img_draw = cv2.cvtColor(img_draw, cv2.COLOR_RGB2BGR)\n bname_excludepoint = filename.split('/')[-1].split('.')[0]\n image_path = '/home/yuquanjie/Documents/deep-direct-regression/result/' + bname_excludepoint + '_gt.jpg'\n cv2.imwrite(image_path, img_draw[0: img_draw.shape[0], 0: img_draw.shape[1]])", "def _save(filename, img):\n if not os.path.exists(OUTPUT_DIR):\n os.makedirs(OUTPUT_DIR)\n # filename = filename+'.png'\n filename = os.path.join(OUTPUT_DIR, filename)\n # print(filename, img.shape)\n cv.imwrite(filename, img)", "def save(self):\n filename = os.path.expanduser(\"~/\" + self.name)\n print(filename)\n np.savetxt(filename + \"_left.txt\", self.central)\n np.savetxt(filename + \"_right.txt\", self.boundaries)", "def img_save(self):\n file_name, extension = return_folder_file_extension(self.img_name)[1:]\n image_name_save = \"%s_D=%s_Rs=%s_size=%s_offset=%i%s\" % (file_name, self.D, self.Rs, self.axe_X, self.offset_X+self.offset_X2, extension)\n\n if self.img2 is not None:\n self.img2.save(image_name_save)\n print(\"Saved \"+image_name_save)\n else:\n print(\"No image to save\")", "def writeImage(image, filename):\n Sky = [128,128,128]\n Building = [128,0,0]\n Pole = [192,192,128]\n Road_marking = [255,69,0]\n Road = [128,64,128]\n Pavement = [60,40,222]\n Tree = [128,128,0]\n SignSymbol = [192,128,128]\n Fence = [64,64,128]\n Car = [64,0,128]\n Pedestrian = [64,64,0]\n Bicyclist = [0,128,192]\n Unlabelled = [0,0,0]\n r = image.copy()\n g = image.copy()\n b = image.copy()\n label_colours = np.array([Sky, Building, Pole, Road_marking, Road, Pavement, Tree, SignSymbol, Fence, Car, Pedestrian, Bicyclist, Unlabelled])\n for l in range(0,12):\n r[image==l] = label_colours[l,0]\n g[image==l] = label_colours[l,1]\n b[image==l] = label_colours[l,2]\n rgb = np.zeros((image.shape[0], image.shape[1], 3))\n rgb[:,:,0] = r/1.0\n rgb[:,:,1] = g/1.0\n rgb[:,:,2] = b/1.0\n im = Image.fromarray(np.uint8(rgb))\n # im.save('/Users/koheiyamamoto/Desktop/SegNet/out/' + filename)\n im.save('./out/' + filename)", "def save_image(image, file_name):\n io.imsave(file_name,image)", "def img_save(name,img):\n cv2.imwrite(name,img)", "def write_image(self, filename):\n cv2.imwrite(filename, self.image)", "def save_PPM(self, fileName, imageComment = \"\"):\n file = open(add_ext_if_needed(fileName, \"ppm\"), \"w\")\n file.write(\"P3\\n\"); \n file.write(\"# \" + imageComment + \"\\n\")\n file.write(str(self._image.width()) + \" \" + str(self._image.height()) + \"\\n\")\n file.write(\"255\\n\")\n for y in 
range(self._image.height()):\n for x in range(self._image.width()):\n r,g,b = self.getPixel(x,y);\n file.write(str(r) + \" \" + str(g) + \" \" + str(b) + \"\\n\")\n file.close()", "def save_image(name, image):\n image_name = 'output/' + name + '.png'\n cv2.imwrite(image_name, image)", "def save(self, filepath):\n self.drawer.flush()\n self.img.save(filepath)", "def save(self, x, y, names, path=\"\", zoom=False):\n for i in range(len(x)):\n image = self.generate(x[i], label=np.argmax(y[i]), zoom=zoom)\n image = Image.fromarray((image*255).astype(\"uint8\"))\n image.save(path + names[i] + \".png\", \"PNG\")", "def save_pca(self, filepath):\n mean_beam, principal_components, variances = self.pca()\n image_shape = np.array(self.image_shape)\n with open(filepath, 'wb') as f:\n np.save(f, image_shape)\n np.save(f, mean_beam)\n np.save(f, principal_components)\n np.save(f, variances)\n np.save(f, self.mask)", "def save_image(self, filename):\n if filename[-4:] != '.pkl':\n filename + '.pkl'\n with open(filename, 'wb') as output: # Overwrites any existing file.\n pickle.dump(self, output, pickle.HIGHEST_PROTOCOL)", "def write(self, filename):\n\n self.__image.save(filename)", "def saveauto(self):\n self.inp.getedge()\n ss=ss=strftime(\"_%Y-%m-%d_%H:%M:%S\", gmtime())\n fn=os.environ['VMEWORKDIR'] +\"/WORK/phases/\"+self.name+ss+self.inp.edge+\"_\"+self.inp.inpnum+\"_\"+self.inp.ctpnum+\".ps\"\n rc=self.c1.postscript(file=fn)\n if rc is not '':\n MywError(errmsg=\"File \"+fn+\" cannot be created.\")\n print \"rc=\",rc,len(rc)\n else:\n print \"File \",fn, \" saved.\"", "def save(self, fn):\n plt.imsave(fn, self.image)", "def save(self, filename):\n \n path, name = os.path.split(filename)\n ext = name.split(\".\")[-1]\n _tkExec(self.image.write, filename, format=ext)", "def save_img(self):\r\n self.extract_info_from_file()\r\n path_0 = os.path.join(self.output_path, self.field_id, self.patient_id + self.ext)\r\n path_1 = os.path.join(self.output_path, self.field_id + '_' + self.instance, self.patient_id + self.ext)\r\n if self.shot == '0': # first shot\r\n if os.path.exists(path_0) or os.path.exists(path_1):\r\n print(self.patient_id, 'already done')\r\n pass\r\n else:\r\n if not self.img_computed:\r\n self.compute_img()\r\n if self.instance == '0':\r\n self.img.save(path_0)\r\n else:\r\n self.img.save(path_1)\r\n else: # newer shot\r\n if not self.img_computed:\r\n self.compute_img()\r\n if self.instance == '0':\r\n self.img.save(path_0)\r\n else:\r\n self.img.save(path_1)", "def save(self):\n img = Image.new(\"1\", (self.container.width, self.container.height))\n draw = ImageDraw.Draw(img)\n for item in self.items:\n draw.ellipse(item.box_coordinates(), fill=1)\n del draw\n img.save(\"plot.bmp\", \"bmp\")", "def saveCanvas(self,fileName):\n if self.sync==False:\n self._drawOnCanvas()\n fileName=fileName+'.bmp'\n cv.imwrite(fileName,self.canvas)", "def save_image(self):\n self.save()", "def save(self):\n pickle.dump([self.word2vec, self.img2sentence, self.word_freq, self.num_words, self.word2idx, self.idx2word], open(self.save_file, 'wb'), protocol=4)", "def write(filename, data):\r\n with open(filename, \"wb\") as f:\r\n pic.dump(data, f)", "def store_image(self):\n cv2.imwrite(self.__diff_filename(), self.__diff_image())", "def save_image(self, image_file):\r\n self.ensure_pyplot()\r\n command = 'plt.gcf().savefig(\"%s\")'%image_file\r\n #print 'SAVEFIG', command # dbg\r\n self.process_input_line('bookmark ipy_thisdir', store_history=False)\r\n self.process_input_line('cd -b ipy_savedir', 
store_history=False)\r\n self.process_input_line(command, store_history=False)\r\n self.process_input_line('cd -b ipy_thisdir', store_history=False)\r\n self.process_input_line('bookmark -d ipy_thisdir', store_history=False)\r\n self.clear_cout()", "def save(self, filename):\n self.image.save(filename, self.options.img_format)", "def _save_validation(self, img, imfile):\n val_file = join(dirname(imfile), 'OMR', 'validation', basename(imfile))\n Image.fromarray(img).save(val_file)", "def save_step_3(img_pairs, match_list, output_path=\"./output/step3\"):\n # ... your code here ...\n for i in range(len(img_pairs)):\n name1,tail1 = str.split(filenames[match_list[i][0]],\".\")\n name2,tail2 = str.split(filenames[match_list[i][1]],\".\")\n cv2.imwrite(output_path+\"/\"+name1+\"_\"+name2+\".jpg\", img_pairs[i][0])\n cv2.imwrite(output_path+\"/\"+name2+\"_\"+name1+\".jpg\", img_pairs[i][1])", "def saveOutlined(self, filepath: str):\n\n if self.outlined is None:\n print('No intermidiary image, try run find first')\n return\n self.save(filepath, self.outlined)", "def save(self, filepath):\n save_ckpt = {\n 'ae': self.state_dict(),\n 'optimizer': self.optimizer.state_dict()\n }\n try:\n torch.save(save_ckpt, os.path.join(filepath, 'ckpt_ae.pth'))\n except:\n print('Cannot save autoencoder.')", "def save_image(self, filename):\n raster.save_image(filename, self.image, self.metadata)", "def save_image(filename):\n subprocess(\"camera_save_image(%r)\" % filename)\n ##image = acquire_image()\n ##image.save(filename)", "def save_image(self, file_obj):\n manager = pyglet.image.get_buffer_manager()\n colorbuffer = manager.get_color_buffer()\n\n # if passed a string save by name\n if hasattr(file_obj, 'write'):\n colorbuffer.save(file=file_obj)\n else:\n colorbuffer.save(filename=file_obj)", "def saveImage(turtle, filename):\n ts = turtle.getscreen()\n tc = ts.getcanvas()\n tc.postscript(file=filename)", "def save(self, filename):\n with open(filename, 'wb') as f:\n pickle.dump({'wi': self.W_input_to_hidden, 'wo': self.W_hidden_to_output}, f)", "def save_processed_image(self, filename=None):\n if filename is None:\n tag = self.get_alert_objects_list(\"_\")\n filename = os.path.join(\n config.IMAGE_SAVE_PATH, \"%s_%s_%s_%s.jpg\" %\n (time.strftime(\"%Y-%m-%d_%H.%M.%S\", time.localtime(self.timestamp)),\n self.camera_name, self.detection_model, tag))\n # print(\"Using filename %s\" % filename)\n ret = cv2.imwrite(filename, self.processed_image)\n self.processed_file = filename\n return ret", "def save(\n self,\n output_path: str,\n show_confidence: bool = True,\n ) -> None:\n image = self.draw(show_confidence=show_confidence)\n save_image(image=image, path=output_path)", "def save_to_disk(x_data, y_data, usage, output_dir='cifar10_images'):\n assert usage in ['train', 'val', 'test']\n\n # Set paths\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n for label in np.unique(y_data):\n label_path = os.path.join(output_dir, usage, str(label))\n if not os.path.exists(label_path):\n os.makedirs(label_path)\n\n for idx, img in enumerate(x_data):\n bgr_img = img[..., ::-1] # RGB -> BGR\n # label = y_data[idx][0]\n label = y_data[idx]\n img_path = os.path.join(\n output_dir, usage, str(label), 'img_{}.jpg'.format(idx)\n )\n retval = cv2.imwrite(img_path, bgr_img)\n\n assert retval, 'Problem saving image at index: {}'.format(idx)", "def save_image(start, stop, imgcount, label):\n text = \"\"\n imgfile = select_file(label)\n for p in range(imgcount):\n pxcnt = randint(start, stop)\n imgcurrent = 
create_image(imgfile, pxcnt)\n filename = \"img_train_\" + str(label) + \"_\" + str(p) + \"_\" + str(pxcnt) + \".png\"\n text += \"ctq/dataset/train/\" + filename + \" \" + str(label) + \"\\n\"\n imgcurrent.save(filename)\n text_file = open(imgfile + \"_train_label.txt\", \"w\")\n text_file.write(text)\n text_file.close()", "def save_image(img, path):\n cv2.imwrite(path, img)", "def save(self, filename):\n with open(filename, 'wb') as f:\n pickle.dump({'wi': self.W_input_to_hidden, 'wh':\n self.W_hidden_to_hidden, 'wo': self.W_hidden_to_output}, f)", "def _write_assoc(self, cat, xname, yname, imgname):\n\t\t\n\t\t#if assoc_xname not in assoc_cat.colnames or assoc_yname not in assoc_cat.colnames:\n\t\t#\traise RuntimeError(\"I don't have columns %s or %s\" % (assoc_xname, assoc_yname))\n\t\t\n\t\tif os.path.exists(self._get_assoc_filepath(imgname)):\t\n\t\t\tlogger.warning(\"ASSOC file already exists, I will overwrite it\")\n\n\t\tlines = []\n\t\tfor (number, row) in enumerate(cat):\n\t\t\t# Seems safe(r) to not use row.index but our own number.\n\t\t\tlines.append(\"%.3f\\t%.3f\\t%i\\n\" % (row[xname], row[yname], number))\n\n\t\tlines = \"\".join(lines)\n\t\tf = open(self._get_assoc_filepath(imgname), \"w\")\n\t\tf.writelines(lines)\n\t\tf.close()\n\t\tlogger.debug(\"Wrote ASSOC file %s...\" % (self._get_assoc_filepath(imgname)))", "def save(img, path, file_name):\n\n name = os.path.join(path,file_name).replace('/', os.sep)\n\n io.imsave(name,img)", "def save(self, file):\n boulders = []\n elephants = []\n rhinos = []\n for i in range(5):\n for j in range(5):\n if self[i][j]!= 0:\n piece = self[i][j]\n L = []\n if not isinstance(self[i][j], Boulder):\n L.append(self[i][j].direction[0])\n L.append(self[i][j].direction[1])\n if piece.species == \"Elephant\":\n elephants.append(\"(\" + str(i) + \",\" + str(j)+ \") : np.array([\"+str(L[0])+ \",\" + str(L[1])+\"])\")\n elif piece.species == \"Rhinoceros\":\n rhinos.append(\"(\"+str(i)+\",\" +str(j)+ \") : np.array([\"+str(L[0]) + \",\" + str(L[1])+\"])\")\n elif isinstance(piece, Boulder):\n boulders.append(\"(\" + str(i) + \",\" + str(j) + \")\")\n file.write(\"# King of Siam GameFile \\n\\nplayer_turn {\\n \" + self.playerTurn + \"\\n}\\n\\n\")\n file.write(\"Boulder {\")\n for k in range(len(boulders)):\n file.write(\"\\n \" + boulders[k] + \";\")\n file.write(\"\\n}\\n\\nElephant {\")\n for elt in elephants:\n file.write(\"\\n \" + elt + \";\")\n file.write(\"\\n}\\n\\nRhinoceros {\")\n for elt in rhinos:\n file.write(\"\\n \" + elt + \";\")\n file.write(\"\\n}\")\n\n file.close()", "def imageSaveOutput(image,name,number):\n FileName = name +\" \"+number\n mpimg.imsave(\"test_images_output\"+'//'+FileName,image)\n return 0;", "def save(self):\n fname, _ = getSaveFileName(self, \"Save cluster plot to\", 'cluster_plot.png')\n if fname:\n fname = str(fname) # convert from QString\n image = self.grabFrameBuffer() # defaults to withAlpha=False, makes no difference\n try:\n image.save(fname)\n except Exception as e:\n QtWidgets.QMessageBox.critical(\n self.panel, \"Error saving file\", str(e),\n QtWidgets.QMessageBox.Ok, QtWidgets.QMessageBox.NoButton)\n print('Cluster plot saved to %r' % fname)", "def _save_annotation(annotation, filename):\n\n pil_image = Image.fromarray(annotation.astype(dtype=np.uint8))\n '''\n with tf.io.gfile.GFile(filename, mode='w') as f:\n #with open(filename, mode='w') as f:\n print(f)\n pil_image.save(f, 'PNG')\n '''\n pil_image.save(filename)", "def saveImage(self, fileName=\"mandelbrot.frac\"):\n # Save the image as 
a PNG\n if fileName == \"\":\n fileName = \"mandelbrot.frac\"\n directories = fileName.split(\"/\")\n for n in directories:\n if \".frac\" in n:\n name = n.rsplit(\".\", 1)[0]\n self.img.write(f\"{name}.png\")\n print(f\"Wrote image {name}.png\")", "def save(self):\n fname = self.dir_saving+str(self.folder)+'/colours.txt'\n if not os.path.isfile(fname):\n self.file_com = open(fname, 'w')\n else:\n print 'warning this person has an objects file in its dir, I will rewrite it.'\n self.file_com = open(fname, 'w')\n\n self.file_com.write(self.all_objects['upper']+','+self.all_objects['lower'])\n # self.all_objects = {}\n self.first_click = 1\n self.file_com.close()\n self.NextVideo()\n # count = 1\n # for im_name in self.onlyfiles:\n # img = cv2.imread(self.dir2+im_name)\n # cv2.rectangle(img,(0,0),(250,50),(255,255,255),-1)\n # cv2.putText(img,'frame : '+str(count),(10,30), cv2.FONT_HERSHEY_SIMPLEX, 1,(0,0,0),2)\n # img = self.add_objects(img)\n # cv2.imwrite(self.dir_saving+str(self.folder)+'/obj_images/'+im_name,img)\n # count+=1\n self.clear", "def save_png(self, filename):\n post_script = self.canvas.postscript().encode()\n img = Image.open(io.BytesIO(post_script))\n img.save(filename, format=\"PNG\")", "def save_twocolor(simsAB, iset, fname, force=False):\n data_panels = np.array( map(lambda x: x.as_numpy_array(), iset.get_raw_data(0) ))\n data = np.array( [ simsAB[0], simsAB[1],\n np.array(simsAB[0]) + np.array(simsAB[1]),\n data_panels])\n if os.path.exists(fname) and not force:\n print(\"%s exists!\" % fname)\n return\n\n with h5py.File(fname,\"w\") as h5:\n h5.create_dataset(\"sim64_d9114_images\", data=data)", "def save_step_4(imgs, output_path=\"./output/step4\"):\n # ... your code here ...\n cv2.imwrite(output_path+\"/output.jpg\", imgs)", "def save_crops(self, workspace):\n objects_name = self.objects_name.value\n objects = workspace.object_set.get_objects(objects_name)\n bit_depth = self.bit_depth.value\n if self.input_type == IF_IMAGE:\n image_name = self.image_name.value\n image = workspace.image_set.get_image(image_name)\n pixels = image.pixel_data\n elif self.input_type == IF_OBJECTS:\n obj_name = self.input_object_name.value\n inp_obj = workspace.object_set.get_objects(obj_name)\n pixels = inp_obj.get_segmented()\n else:\n raise (\"invalid choice of input\")\n\n filename = self.get_filename(workspace)\n object_extension = self.object_extension.value\n if filename is None: # failed overwrite check\n return\n\n slices = ndi.find_objects(objects.segmented)\n slices, labels = zip(\n *[(s, label) for label, s in enumerate(slices) if s is not None]\n )\n\n ext_slices = [\n self._extend_slice_touple(\n sl, object_extension, [pixels.shape[0], pixels.shape[1]]\n )\n for sl in slices\n ]\n out_folder = os.path.dirname(filename)\n basename = os.path.splitext(os.path.basename(filename))[0]\n # the stack for imctools needs to be cxy, while it is xyc in cp\n if len(pixels.shape) == 2:\n stack = pixels.reshape([1] + list(pixels.shape))\n else:\n stack = np.rollaxis(pixels, 2, 0)\n\n # fix the dtype\n if bit_depth == BIT_DEPTH_8:\n stack = skimage.util.img_as_ubyte(stack)\n elif bit_depth == BIT_DEPTH_16:\n stack = skimage.util.img_as_uint(stack)\n elif bit_depth == BIT_DEPTH_FLOAT:\n stack = skimage.util.img_as_float(stack).astype(np.float32)\n\n self._save_object_stack(out_folder, basename, stack, ext_slices, labels)\n self.save_filename_measurements(workspace)\n if self.show_window:\n workspace.display_data.wrote_image = True", "def save_to_file(filename, R_list):\n k = len(R_list)\n 
with open(filename+'_board', mode='w') as file:\n file.write('There are totally {} candidates\\n'.format(k))\n for i in range(k):\n file.write('# {}\\n'.format(i+1))\n file.write(str(get_board(R_list[i])) + '\\n')\n\n save_var(filename, R_list)", "def write_to_file(self, filename):\n\n loader = ImageLoader()\n loader.write(self, filename)", "def save_image(img: Image, filename: str) -> None:\r\n img.save(filename)", "def saveimage(self):\n if self.saveimageButton.isChecked():\n self.save = True\n self.channelsOpen()\n self.movetoStart()\n self.saveimageButton.setText('Abort')\n self.guarda = np.zeros((self.numberofPixels, self.numberofPixels))\n self.liveviewStart()\n\n else:\n self.save = False\n print(\"Abort\")\n self.saveimageButton.setText('reintentar Scan and Stop')\n self.liveviewStop()", "def write_image(img, img_name):\n\n cv2.imwrite(img_name, img)", "def saveImage(self, file_name='./out.jpg'):\n frame = self.camera.get_frame()\n color = frame.color_image[0]\n cv2.imwrite(file_name, color)\n cv2.imshow('frame', color)\n cv2.waitKey()\n cv2.destroyAllWindows()", "def savePicture(self):\n self.file_name = QtGui.QFileDialog.getSaveFileName(self, \n \"Save as... (specify extension)\", \"\")\n cv2.imwrite(self.file_name, self.frame)", "def yolo_save_img(image, class_ids, boxes, labels, confidences, colors, file_path):\n for i, box in enumerate(boxes):\n # extract the bounding box coordinates\n (x, y) = (box[0], box[1])\n (w, h) = (box[2], box[3])\n\n # draw a bounding box rectangle and label on the image\n color = [int(c) for c in colors[class_ids[i]]]\n cv2.rectangle(image, (x, y), (x + w, y + h), color, 3)\n text = '{}'.format(labels[i])\n # text = '{}: {:.4f}'.format(labels[i], confidences[i])\n print(text)\n\n font_scale = 1.3\n # set the rectangle background to white\n rectangle_bgr = color\n # set some text\n # get the width and height of the text box\n (text_width, text_height) = cv2.getTextSize(text, cv2.FONT_HERSHEY_SIMPLEX, fontScale=font_scale, thickness=1)[0]\n # set the text start position\n text_offset_x = x\n text_offset_y = y - 3 \n # make the coords of the box with a small padding of two pixels\n box_coords = ((text_offset_x, text_offset_y), (text_offset_x + text_width + 10, text_offset_y - text_height - 10 ))\n cv2.rectangle(image, box_coords[0], box_coords[1], rectangle_bgr, cv2.FILLED)\n cv2.putText(image, text, (text_offset_x, text_offset_y), cv2.FONT_HERSHEY_SIMPLEX, \n fontScale=font_scale, color=(255, 255, 255), thickness=2)\n cv2.imwrite(file_path, image)\n return image", "def create_and_save_sc_words(data_file, params, filename):\n print(\"Reading images.\")\n X, y, filenames = read_data(data_file, test=False)\n\n sc_words = {}\n\n for i, img in enumerate(X):\n print(\"Processing image \", i)\n edge_pixels = img.nonzero()\n indices = np.arange(len(edge_pixels[0]))\n\n try:\n samples = np.random.choice(indices, (sample_points), False)\n except ValueError:\n samples = indices\n\n points = np.array([[edge_pixels[0][i], edge_pixels[1][i]] for i in samples])\n if len(points) == 0:\n continue\n descriptors = gen_sc_descriptors(points, **params)\n sc_words[filenames[i]] = descriptors\n # pdb.set_trace()\n pickle.dump(sc_words, open(filename, \"wb\"))", "def save(images, output):\n for image, frame in images:\n image.save(output(frame))", "def save_to_disk(self, file_name = \"vehicle_classifier.pkl\"):\n self.classifier.save_to_disk(file_name)", "def save(self, filename):\n pass", "def iwrite(im, filename, **kwargs):\n\n # TODO check valid input\n\n ret = 
cv.imwrite(filename, im, **kwargs)\n\n if ret is False:\n print('Warning: image failed to write to filename')\n print('Image =', im)\n print('Filename =', filename)\n\n return ret", "def save_step_2(imgs, match_list, output_path=\"./output/step2\"):\n # ... your code here ...\n for i in range(len(imgs)):\n name1,tail1 = str.split(filenames[match_list[i][0]],\".\")\n name2,tail2 = str.split(filenames[match_list[i][2]],\".\")\n cv2.imwrite(output_path+\"/\"+name1+\"_\"+str(match_list[i][1])+\"_\"+name2+\"_\"+str(match_list[i][3])+\"_\"+str(match_list[i][4])+\".jpg\", imgs[i])", "def save_image(path_to_save, image, counter, session_label, date, sep=\"_\"):\n\n date = sep + date if date else \"\"\n session_label = sep + session_label if session_label else \"\"\n counter = sep + str(counter) if counter or counter == 0 else \"\"\n cv.imwrite(path_to_save + date + session_label + counter + \".jpg\", image)", "def crop_save( img_path_filename, lines_boxes, lines_texts, lines_probs, filename, basename, output_dir_name ):\n\t# Read the image\n\timage = Image.open( img_path_filename )\n\t# Get image's size\n\twidth, height = image.size\n\n\ti = 0\n\ttext_local = \"\"\n\ttext_global = \"\"\n\twhile i < len(lines_boxes):\n\t\t##################################################################################################\n\t\t# Left Upper Corner\n\t\tx1 = lines_boxes[i][0]\n\t\tx1 = x1 - 8\n\t\tif x1 < 0:\n\t\t\tx1 = 0\n\n\t\ty1 = lines_boxes[i][1]\n\t\ty1 = y1 - 1\n\t\tif y1 < 0:\n\t\t\ty1 = 0\n\n\t\t# Right Lower Corner\n\t\tx2 = lines_boxes[i][2]\n\t\tx2 = x2 + 10\n\t\tif x2 > (width - 1):\n\t\t\tx2 = width - 1\n\n\t\ty2 = lines_boxes[i][3]\n\t\ty2 = y2 + 1\n\t\tif y2 > (height - 1):\n\t\t\ty2 = height - 1\n\n\t\t# Crop the block and save it\n\t\tn_line = \"%03d\" % (i+1)\n\t\tline_filename = output_dir_name + \"/\" + basename + \"_\" + n_line + \".jpg\"\t\t\n\n\t\timg_cropped = image.crop( (x1, y1, x2, y2) )\n\t\timg_cropped.save( line_filename, 'JPEG', quality = 100 )\n\n\t\t##################################################################################################\n\t\t# Create the information about the cropped line for the local and global text files\n\t\ttext_line = basename + \"_\" + n_line + \".jpg\\t\" + str(x1) + \"\\t\" + str(y1) + \"\\t\" + str(x2) + \"\\t\" + str(y2) + \"\\t\" + ''.join(lines_texts[i]) + \"\\n\"\n\t\ttext_local += text_line\n\t\ttext_global += filename + \"\\t\" + text_line\n\n\t\t##################################################################################################\n\t\t# Creation of the text and probability file for each line\n\t\tj = 0\n\t\tcontent_text_file = \"\"\n\t\tcontent_prob_file = \"\"\n\t\twhile j<len(lines_texts[i]):\n\t\t\tcontent_text_file += lines_texts[i][j]\n\t\t\tcontent_prob_file += lines_texts[i][j] + '\\t' + str(lines_probs[i][j]) + '\\n'\n\t\t\tj = j + 1\n\t\t# Write to disk the text file\n\t\ttext_filename = output_dir_name + \"/\" + basename + \"_\" + n_line + \".txt\"\n\t\twith open( text_filename, \"w+\" ) as f_text:\n\t\t\tf_text.write( content_text_file )\n\t\t# Write to disk the probabilities file\n\t\tprob_filename = output_dir_name + \"/\" + basename + \"_\" + n_line + \".prob\"\n\t\twith open( prob_filename, \"w+\" ) as f_prob:\n\t\t\tf_prob.write( content_prob_file )\n\n\t\ti = i + 1\n\n\treturn( text_local, text_global )", "def save(self):\n fname = getSaveFileName(self, \"Save cluster plot to\", 'cluster_plot.png')\n if fname:\n fname = str(fname) # convert from QString\n image = self.grabFrameBuffer() # defaults 
to withAlpha=False, makes no difference\n try:\n image.save(fname)\n except Exception as e:\n QtGui.QMessageBox.critical(\n self.panel, \"Error saving file\", str(e),\n QtGui.QMessageBox.Ok, QtGui.QMessageBox.NoButton)\n print('cluster plot saved to %r' % fname)", "def WarpSave(fn, w=500,h=500, msg=True):\n MSG(\"WarpSave\")\n img = WarpImage(w,h)\n cv2.imwrite(fn,img)\n if msg:\n print \"Image written: \", fn\n return", "def imwrite(image, path):\n\n if image.ndim == 3 and image.shape[2] == 1: # for gray image\n image = np.array(image, copy=True)\n image.shape = image.shape[0:2]\n\n imgarray=((image+1.0)*127.5).astype(np.uint8)\n img=Image.fromarray(imgarray)\n img.save(path)", "def save_img(img, file_to_save):\n nparray_rep = img\n if not isinstance(img, np.ndarray):\n nparray_rep = k_image.img_to_array(img)\n k_image.save_img(file_to_save, nparray_rep)", "def save(self):\n data = (\n self.Joints,\n self.Links,\n self.joint_syms,\n self.global_syms,\n self.name,\n self.sym_prefix,\n )\n cloudpickle.dump(data, open(self.save_filename, \"wb\"))", "def write(self, pathname='wind.png'):\r\n cv2.imwrite(pathname, self.matrix * 255)", "def tiffwrite(filename, im):\n tf.imwrite(filename, im)", "def outputPulses(self,filename):\n np.save(filename,self.getData())\n return", "def write_file(_data, _label, _clinical, _contour, _type):\n pickle.dump(np.array(_data), open(_type + '_data.pxl', 'wb'))\n pickle.dump(np.array(_label), open(_type + '_label.pxl', 'wb'))\n pickle.dump(np.array(_clinical), open(_type + '_clinical.pxl', 'wb'))\n pickle.dump(np.array(_contour), open(_type + '_contour.pxl', 'wb'))", "def save_file(camera, frame):\n save = input(\"Would you like to save your drawing? Enter yes or no \")\n if save == \"yes\" or save == \"y\" or save == \"ye\" or save == \"yes \": # accounting for typos\n name = input(\"What would you like to name your masterpiece? 
\")\n filename = 'images/' + name + '.png'\n cv2.imwrite(filename, cv2.flip(frame,1)) # saves the image as the last frame\n camera.release()\n pygame.quit()\n\n # reopen saved picture to display for user\n img = cv2.imread(filename, 1)\n b,g,r = cv2.split(img) # get b,g,r\n rgb_img = cv2.merge([r,g,b]) # convert from bgr colorspace to rgb\n crop_img = rgb_img[36:450, 0:600] # crop out the colorbar\n cv2.imshow(filename, crop_img)\n cv2.imwrite(filename, crop_img)\n cv2.waitKey(10000)\n cv2.destroyAllWindows()\n camera.release()\n pygame.quit() # cleanup the camera and close any open windows\n else:\n print(\"Thank you for trying CVPaint!\")\n pygame.quit()\n camera.release()\n cv2.destroyAllWindows()", "def saveImageAs(self, name):\n\t\tself.image.save(name)", "def save(self, exp_file, gat_file):\n\t\tto_save = np.stack((self.b, self.sigma)) #(2, K)\n\t\tto_save = np.concatenate((self.W,to_save) , axis = 0) #(D+2,K)\n\t\tnp.savetxt(exp_file, to_save)\n\t\tself.gating.save(gat_file)\n\t\treturn", "def saveImage(self, event):\r\n fileWritten = self.image.writeFile()\r\n self.statusBar.SetStatusText(\"Saved {}\".format(fileWritten))", "def save(self, filename: str):\n if os.path.isfile(filename):\n raise FileExistsError(\"Target file exists\")\n\n with open(filename, \"wb\") as binary_writer:\n pickle.dump([self.gene_sets, self.gene_set_size, self.gene_set_names,\n self.interactors, self.n_curated, self.n_interactors],\n binary_writer)", "def save_image(path, image): \n if len(image.shape) == 4:\n image = image.reshape((image.shape[1], image.shape[2], image.shape[3]))\n image = np.clip(image * 255.0, 0, 255).astype(np.uint8)\n skimage.io.imsave(path, image)", "def camera_save_image(filename):\n image = camera_acquire_image()\n image.save(filename)", "def write_validation(self, img, imfile):\n self._save_validation(img, imfile)\n self._save_info_image(img, imfile)", "def save(self, filename):\n\n torch.save(self.state_dict(), filename)", "def write_reference_image(image, file_path):\n image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n image_file = file_io.FileIO(file_path, mode='w')\n scipy.misc.imsave(image_file, image)\n if FLAGS.wandb:\n import_wandb_idempotent()\n wandb.log({\"reference_images\": wandb.Image(\n image, caption=file_path)})", "def _save(self):\n\t\t\n\t\tdirectory = self.Output_path\n\n\t\t# replace with \n\t\t# file_name = hermes.mk_themis_file_name(themis_obj = self)\n\t\tfile_name = f'Themis_{self.CELL_ID[\"experiment\"]}_u{self.CELL_ID[\"unit\"]}_c{self.CELL_ID[\"cell\"]}_r{self.CELL_ID[\"run\"]}.pkl'\n\n\t\tsave_path = directory / file_name\n\n\t\t# Atomic saving (helpful?)\n\t\ttemp_path = save_path.with_suffix(save_path.suffix + '.tmp')\n\t\t\n\t\tself.SavePath = save_path\n\n\t\t\n\t\twith open(temp_path, 'wb') as f:\n\t\t\tpickle.dump(self, f)\n\n\t\ttemp_path.rename(save_path)\n\n\t\tprint(f'Saved {self.RUN_KEY} as {save_path}')", "def _export_button_cb(self):\n filename = asksaveasfile(\n mode='w',\n filetypes=(('YAML files', '*.yaml'), ('All files', '*.*'))\n )\n\n if not filename:\n return\n\n with open(filename.name, 'w') as f:\n f.write('obstacles:\\n')\n for obstacle in self.obstacles:\n f.write(f' - {str(obstacle)}')\n f.write('\\n')", "def save_image(image, output_folder, output_name):\n\n\tfolder_path = compute_path(output_folder, 'dataset')\n\tos.makedirs(folder_path, exist_ok=True)\n\n\tfile_path = os.path.join(folder_path, output_name + '.png')\n\timage.save(file_path)", "def saveToFile():\n save_interface = Tk()\n save_interface.filename = 
filedialog.asksaveasfilename(initialdir = os.getcwd(), defaultextension=\".bti\", title = \"Save as\",filetypes = ((\"Bit Tune Image File\",\"*.bti\"),(\"All Files\",\"*.*\")))\n save_interface.destroy()\t\n\n with open (save_interface.filename,'w') as f:\n f.write(str(coordinates))", "def save(image, name):\n from matplotlib import pyplot\n import matplotlib as mpl\n fig = pyplot.figure()\n ax = fig.add_subplot(1,1,1)\n imgplot = ax.imshow(image, cmap=mpl.cm.Greys)\n imgplot.set_interpolation('nearest')\n ax.xaxis.set_ticks_position('top')\n ax.yaxis.set_ticks_position('left')\n pyplot.savefig(name)" ]
[ "0.65461063", "0.6395748", "0.63688743", "0.6313389", "0.62285316", "0.61451906", "0.6133265", "0.6110629", "0.61080384", "0.60995334", "0.6096377", "0.60782754", "0.60167956", "0.60059804", "0.59735614", "0.5961569", "0.5951604", "0.5941784", "0.591758", "0.5905942", "0.58904254", "0.58610684", "0.5856268", "0.5855799", "0.5845757", "0.58446544", "0.5829126", "0.5824401", "0.5798292", "0.5796669", "0.57686955", "0.57648265", "0.57503164", "0.5746222", "0.57441664", "0.5740034", "0.57284933", "0.57225895", "0.57187915", "0.5706697", "0.56915236", "0.56842065", "0.5683766", "0.56814456", "0.5669883", "0.566411", "0.56634593", "0.5661214", "0.56476164", "0.5636582", "0.5627922", "0.5626423", "0.56223595", "0.56140333", "0.5613645", "0.55916464", "0.55914366", "0.5584959", "0.5581192", "0.5577919", "0.5572124", "0.5570049", "0.5565626", "0.5561723", "0.55598557", "0.5557014", "0.5554905", "0.5550314", "0.5542322", "0.5541055", "0.55407965", "0.5540194", "0.55313164", "0.5525075", "0.5517713", "0.5516273", "0.5511317", "0.5510918", "0.5504133", "0.55037224", "0.549531", "0.54884815", "0.54696727", "0.54668844", "0.5466725", "0.5458371", "0.5456114", "0.54553187", "0.5453606", "0.54518867", "0.54453975", "0.54417163", "0.54406035", "0.5436591", "0.54348207", "0.54324836", "0.54283214", "0.54225373" ]
0.78399444
1
Enforce node and arc consistency, and then solve the CSP.
def solve(self):
    self.enforce_node_consistency()
    self.ac3()
    return self.backtrack(dict())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def solve_csp(nodes, arcs, max_steps):\n\n nodes = list(nodes)\n print 'nodes:', nodes\n\n node_values_dict = dict(zip(nodes, '2'*len(set(nodes))))\n print 'initial random assignment', node_values_dict\n indexes = np.arange(len(nodes))\n\n graph = {}\n for arc in arcs:\n if not arc[0] in graph:\n graph[arc[0]] = []\n if not arc[1] in graph:\n graph[arc[1]] = []\n graph[arc[0]].append(arc[1])\n graph[arc[1]].append(arc[0])\n for i in indexes:\n if i in graph:\n continue\n else:\n graph[i] = []\n graph = dict(sorted(graph.items()))\n print 'graph:', graph\n\n domain = [i for i in np.arange(1, 10, 1)]\n print 'initial domain for each node:', domain\n\n superAdjacency ={}\n for i in np.arange(len(nodes)):\n superAdjacency[i] = []\n superAdjacency[i].append(nodes[i])\n superAdjacency[i].append(node_values_dict[nodes[i]])\n superAdjacency[i].append(graph[i])\n superAdjacency[i].append(domain)\n\n print 'superAdjacency', superAdjacency\n\n def getNodeType(superAdjacency, index):\n return list(superAdjacency[index])[0]\n\n def getCurrentAssignment(superAdjacency, index):\n return list(superAdjacency[index])[1]\n\n def getCurrentAssignmentForList(superAdjacency, indexList):\n return [int(list(superAdjacency[index])[1]) for index in indexList]\n\n def getSolution(superAdjacency):\n return [int(list(superAdjacency[index])[1]) for index in superAdjacency]\n\n def getNeighbours(superAdjacency, index):\n return list(superAdjacency[index])[2]\n\n def getDomain(superAdjacency, index):\n return list(superAdjacency[index])[3]\n\n def updateSuperAdjacency(superAdjacency, nodeType, newValue):\n updateList =[]\n for i in superAdjacency:\n if str(getNodeType(superAdjacency, i)) == nodeType:\n updateList.append(i)\n for i in updateList:\n superAdjacency[i][1] = newValue\n\n def isSolution():\n return graphConstraints(superAdjacency)\n\n def graphConstraints(superAdjacency):\n graphEval = []\n\n for index in superAdjacency:\n neighbours = getNeighbours(superAdjacency, index)\n nodeType = getNodeType(superAdjacency, index)\n\n if nodeType == 'T':\n graphEval.append(int(str(eval(str(\n getCurrentAssignmentForList(superAdjacency, neighbours)).replace(',', '*'))[0])[0]))\n elif nodeType == 'C':\n return 'NA'\n elif nodeType == 'S':\n graphEval.append(int(str(eval(str(\n getCurrentAssignmentForList(superAdjacency, neighbours)).replace(',', '*'))[0])[::-1][0]))\n elif nodeType == 'H':\n graphEval.append(int(str(np.sum(\n getCurrentAssignmentForList(superAdjacency, neighbours)))[0]))\n if nodeType == 'P':\n graphEval.append(int(str(np.sum(\n getCurrentAssignmentForList(superAdjacency, neighbours)))[::-1][0]))\n\n currentAssignment = [item[1] for item in superAdjacency.values()]\n difference = map(sub, currentAssignment, graphEval)\n\n if sum(difference) == 0:\n return True\n else:\n return difference\n\n def findConflictVariable(superAdjacency, lastUpdateNode):\n node_conflict_count = {}\n for node in node_values_dict:\n node_conflict_count[node] = 0\n for index in superAdjacency:\n neighbours = getNeighbours(superAdjacency, index)\n nodeType = getNodeType(superAdjacency, index)\n if nodeType == 'T':\n try:\n if getCurrentAssignment(superAdjacency, index) != \\\n int(str(eval(str(getCurrentAssignmentForList(superAdjacency, neighbours)).replace(',', '*'))[0])[0]):\n node_conflict_count[nodeType] = node_conflict_count[nodeType] + 1\n except:\n continue\n elif nodeType == 'S':\n try:\n if getCurrentAssignment(superAdjacency, index) != int(str(eval(str(getCurrentAssignmentForList(superAdjacency, neighbours)).replace(',', 
'*'))[0])[::-1][0]):\n node_conflict_count[nodeType] = node_conflict_count[nodeType] + 1\n except:\n continue\n elif nodeType == 'H':\n try:\n if getCurrentAssignment(superAdjacency, index) != int(str(np.sum(getCurrentAssignmentForList(superAdjacency, neighbours)))[0]):\n node_conflict_count[nodeType] = node_conflict_count[nodeType] + 1\n except:\n continue\n if nodeType == 'P':\n try:\n if getCurrentAssignment(superAdjacency, index) != int(str(np.sum(getCurrentAssignmentForList(superAdjacency, neighbours)))[::-1][0]):\n node_conflict_count[nodeType] = node_conflict_count[nodeType] + 1\n except:\n continue\n choices = [k for k, v in node_conflict_count.items() if v > 0]\n if len(choices) > 0:\n updateNode = random.choice(choices)\n\n if updateNode == lastUpdateNode:\n choices.pop(choices.index(updateNode))\n try:\n lastUpdateNode = random.choice(choices)\n return lastUpdateNode, lastUpdateNode\n except:\n return lastUpdateNode, lastUpdateNode\n else:\n lastUpdateNode = updateNode\n return updateNode, lastUpdateNode\n else:\n return 'NA', 'NA'\n\n\n\n def valueForConflictedVariable(superAdjacency, var):\n for index in superAdjacency:\n nodeType = getNodeType(superAdjacency, index)\n neighbours = getNeighbours(superAdjacency, index)\n if not neighbours:\n continue\n elif str(nodeType) == str(var):\n domain = getDomain(superAdjacency, index)\n\n choice = random.choice(domain)\n if nodeType == 'T':\n choice = int(str(eval(str(getCurrentAssignmentForList(superAdjacency, neighbours)).replace(',', '*'))[0])[0])\n elif nodeType == 'S':\n choice = int(str(eval(str(getCurrentAssignmentForList(superAdjacency, neighbours)).replace(',', '*'))[0])[::-1][0])\n elif nodeType == 'H':\n choice = int(str(np.sum(getCurrentAssignmentForList(superAdjacency, neighbours)))[0])\n if nodeType == 'P':\n choice = int(str(np.sum(getCurrentAssignmentForList(superAdjacency, neighbours)))[::-1][0])\n\n choice = int(choice)\n if choice % 2 == 0:\n return choice\n else:\n return choice\n\n def min_conflicts(nodes, arcs, max_steps):\n lastUpdateNode = ''\n for i in range(max_steps):\n if isSolution() == True:\n return\n var, lastUpdateNode = findConflictVariable(superAdjacency, lastUpdateNode)\n if var != 'NA':\n value = valueForConflictedVariable(superAdjacency, var)\n updateSuperAdjacency(superAdjacency, var, value)\n node_values_dict[var] = value\n else:\n pass\n\n return\n\n min_conflicts(nodes, arcs, max_steps)\n node_values = getSolution(superAdjacency)\n return node_values", "def enforce_node_consistency(self):\n # Loop over each variable (space for word) in the crossword\n # Use copy to prevent domains from being modified while looping\n for var in self.domains.copy():\n # Get all unary constraints for this variable\n for value in self.domains[var].copy():\n # Check if the value is consistent with all unary constraints\n if len(value) != var.length:\n # If not, remove the value from the domain\n self.domains[var].remove(value)\n # No return value is necessary", "def ac3(csp, arcs=None):\n #print \"============BEGIN==================\"\n\n queue_arcs = deque(arcs if arcs is not None else csp.constraints.arcs())\n\n \"\"\"\n print \"QUEUE ARCS\"\n for x in queue_arcs:\n print x\n print \"fin queue arcs\"\n print \"\\nconstraints\"\n for x in csp.constraints:\n print x\n print \"end constraints\"\n \"\"\"\n while queue_arcs:\n (v1, v2) = queue_arcs.pop()\n #print str(v1) + \"---\"+ str(v2)\n if revise(csp, v1, v2):\n if not v1.domain:\n return False\n #print str(v1)+ \"LOOK HEREREREREREREAFVSD\"\n for c in 
csp.constraints[v1]:\n #print \"WTF IS THE ARC\" + str(c)\n if c.var2 != v1 and c.var2 != v2:\n queue_arcs.append((c.var2,v1))\n\n \"\"\"print \"AC3 IS RETURNING TRUE\"\n for x in queue_arcs:\n print x\"\"\"\n\n return True\n \n\n # TODO implement this\n pass", "def ac3(csp, arcs=None):\n\n queue_arcs = deque(arcs if arcs is not None else csp.constraints.arcs())\n while queue_arcs:\n var1, var2 = queue_arcs.popleft()\n\n # Propagate changes in var1.domain to neighbors\n if revise(csp, var1, var2):\n if len(var1.domain) == 0:\n return False\n for (v, neighbor) in csp.constraints[var1].arcs():\n if (neighbor != var2):\n queue_arcs.append((v, neighbor))\n return True", "def make_arc_consistent(Xj, Xk, csp):\r\n # csp.curr_domains[Xj] = []\r\n for val1 in csp.domains[Xj]:\r\n keep = False # Keep or remove val1\r\n for val2 in csp.domains[Xk]:\r\n if csp.constraints(Xj, val1, Xk, val2):\r\n # Found a consistent assignment for val1, keep it\r\n keep = True\r\n break\r\n\r\n if not keep:\r\n # Remove val1\r\n csp.prune(Xj, val1, None)\r\n\r\n return csp.curr_domains[Xj]", "def ac3(self, arcs=None):\n if arcs == None:\n #creates a queue of arcs to update\n arcs= []\n for node1 in self.domains:\n for node2 in self.domains:\n if node1 != node2:\n #for each pair of nodes that intersect, add them as a tuple pair to a list of arcs\n if self.crossword.overlaps[node1,node2] != None: \n arcs.append((node1,node2))\n\n while arcs != []:\n x= arcs[0][0]\n y= arcs[0][1]\n\n if self.revise(x, y):\n #if the domain of node x is empty after revision, this problem has no solution\n if len(self.domains[x]) == 0:\n return False\n #if the arc is updated successfully, node x may no longer be arc consistent in respect to other nodes that it may have been before\n #we must then add the arcs between the revised x and all of its neighbors(except y as we have just checked it) to the queue\n for neighbor in self.crossword.neighbors(x):\n if neighbor != y:\n arcs.append((neighbor, x))\n #remove arcs from queue after revision\n arcs.pop(0)\n else:\n arcs.pop(0)\n \n return True", "def check_integrity(self):\r\n nodes = graph.ops(self.inputs, self.outputs)\r\n if self.apply_nodes != nodes:\r\n missing = nodes.difference(self.apply_nodes)\r\n excess = self.apply_nodes.difference(nodes)\r\n raise Exception(\r\n \"The nodes are inappropriately cached. missing, in excess: \",\r\n missing, excess)\r\n for node in nodes:\r\n if node.fgraph is not self:\r\n raise Exception(\"Node should belong to the FunctionGraph.\",\r\n node)\r\n for i, variable in enumerate(node.inputs):\r\n if variable.fgraph is not self:\r\n raise Exception(\r\n \"Input of node should belong to the FunctionGraph.\",\r\n variable, (node, i))\r\n if (node, i) not in variable.clients:\r\n raise Exception(\"Inconsistent clients list.\",\r\n (node, i), variable.clients)\r\n variables = set(graph.variables(self.inputs, self.outputs))\r\n if set(self.variables) != variables:\r\n missing = variables.difference(self.variables)\r\n excess = self.variables.difference(variables)\r\n raise Exception(\r\n \"The variables are inappropriately cached. 
missing, in excess: \",\r\n missing, excess)\r\n for variable in variables:\r\n if (variable.owner is None and\r\n variable not in self.inputs and\r\n not isinstance(variable, graph.Constant)):\r\n raise Exception(\"Undeclared input.\", variable)\r\n if variable.fgraph is not self:\r\n raise Exception(\"Variable should belong to the FunctionGraph.\",\r\n variable)\r\n for node, i in variable.clients:\r\n if node == 'output':\r\n if self.outputs[i] is not variable:\r\n raise Exception(\"Inconsistent clients list.\",\r\n variable, self.outputs[i])\r\n continue\r\n if node not in nodes:\r\n raise Exception(\"Client not in FunctionGraph.\",\r\n variable, (node, i))\r\n if node.inputs[i] is not variable:\r\n raise Exception(\"Inconsistent clients list.\",\r\n variable, node.inputs[i])", "def forward_checking(csp, var, value, assignment, removals):\r\n csp.support_pruning()\r\n check=0\r\n for B in csp.neighbors[var]:\r\n if B not in assignment:\r\n for b in csp.curr_domains[B][:]:\r\n check+=1\r\n if not csp.constraints(var, value, B, b):\r\n csp.prune(B, b, removals)\r\n # we have a failure\r\n # we check if domains list for variable B is not empty\r\n # and increase weight of B,var by 1\r\n if not csp.curr_domains[B]:\r\n csp.weight[(B,var)] += 1\r\n return False,check\r\n return True,check", "def check_all_constraints(csp) :\n constraints=csp.get_all_constraints()\n for constraint in constraints:\n var1 = constraint.var1\n var2 = constraint.var2\n val1=csp.get_assigned_value(var1)\n val2=csp.get_assigned_value(var2)\n if val1!=None and val2!=None:\n if not constraint.check(val1,val2):\n return False\n return True", "def check_all_constraints(csp) :\n\n for constraint in csp.get_all_constraints():\n assigned1 = csp.get_assigned_value(constraint.var1)\n assigned2 = csp.get_assigned_value(constraint.var2)\n check = constraint.check(assigned1,assigned2)\n if check==False and assigned1!=None and assigned2!=None:\n return False \n return True", "def inference(csp, variable):\n return ac3(csp, csp.constraints[variable].arcs())", "def inference(csp, variable):\n return ac3(csp, csp.constraints[variable].arcs())", "def constraint_test():\n import itertools, sys\n\n show_analysis = False\n #Generated via grammar\n gr = grammar.Grammar('grammars/test_constraints.bnf')\n inputs = ([1 for _ in range(100)], [ i%3 for i in range(100)])\n for _input in inputs: \n output = gr.generate(_input)\n azr = analyser.Analyser('test',output['phenotype'],True)\n try:\n azr.create_graph()\n except ValueError as e:\n print(__name__, \"ERROR\", _input, e)\n continue\n azr.parse_graph()\n azr.apply_stresses()\n azr.create_slf_file()\n azr.test_slf_file()\n azr.parse_results()\n azr.print_stresses()\n if show_analysis:\n azr.show_analysis()\n \n #Fixed generated\n lengths = (1000, 10000)\n levels = (5, 10)\n for length_idx, level_idx in itertools.permutations([0,1]):\n try:\n GRAPH = constrained_offset_graph(lengths[length_idx],\n levels[length_idx])\n except ValueError as e:\n print(__name__, \"ERROR\", lengths[length_idx], levels[length_idx], e)\n continue\n GRAPH.save_graph(\"pylon\")\n print \"nodes:\", GRAPH.number_of_nodes()\n print \"edges\", GRAPH.number_of_edges()\n #will it blend?\n azr = analyser.Analyser('test',\"moo\",True)\n azr.my_graph = GRAPH\n azr.parse_graph()\n azr.apply_stresses()\n azr.create_slf_file()\n azr.test_slf_file()\n azr.parse_results()\n azr.print_stresses()\n if show_analysis:\n azr.show_analysis()", "def ValidateControlFlowGraph(self, strict: bool = True) -> \"ControlFlowGraph\":\n 
number_of_nodes = self.number_of_nodes()\n\n # CFGs must contain one or more nodes.\n if number_of_nodes < 1:\n raise NotEnoughNodes(f\"Function `{self.name}` has no nodes\")\n\n # Get the entry and exit blocks. These properties will raise exceptions\n # if they are not found / duplicates found.\n entry_node = self.entry_block\n exit_nodes = self.exit_blocks\n\n out_degrees = {n: self.out_degree(n) for n in self.nodes}\n in_degrees = {n: self.in_degree(n) for n in self.nodes}\n\n if number_of_nodes > 1:\n if entry_node in exit_nodes:\n raise InvalidSpecialBlock(\n f\"Exit and entry nodes are the same: \"\n f\"'{self.nodes[entry_node]['name']}'\"\n )\n\n for exit_node in exit_nodes:\n if not nx.has_path(self, entry_node, exit_node):\n raise MalformedControlFlowGraphError(\n f\"No path from entry node '{self.nodes[entry_node]['name']}' to \"\n f\"exit node '{self.nodes[exit_node]['name']}' in function \"\n f\"`{self.name}`\"\n )\n\n # Validate node attributes.\n node_names = set()\n for node in self.nodes:\n # All nodes must have a name.\n if \"name\" not in self.nodes[node]:\n raise MissingNodeName(\n f\"Node {node} has no name in function `{self.name}`\"\n )\n\n # All node names must be unique.\n node_name = self.nodes[node][\"name\"]\n if node_name in node_names:\n raise DuplicateNodeName(\n f\"Duplicate node name '{node_name}' in function `{self.name}`\"\n )\n node_names.add(node_name)\n\n # All nodes must be connected (except for 1-node graphs).\n if number_of_nodes > 1 and not out_degrees[node] + in_degrees[node]:\n raise UnconnectedNode(f\"Unconnected node '{self.nodes[node]['name']}'\")\n\n # The entry node has an additional input, since it must entered.\n in_degrees[entry_node] += 1\n\n # The exit block cannot have outputs.\n for exit_node in exit_nodes:\n if out_degrees[exit_node]:\n app.Error(\"OUT DEGREE %s\", self.out_degree(exit_node))\n raise InvalidNodeDegree(\n f\"Exit block outdegree({self.nodes[exit_node]['name']}) = \"\n f\"{out_degrees[exit_node]} in function `{self.name}`\"\n )\n\n # Additional \"strict\" CFG tests.\n if strict:\n # Validate edge attributes.\n for src, dst in self.edges:\n if src == dst:\n raise GraphContainsSelfLoops(f\"Self loops: {src} -> {dst}\")\n\n # Each node in a CFG must have more than one output, or more than one\n # input. 
This is because nodes represent basic blocks: a node with only\n # a single output should have been fused with the consuming node (i.e.\n # they are the same basic block).\n if not (out_degrees[src] > 1 or in_degrees[dst] > 1):\n raise InvalidNodeDegree(\n f\"outdegree({self.nodes[src]['name']}) = {out_degrees[src]}, \"\n f\"indegree({self.nodes[dst]['name']}) = {in_degrees[dst]}\"\n )\n\n return self", "def solve(num_wizards, num_constraints, wizards, constraints):\n\n # print(num_wizards)\n # print(num_constraints)\n # print(wizards)\n # print(constraints)\n # node_set = set(wizards)\n \n\n\n def cost(sol,num_constraints,constraints):\n constraints_satisfied = 0\n constraints_failed = []\n output_ordering_map = {k: v for v, k in enumerate(sol)}\n for c in constraints:\n\n m = output_ordering_map # Creating an alias for easy reference\n\n wiz_a = m[c[0]]\n wiz_b = m[c[1]]\n wiz_mid = m[c[2]]\n\n if (wiz_a < wiz_mid < wiz_b) or (wiz_b < wiz_mid < wiz_a):\n constraints_failed.append(c)\n else:\n constraints_satisfied += 1\n return num_constraints - constraints_satisfied\n\n def neighbors(sol):\n wiz1 = random.randint(0,num_wizards-1)\n wiz2 = random.randint(0,num_wizards-1)\n\n new_sol = copy.copy(sol)\n temp = new_sol[wiz1]\n new_sol[wiz1] = new_sol[wiz2]\n new_sol[wiz2] = temp\n \n return new_sol\n\n def acceptance_probability(old_cost,new_cost,T):\n exponent = (old_cost - new_cost) / T\n \n try:\n ans = math.exp(exponent)\n except OverflowError:\n ans = float('inf')\n return ans\n\n\n def anneal(solution, num_constraints, constraints):\n old_cost = 0\n new_cost = 0\n old_cost = cost(solution,num_constraints,constraints)\n T = 1.0\n T_min = 0.000001\n alpha = 0.98\n while T > T_min:\n i = 1\n while i <= 1000:\n new_solution = neighbors(solution)\n new_cost = cost(new_solution,num_constraints,constraints)\n if new_cost == 0:\n return new_solution,new_cost\n ap = acceptance_probability(old_cost, new_cost, T)\n if ap > random.random():\n solution = new_solution\n old_cost = new_cost\n i += 1\n T = T*alpha\n return solution, old_cost\n\n s = copy.copy(wizards)\n random.shuffle(s)\n ret = anneal(s,num_constraints,constraints)\n \n for i in range(10):\n if ret[1] == 0:\n break\n random.shuffle(s)\n new_ret = anneal(s,num_constraints,constraints)\n print(i)\n if new_ret[1] < ret[1]:\n ret = new_ret\n print(\"constraints failed: {0}\".format(ret[1]))\n return ret[0]", "def consolidation_heuristics(to_print = False):\n # Instantiate the data problem.\n data = create_data_model()\n # Create the routing index manager.\n manager = pywrapcp.RoutingIndexManager(len(data['distance_matrix']),\n data['num_vehicles'], data['depot'])\n # Create Routing Model.\n routing = pywrapcp.RoutingModel(manager)\n # Create and register a transit callback.\n def distance_callback(from_index, to_index):\n \"\"\"Returns the distance between the two nodes.\"\"\"\n # Convert from routing variable Index to distance matrix NodeIndex.\n from_node = manager.IndexToNode(from_index)\n to_node = manager.IndexToNode(to_index)\n return data['distance_matrix'][from_node][to_node]\n def pending_callback(from_index, to_index):\n \"\"\"Returns the distance between the two nodes.\"\"\"\n # Convert from routing variable Index to distance matrix NodeIndex.\n from_node = manager.IndexToNode(from_index)\n to_node = manager.IndexToNode(to_index)\n return data['post'][to_node]\n transit_callback_index = routing.RegisterTransitCallback(distance_callback)\n pending_callback_index = routing.RegisterTransitCallback(pending_callback)\n # Define 
cost of each arc.\n for i in range(data['num_vehicles']-1):\n routing.SetArcCostEvaluatorOfVehicle(transit_callback_index, i) #Transit cost\n routing.SetFixedCostOfVehicle(data['fixed_cost'], i) #Fixed cost\n routing.SetArcCostEvaluatorOfVehicle(pending_callback_index, data['num_vehicles']-1) #Postponement and/or NonService cost\n # Add Capacity constraint.\n def demand_callback(from_index): #\n \"\"\"Returns the demand of the node.\"\"\"\n # Convert from routing variable Index to demands NodeIndex.\n from_node = manager.IndexToNode(from_index) \n return data['demands'][from_node]\n demand_callback_index = routing.RegisterUnaryTransitCallback(\n demand_callback)\n routing.AddDimensionWithVehicleCapacity(\n demand_callback_index,\n 0, # null capacity slack\n data['vehicle_capacities'], # vehicle maximum capacities\n True, # start cumul to zero\n 'Capacity')\n # Add time constraint.\n def time_callback(from_index,to_index): #\n \"\"\"Returns the demand of the node.\"\"\"\n # Convert from routing variable Index to NodeIndex in time\n from_node = manager.IndexToNode(from_index)\n to_node = manager.IndexToNode(to_index)\n return time_matrix[from_node][to_node] \n time_callback_index = routing.RegisterTransitCallback(time_callback) \n routing.AddDimensionWithVehicleCapacity(\n time_callback_index,\n 0, # null capacity slack\n data['time_capacities'], # vehicle maximum capacities\n True, # start cumul to zero\n 'Time')\n # Setting solution heuristic-procedure.\n search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n search_parameters.local_search_metaheuristic = (\n routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH)\n search_parameters.time_limit.seconds = 5 #10 # 60 #20 #3000\n search_parameters.log_search = True\n # Solve the problem.\n assignment = routing.SolveWithParameters(search_parameters)\n # Print solution on console.\n if assignment:\n sent, sol_results, routes_results = print_solution(data, manager, routing, assignment) \n return sent, sol_results, routes_results", "def scale_arc_constraints(blk):\n for arc in blk.component_data_objects(Arc, descend_into=True):\n arc_block = arc.expanded_block\n if arc_block is None: # arc not expanded or port empty?\n _log.warning(\n f\"{arc} has no constraints. Has the Arc expansion transform \"\n \"been applied?\"\n )\n continue\n warning = (\n \"Automatic scaling for arc constraints is supported for \"\n \"only the Equality rule. 
Variable {name} on Port {port} was \"\n \"created with a different rule, so the corresponding constraint \"\n \"on {arc_name} will not be scaled.\"\n )\n port1 = arc.ports[0]\n port2 = arc.ports[1]\n for name in port1.vars.keys():\n if not port1.is_equality(name):\n _log.warning(\n warning.format(name=name, port=port1.name, arc_name=arc.name)\n )\n continue\n if not port2.is_equality(name):\n _log.warning(\n warning.format(name=name, port=port2.name, arc_name=arc.name)\n )\n continue\n con = getattr(arc_block, name + \"_equality\")\n for i, c in con.items():\n if i is None:\n sf = min_scaling_factor([port1.vars[name], port2.vars[name]])\n else:\n sf = min_scaling_factor([port1.vars[name][i], port2.vars[name][i]])\n constraint_scaling_transform(c, sf)", "def enforce(context, action, target, do_raise=True):\n \"\"\"\n ======================================================================================\n context = <xdrs.context.RequestContext object at 0x6dcf050>\n target = {'project_id': u'4537aca4a4a4462fa4c59ad5b5581f00', 'user_id': u'91d732b65831491d8bd952b3111e62dd'}\n action = xdrs:get_algorithms\n ======================================================================================\n \"\"\"\n init()\n \n credentials = context.to_dict()\n \"\"\"\n ======================================================================================\n credentials = {'project_name': u'admin', 'user_id': u'91d732b65831491d8bd952b3111e62dd', 'roles': [u'heat_stack_owner', u'_member_', u'admin'], 'timestamp': '2015-03-10T06:48:40.110653', 'auth_token': 'MIIT9wYJKoZIhvcNAQcCoIIT6DCCE+QCAQExCTAHBgUrDgMCGjCCEk0GCSqGSIb3DQEHAaCCEj4EghI6eyJhY2Nlc3MiOiB7InRva2VuIjogeyJpc3N1ZWRfYXQiOiAiMjAxNS0wMy0xMFQwNjo0ODozOS41MzU2NjEiLCAiZXhwaXJlcyI6ICIyMDE1LTAzLTEwVDA3OjQ4OjM5WiIsICJpZCI6ICJwbGFjZWhvbGRlciIsICJ0ZW5hbnQiOiB7ImRlc2NyaXB0aW9uIjogImFkbWluIHRlbmFudCIsICJlbmFibGVkIjogdHJ1ZSwgImlkIjogIjQ1MzdhY2E0YTRhNDQ2MmZhNGM1OWFkNWI1NTgxZjAwIiwgIm5hbWUiOiAiYWRtaW4ifX0sICJzZXJ2aWNlQ2F0YWxvZyI6IFt7ImVuZHBvaW50cyI6IFt7ImFkbWluVVJMIjogImh0dHA6Ly8xNzIuMjEuNy40MDo4Nzc0L3YyLzQ1MzdhY2E0YTRhNDQ2MmZhNGM1OWFkNWI1NTgxZjAwIiwgInJlZ2lvbiI6ICJSZWdpb25PbmUiLCAiaW50ZXJuYWxVUkwiOiAiaHR0cDovLzE3Mi4yMS43LjQwOjg3NzQvdjIvNDUzN2FjYTRhNGE0NDYyZmE0YzU5YWQ1YjU1ODFmMDAiLCAiaWQiOiAiMTZiMTVjYzVmZjUwNGNiODlmNTg2NjRlMjdhNjljNjkiLCAicHVibGljVVJMIjogImh0dHA6Ly8xNzIuMjEuNy40MDo4Nzc0L3YyLzQ1MzdhY2E0YTRhNDQ2MmZhNGM1OWFkNWI1NTgxZjAwIn1dLCAiZW5kcG9pbnRzX2xpbmtzIjogW10sICJ0eXBlIjogImNvbXB1dGUiLCAibmFtZSI6ICJub3ZhIn0sIHsiZW5kcG9pbnRzIjogW3siYWRtaW5VUkwiOiAiaHR0cDovLzE3Mi4yMS43LjQwOjk2OTYvIiwgInJlZ2lvbiI6ICJSZWdpb25PbmUiLCAiaW50ZXJuYWxVUkwiOiAiaHR0cDovLzE3Mi4yMS43LjQwOjk2OTYvIiwgImlkIjogIjFiMjkzYTgxNjk2YjRiN2Y4OTZlYWQ0NjIyYTFjMmExIiwgInB1YmxpY1VSTCI6ICJodHRwOi8vMTcyLjIxLjcuNDA6OTY5Ni8ifV0sICJlbmRwb2ludHNfbGlua3MiOiBbXSwgInR5cGUiOiAibmV0d29yayIsICJuYW1lIjogIm5ldXRyb24ifSwgeyJlbmRwb2ludHMiOiBbeyJhZG1pblVSTCI6ICJodHRwOi8vMTcyLjIxLjcuNDA6ODc3Ni92Mi80NTM3YWNhNGE0YTQ0NjJmYTRjNTlhZDViNTU4MWYwMCIsICJyZWdpb24iOiAiUmVnaW9uT25lIiwgImludGVybmFsVVJMIjogImh0dHA6Ly8xNzIuMjEuNy40MDo4Nzc2L3YyLzQ1MzdhY2E0YTRhNDQ2MmZhNGM1OWFkNWI1NTgxZjAwIiwgImlkIjogIjNhNzY3OWNjZTdkZjRhY2ZhMTZiM2NhNTJkZGNmYzgyIiwgInB1YmxpY1VSTCI6ICJodHRwOi8vMTcyLjIxLjcuNDA6ODc3Ni92Mi80NTM3YWNhNGE0YTQ0NjJmYTRjNTlhZDViNTU4MWYwMCJ9XSwgImVuZHBvaW50c19saW5rcyI6IFtdLCAidHlwZSI6ICJ2b2x1bWV2MiIsICJuYW1lIjogImNpbmRlcnYyIn0sIHsiZW5kcG9pbnRzIjogW3siYWRtaW5VUkwiOiAiaHR0cDovLzE3Mi4yMS43LjQwOjg3NzQvdjMiLCAicmVnaW9uIjogIlJlZ2lvbk9uZSIsICJpbnRlcm5hbFVSTCI6ICJodHRwOi8vMTcyLjIxLjcuNDA6ODc3NC92MyIsICJpZCI6ICIwYmIxZDFiODhhZmU0MGRhOTNiY2IxNT
...<remainder of keystone auth_token base64 elided>', 'remote_address': '172.21.7.40', 'quota_class': None, 'is_admin': True, 'tenant': u'4537aca4a4a4462fa4c59ad5b5581f00', 'service_catalog': [{u'endpoints_links': [], u'endpoints': [{u'adminURL': u'http://172.21.7.40:8776/v1/4537aca4a4a4462fa4c59ad5b5581f00', u'region': u'RegionOne', u'publicURL': u'http://172.21.7.40:8776/v1/4537aca4a4a4462fa4c59ad5b5581f00', u'id': u'0a1c8da4f156495aad2130e2c0691982', u'internalURL': u'http://172.21.7.40:8776/v1/4537aca4a4a4462fa4c59ad5b5581f00'}], u'type': u'volume', u'name': u'cinder'}], 'request_id': 'req-c0439276-3600-49cb-8de5-680b3f7d735c', 'instance_lock_checked': False, 'project_id': u'4537aca4a4a4462fa4c59ad5b5581f00', 'user_name': u'admin', 'read_deleted': 'no', 'user': u'91d732b65831491d8bd952b3111e62dd'}\n ======================================================================================\n \"\"\"\n\n # Add the exception arguments if asked to do a raise\n extra = {}\n if do_raise:\n extra.update(exc=exception.PolicyNotAuthorized, action=action)\n\n \"\"\"\n ======================================================================================\n action = xdrs:get_algorithms\n target = <xdrs.objects.instance.Instance object at 0x62b4a50>\n credentials = {'project_name': u'admin', 'user_id': u'91d732b65831491d8bd952b3111e62dd', 'roles': [u'heat_stack_owner', u'_member_', u'admin'], 'timestamp': '2015-03-10T06:48:40.110653', 'auth_token': '<keystone auth_token base64 elided>', 'remote_address': '172.21.7.40', 'quota_class': None, 'is_admin': True, 'tenant': u'4537aca4a4a4462fa4c59ad5b5581f00', 'service_catalog': [{u'endpoints_links': [], u'endpoints': [{u'adminURL': u'http://172.21.7.40:8776/v1/4537aca4a4a4462fa4c59ad5b5581f00', u'region': u'RegionOne', 
u'publicURL': u'http://172.21.7.40:8776/v1/4537aca4a4a4462fa4c59ad5b5581f00', u'id': u'0a1c8da4f156495aad2130e2c0691982', u'internalURL': u'http://172.21.7.40:8776/v1/4537aca4a4a4462fa4c59ad5b5581f00'}], u'type': u'volume', u'name': u'cinder'}], 'request_id': 'req-c0439276-3600-49cb-8de5-680b3f7d735c', 'instance_lock_checked': False, 'project_id': u'4537aca4a4a4462fa4c59ad5b5581f00', 'user_name': u'admin', 'read_deleted': 'no', 'user': u'91d732b65831491d8bd952b3111e62dd'}\n extra = {'action': 'xdrs:get_algorithms', 'exc': <class 'xdrs.exception.PolicyNotAuthorized'>}\n ======================================================================================\n \"\"\"\n return policy.check(action, target, credentials, **extra)", "def repair(self):\n # self.add_cons_vars([x.constraint for x in self._cons_dict.values()])\n # self.add_cons_vars([x.variable for x in self._var_dict.values()])\n self._push_queue()\n Model.repair(self)\n self.regenerate_constraints()\n self.regenerate_variables()", "def arcConsistency(self, constraint):\n # start out assuming the constraint is satisfied\n satisfied = True\n # if the tail is assigned then we don't need to do anything\n if (constraint.tail.value != \"none\"):\n # the arc is consistent\n return satisfied\n # if the head is assigned a value then we compare the tail domain to the assigned value\n if (constraint.head.value != \"none\"):\n # make a copy of the tail domain to loop through\n tailDomain = constraint.tail.domain[:]\n # loop through all values in the tail domain\n for tailValue in tailDomain:\n # if this value doesn't satisfy the constraint then remove the value from the domain\n if (not constraint.satisfied(tailValue, constraint.head.value)):\n # record that the constraint wasn't satisfied\n satisfied = False\n # remove the value from the domain\n constraint.tail.domain.remove(tailValue)\n # return whether or not the constraint was satisfied\n return satisfied\n # if the head is not assigned a value then we compare the tail domain to each value in the head domain\n # start assuming the tail domain has not been modified\n domainModified = False\n # make a copy of the tail domain to loop through\n tailDomain = constraint.tail.domain[:]\n # loop through all values in the tail domain\n for tailValue in tailDomain:\n # start out assuming the constraint is not satisfied\n satisfied = False\n # loop through all values in the head domain\n for headValue in constraint.head.domain:\n # does this value satisfy the constraint\n if (constraint.satisfied(tailValue, headValue)):\n # record that the constraint wasn't satisfied\n satisfied = True\n # if we didn't find a value in the head that works with the tail value\n if (not satisfied):\n # remove the tail value from the domain\n constraint.tail.domain.remove(tailValue)\n # mark that we removed something from the tail domain\n domainModified = True\n # return whether or not the constraint was satisfied\n return (not domainModified)", "def backtrack(csp):\n\n if len(csp.assignment) == len(csp.variables):\n return True\n\n variable = select_unassigned_variable(csp)\n value = order_domain_values(csp, variable)\n #print variable\n #print value\n flag = 0\n for x in value:\n csp.variables.begin_transaction()\n if is_consistent(csp, variable, x):\n #print \"past is_consistent\"\n for var in csp.variables:\n if var == variable:\n var.assign(x)\n var.is_assigned()\n solution = backtrack(csp)\n if solution != False:\n return True\n csp.variables.rollback()\n return False", "def test_load_additive_constraints():\n 
solution_repo = SolutionRepository(\n os.path.join(os.path.dirname(__file__), \"requests_security_solution.txt\")\n )\n constraints = solution_repo.solution[\"idna\"].build_constraints()\n assert constraints == pkg_resources.Requirement.parse(\"idna<2.9,>=2.5\")", "def sodoku_CSP(eg):\n\n constraints = create_constraints();\n\n return CSP(\n {(x,y):(digits if eg[y][x]==0 else [eg[y][x]])\n for x in range(9) for y in range(9)},\n constraints)", "def main():\r\n # Instantiate the data problem.\r\n data = create_data_model()\r\n\r\n # Create the routing index manager.\r\n manager = pywrapcp.RoutingIndexManager(len(data['distance_matrix']), data['num_vehicles'], data['depot'])\r\n\r\n # Create Routing Model.\r\n routing = pywrapcp.RoutingModel(manager)\r\n\r\n\r\n # Create and register a transit callback.\r\n def distance_callback(from_index, to_index):\r\n \"\"\"Returns the distance between the two nodes.\"\"\"\r\n # Convert from routing variable Index to distance matrix NodeIndex.\r\n from_node = manager.IndexToNode(from_index)\r\n to_node = manager.IndexToNode(to_index)\r\n return data['distance_matrix'][from_node][to_node]\r\n\r\n transit_callback_index = routing.RegisterTransitCallback(distance_callback)\r\n\r\n # Define cost of each arc.\r\n routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)\r\n\r\n\r\n # Add Capacity constraint.\r\n def demand_callback(from_index):\r\n \"\"\"Returns the demand of the node.\"\"\"\r\n # Convert from routing variable Index to demands NodeIndex.\r\n from_node = manager.IndexToNode(from_index)\r\n return data['demands'][from_node]\r\n\r\n demand_callback_index = routing.RegisterUnaryTransitCallback(\r\n demand_callback)\r\n routing.AddDimensionWithVehicleCapacity(\r\n demand_callback_index,\r\n 0, # null capacity slack\r\n data['vehicle_capacities'], # vehicle maximum capacities\r\n True, # start cumul to zero\r\n 'Capacity')\r\n\r\n # Setting first solution heuristic.\r\n search_parameters = pywrapcp.DefaultRoutingSearchParameters()\r\n search_parameters.first_solution_strategy = (\r\n routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)\r\n\r\n\r\n # Solve the problem.\r\n assignment = routing.SolveWithParameters(search_parameters)\r\n\r\n # Print solution on console.\r\n if assignment:\r\n print_solution(data, manager, routing, assignment)", "def graphConsistency(self, feature):\n # get a list of all constraints in which feature appears in the head\n headConstraints = self.getHeadConstraints(feature.name)\n # make a copy of the constraints list - we will treat this like a stack\n constraintList = headConstraints[:]\n # loop through all the constraints\n while len(constraintList) > 0:\n if (len(constraintList) % 100 == 0):\n print \"\\tconsistency checking constraints = \" + str(len(constraintList))\n # grab a constraint off the stack\n constraint = constraintList.pop()\n # check the constraint for arc consistency\n consistent = self.arcConsistency(constraint)\n # if we removed all the values from the domain of the tail then we need to backtrack\n if (len(constraint.tail.domain) == 0):\n return False\n # if the arc wasn't consistent then we need to add back all the constraints\n # with a head equal to the tail of the changed constraint to the queue\n constraintsAdded = 0\n if (not consistent):\n # get a list of constraints where the tail feature we just changed appears as\n # the head\n reCheckConstraints = self.getHeadConstraints(constraint.tail.name)\n # go through the list, add back all constraints that are not already in the stack\n 
for c in reCheckConstraints:\n # if the constraint is not already in the stack\n if not c in constraintList:\n # put it at the bottom of the stack\n constraintList.insert(0, c)\n constraintsAdded += 1\n print \"\\t\\tNumber of constraints added: \" + str(constraintsAdded)\n return True", "def main(supply):\n\n # Define four parallel arrays: start_nodes, end_nodes, capacities, and unit costs\n # between each pair. For instance, the arc from node 0 to node 1 has a\n # capacity of 15 and a unit cost of 4.\n\n start_nodes = [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 7, 8, 8, 8, 8, 8, 9, 9, 9]\n end_nodes = [8, 2, 4, 6, 5, 4, 7, 6, 9, 8, 9, 0, 3, 4, 2, 5, 1, 0, 2, 5, 1, 8, 3, 4, 1, 0, 8, 1, 1, 0, 9, 5, 6, 1, 8, 2]\n capacities = [23, 10, 25, 15, 17, 14, 10, 21, 17, 11, 22, 27, 14, 6, 19, 9, 11, 8, 29, 16, 22, 29, 20, 13, 18, 14, 20, 25, 13, 8, 10, 24, 5, 9, 20, 28]\n unit_costs = [6, 9, 7, 8, 8, 5, 8, 5, 6, 9, 6, 5, 6, 6, 9, 7, 8, 6, 9, 6, 5, 5, 8, 7, 5, 8, 7, 9, 7, 6, 9, 6, 5, 5, 6, 7]\n\n # Define an array of supplies at each node.\n supplies = supply\n\n\n # Instantiate a SimpleMinCostFlow solver.\n min_cost_flow = pywrapgraph.SimpleMinCostFlow()\n\n # Add each arc.\n for i in range(0, len(start_nodes)):\n min_cost_flow.AddArcWithCapacityAndUnitCost(start_nodes[i], end_nodes[i],\n capacities[i], unit_costs[i])\n\n # Add node supplies.\n\n for i in range(0, len(supplies)):\n min_cost_flow.SetNodeSupply(i, supplies[i])\n\n\n # Find the minimum cost flow between node 0 and node 4.\n if min_cost_flow.Solve() == min_cost_flow.OPTIMAL:\n print('Minimum cost:', min_cost_flow.OptimalCost())\n print('')\n print(' Arc Flow / Capacity Cost')\n flag = 1\n optimal_flows = np.zeros(36)\n for i in range(min_cost_flow.NumArcs()):\n cost = min_cost_flow.Flow(i) * min_cost_flow.UnitCost(i)\n print('%1s -> %1s %3s / %3s %3s' % (\n min_cost_flow.Tail(i),\n min_cost_flow.Head(i),\n min_cost_flow.Flow(i),\n min_cost_flow.Capacity(i),\n cost))\n # save answer to the variable\n optimal_flows[i] = min_cost_flow.Flow(i)\n return flag, optimal_flows\n else:\n print('There was an issue with the min cost flow input.')\n flag = 0\n return flag, 0", "def _update_tarsqidoc(self, cp):\n self.tarsqidoc.remove_tlinks()\n for n1, rest in cp.graph.edges.items():\n for n2, edge in cp.graph.edges[n1].items():\n if edge.constraint is not None:\n if edge.constraint.has_simple_relation():\n self._add_constraint_to_tarsqidoc(edge)", "def aga_contract_graph(adata, min_group_size=0.01, max_n_contractions=1000, copy=False):\n adata = adata.copy() if copy else adata\n if 'aga_adjacency_tree_confidence' not in adata.add: raise ValueError('run tool aga first!')\n min_group_size = min_group_size if min_group_size >= 1 else int(min_group_size * adata.n_smps)\n logg.info('contract graph using `min_group_size={}`'.format(min_group_size))\n\n def propose_nodes_to_contract(adjacency_tree_confidence, node_groups):\n # nodes with two edges\n n_edges_per_seg = np.sum(adjacency_tree_confidence > 0, axis=1).A1\n for i in range(adjacency_tree_confidence.shape[0]):\n if n_edges_per_seg[i] == 2:\n neighbors = adjacency_tree_confidence[i].nonzero()[1]\n for neighbors_edges in range(1, 20):\n for n_cnt, n in enumerate(neighbors):\n if n_edges_per_seg[n] == neighbors_edges:\n logg.msg('merging node {} into {} (two edges)'\n .format(i, n), v=4)\n return i, n\n # node groups with a very small cell number\n for i in range(adjacency_tree_confidence.shape[0]):\n if node_groups[str(i) == node_groups].size < 
min_group_size:\n neighbors = adjacency_tree_confidence[i].nonzero()[1]\n neighbor_sizes = [node_groups[str(n) == node_groups].size for n in neighbors]\n n = neighbors[np.argmax(neighbor_sizes)]\n logg.msg('merging node {} into {} '\n '(smaller than `min_group_size` = {})'\n .format(i, n, min_group_size), v=4)\n return i, n\n return 0, 0\n\n def contract_nodes(adjacency_tree_confidence, node_groups):\n for count in range(max_n_contractions):\n i, n = propose_nodes_to_contract(adjacency_tree_confidence, node_groups)\n if i != 0 or n != 0:\n G = nx.Graph(adjacency_tree_confidence)\n G_contracted = nx.contracted_nodes(G, n, i, self_loops=False)\n adjacency_tree_confidence = nx.to_scipy_sparse_matrix(G_contracted)\n node_groups[str(i) == node_groups] = str(n)\n for j in range(i+1, G.size()+1):\n node_groups[str(j) == node_groups] = str(j-1)\n else:\n break\n return adjacency_tree_confidence, node_groups\n\n size_before = adata.add['aga_adjacency_tree_confidence'].shape[0]\n adata.add['aga_adjacency_tree_confidence'], adata.smp['aga_groups'] = contract_nodes(\n adata.add['aga_adjacency_tree_confidence'], adata.smp['aga_groups'])\n adata.add['aga_groups_order'] = np.unique(adata.smp['aga_groups'])\n for key in ['aga_adjacency_full_confidence', 'aga_groups_original',\n 'aga_groups_order_original', 'aga_groups_colors_original']:\n if key in adata.add: del adata.add[key]\n logg.info(' contracted graph from {} to {} nodes'\n .format(size_before, adata.add['aga_adjacency_tree_confidence'].shape[0]))\n logg.msg('removed adata.add[\"aga_adjacency_full_confidence\"]', v=4)\n return adata if copy else None", "def prove_CP() -> Proof:\n # Optional Task 6.7d", "def isValidCompatible(cls,root):\n valid = True\n # the order of node types in chains is restricted\n # (this would be easier if the data was in a Corpus-instance)\n allowed = NX.XDiGraph(selfloops=True)\n\n # continue from here!\n allowed.add_edge('Physical','Physical')\n allowed.add_edge('Property','Physical')\n allowed.add_edge('Process','Physical')\n allowed.add_edge('Regulation','Physical')\n\n allowed.add_edge('Property','Property')\n allowed.add_edge('Process','Property')\n allowed.add_edge('Regulation','Property')\n\n allowed.add_edge('Property','Process')\n# allowed.add_edge('Process','Process')\n allowed.add_edge('Regulation','Process')\n\n allowed.add_edge('Property','Regulation')\n# allowed.add_edge('Process','Regulation')\n allowed.add_edge('Regulation','Regulation')\n\n mapping = {}\n for a in root.find(\"ontologies\").findall(\"ontology\"):\n if a.attrib['id']=='interaction':\n for x in a.getiterator(\"ontnode\"):\n if x.attrib.has_key('effect') and x.attrib['effect'].endswith('regulation'):\n t = 'Regulation'\n else:\n t = x.attrib['onttype']\n mapping[x.attrib['id']] = t\n \n for a in root.getiterator(\"relannotation\"):\n t2type = dict( [(x.attrib['id'],x.attrib['type'])\n for x in a.findall(\"reltoken\")] )\n n2t = dict( [(x.attrib['id'],x.attrib['token'])\n for x in a.findall(\"relnode\")] )\n for x in a.findall(\"reledge\"):\n bt = t2type[n2t[x.attrib['bgn']]]\n et = t2type[n2t[x.attrib['end']]]\n bgn = mapping[bt]\n end = mapping[et]\n if not allowed.has_edge(bgn,end):\n printError(cls,inspect.stack()[1][3],\n \"%s -- %s (%s) -> %s (%s) is not a valid edge\"%\n (x.attrib['id'].split('.')[1],bgn,bt,end,et))\n valid = False\n \n return(valid)", "def solve(num_wizards, num_constraints, wizards, constraints): \n global wiz_const\n wiz_const = mapConstraints(wizards, constraints)\n partial_soltns = []\n\n # counter for priority 
queue since it doesn't allow \n # identical priorities\n k = 0\n\n # list of wizards sorted by lowest to highest degree\n sorted_wiz = sortWizByConsts(wiz_const)\n wiz_rankings = {wiz: i for i, wiz in enumerate(sorted_wiz)}\n\n const_set = set(map(tuple, constraints))\n for i in range(4) : \n heapq.heappush(partial_soltns, (0, k, nx.DiGraph(), const_set.copy()))\n k += 1\n\n print(\"setup done, commencing solving\")\n\n while len(partial_soltns) : \n\n # for partial_soltn, const_set in partial_soltns : \n# partial_soltns.remove(partial_soltn)\n num_seen, _, partial_soltn, const_set = heapq.heappop(partial_soltns)\n const = findNextConst(partial_soltn, const_set, wiz_rankings)\n print(\"seen \" + str(len(partial_soltn)) + \"\\t num partial_solutions\\t\" + str(len(partial_soltns)))\n try : \n const_set.remove(const)\n except KeyError : \n print(\"BAD SHIT\")\n pass\n possible_arrangements = [(const[0], const[1], const[2]),\n (const[2], const[0], const[1]), \n (const[2], const[1], const[0]),\n (const[1], const[0], const[2])]\n for arr in possible_arrangements:\n soltn = partial_soltn.copy()\n a, b, c = arr\n if not (soltn.has_node(a) and soltn.has_node(b) and nx.has_path(soltn, a, b)) : \n soltn.add_edge(a, b)\n if not (soltn.has_node(b) and soltn.has_node(c) and nx.has_path(soltn, b, c)) : \n soltn.add_edge(b, c)\n # see if we violated any other constraints (seen or not seen)\n is_valid, num_wiz = validNumWiz(soltn, const_set)\n\n if is_valid and len(list(nx.simple_cycles(soltn))) == 0 :\n heapq.heappush(partial_soltns, (-len(soltn), k, soltn, const_set.copy()))\n k += 1\n # are we done?\n if num_wiz == num_wizards :\n print(\"FINAL SOLUTION (found without processing all constraints but validating against them)\")\n ordering = list(nx.topological_sort(soltn))\n finishEverything(ordering, constraints)\n return ordering\n if foundCompleteOrdering(heapq.heappop(partial_soltns)) : \n print(\"FINAL SOLUTION\")\n ordering = list(nx.topological_sort(soltn))\n finishEverything(ordering, constraints)\n return ordering\n print(\"NO SOLUTION FOUND\")\n return \"\"", "def verify_pln(self):\n\n warnings = []\n\n self._populate_uncertainties()\n\n # The transitref and transiturl actually end up stored in the 'transit'\n # ExoParam due to the ref and url splits. 
Pull these out and set the\n # transit entries to the proper pointers.\n if self.transit.value == 1:\n if is_empty(self.transit.reference):\n self.transit.reference = \"__TRANSITREF\"\n if is_empty(self.transit.url):\n self.transit.url = \"__TRANSITURL\"\n\n # If the transit depth is not provided, but an Rp/R* ratio is,\n # calculate the depth value.\n if is_empty(self.depth.value) and is_valid(self.rr.value):\n self.depth.value = self.rr.value ** 2\n if isinstance(self.rr.uncertainty, Decimal):\n self.depth.uncertainty = self.rr.uncertainty * 2\n if isinstance(self.rr.uncertainty_upper, Decimal):\n self.depth.uncertainty_upper = self.rr.uncertainty_upper * 2\n self.depth.reference = \"Calculated from Rp/R*\"\n self.depth.url = self.rr.reference\n\n # If the orbital eccentricity value is 0 and a TT value is provided,\n # use the same values for T0 as well.\n if self.ecc.value == Decimal(0) and is_empty(self.om.value):\n self.om.value = Decimal(90)\n self.om.reference = \"Set to 90 deg with ecc~0\"\n print(\"set omega to 90\")\n if is_valid(self.tt.value):\n print(\"copying TT to T0\")\n self.t0.copy_values(self.tt)\n # OM may already be set to 90.\n elif self.ecc.value == 0 and self.om.value == 90:\n if str(self.tt.value) != \"NaN\":\n print(\"copying TT to T0\")\n self.t0.copy_values(self.tt)\n\n # Set the FREEZE_ECC flag if ECC=0 and no uncertainty is provided.\n if self.ecc.value == 0 and is_empty(self.ecc.uncertainty):\n self.freeze_ecc.value = 1\n\n # Set the MULT flag if NCOMP is more than 1 planet.\n if self.ncomp.value > 1:\n self.mult.value = 1\n\n # Set the TREND flag if a DVDT value is provided.\n if not is_empty(self.dvdt.value):\n self.trend.value = 1\n\n # Exclude planets with period uncertainty >10%.\n self.per.check_constrained(0.1)\n if not self.per.well_constrained:\n self.exclude()\n warnings.append(\"<uncertain PER>\")\n\n # Warn of planets with K speeds <2 m/s.\n if is_valid(self.k.value):\n if self.k.value < 2:\n # self.exclude()\n warnings.append(\"<low K value>\")\n\n # Make sure RA string uses spaces.\n if not is_empty(self.ra_string.value):\n if \"h\" in self.ra_string.value:\n new_value = self.ra_string.value.replace(\"h\", \" \")\n new_value = new_value.replace(\"m\", \" \")\n new_value = new_value.replace(\"s\", \"\")\n self.ra_string.value = new_value\n\n # Make sure DEC string uses spaces.\n if not is_empty(self.dec_string.value):\n if \"d\" in self.dec_string.value:\n new_value = self.dec_string.value.replace(\"d\", \" \")\n new_value = new_value.replace(\"m\", \" \")\n new_value = new_value.replace(\"s\", \"\")\n self.dec_string.value = new_value\n\n # Display warnings generated by final adjustments.\n if len(warnings) > 0:\n print(\"<<<{0} GOT {1} WARNING(S)>>>\".format(self.name.value,\n len(warnings)\n )\n )\n [print(x) for x in warnings]", "def enforce_constraints(self):\n self.session.flush()\n try:\n self.session.execute('SET CONSTRAINTS ALL IMMEDIATE')\n except DatabaseError:\n handle_sqlalchemy_database_error()", "def build_proof(self):\n if not self.root.is_atomic():\n l = apply_rule(self.root)\n self.left_subtree = ProofTree(l[0])\n if l[1] != None:\n self.right_subtree = ProofTree(l[1]) \n if self.left_subtree is not None:\n self.rule = l[2]\n left_tree= ProofTree(self.left_subtree.build_proof())\n if self.right_subtree is not None:\n self.rule = l[2]\n right_tree = ProofTree(self.right_subtree.build_proof())", "def check_constraints ( A, S, complete ) :\n\t\n\tok = True\n\t\n\tfor i in range(len(complete)) :\n\t\tif complete[i] :\n\t\t\tif not 
(dot(A[i],S) == 0) :\n\t\t\t\tok = False\n\t\t\t\tprint '\\n'\n\t\t\t\tprint '*** warning *** constraint %d not verified' % (i)\n\t\t\t\tvars_inds = (where(abs(A[i]) == 1))[0]\n\t\t\t\tprint 'variables involved:', vars_inds\n\t\t\t\tprint 'displacements:', S[vars_inds]\n\t\t\t\tprint\n\t\t\t\t#programPause = raw_input(\"Press the <ENTER> key to continue...\")\n\t\t\t\t\n\treturn ok", "def test_add_strict_node_to_non_strict_node(self):\n non_strict_node = self.cluster.master\n strict_node = self.cluster.servers[self.nodes_init:self.nodes_init + 1][0]\n self.enable_tls_encryption_cli_on_nodes \\\n (nodes=self.cluster.servers[self.nodes_init:self.nodes_init + 1])\n CbServer.use_https = True\n RestConnection(non_strict_node).add_node(user='Administrator', password='password',\n port=CbServer.ssl_port,\n remoteIp=strict_node.ip)\n CbServer.use_https = False\n rest = RestConnection(non_strict_node)\n nodes = rest.node_statuses()\n rest.rebalance(otpNodes=[node.id for node in nodes],\n ejectedNodes=[])\n result = rest.monitorRebalance()\n self.assertTrue(result, \"Rebalance failed\")", "def remaining_constraints(self):\r\n \r\n def iec1(state,decision,nodes):\r\n return decision['E:L']+decision['E:R_1']<=nodes['E'].get_preds_value(state)\r\n def iec2(state,decision,nodes):\r\n return decision['R_1:L']<=nodes['R_1'].get_preds_value(state)\r\n def iec3(state,decision,nodes):\r\n return decision['G:R_1']>=-(nodes['R_1'].get_preds_value(state)) \r\n def iec4(state,decision,nodes):\r\n return decision['G:L']>=0.0\r\n def iec5(state,decision,nodes):\r\n return decision['E:L']>=0.0\r\n def iec6(state,decision,nodes):\r\n return decision['E:R_1']>=0.0\r\n def iec7(state,decision,nodes):\r\n return decision['R_1:L']>=0.0\r\n\r\n Inequality_Constraints=[iec1,iec2,iec3,iec4,iec5,iec6,iec7]\r\n \r\n return Inequality_Constraints", "def revise(self, verbose=0):\n if verbose:\n print '** Consistency **'\n\n _queue = [ (constr.estimateCost(self._domains),\n constr) for constr in self._constraints ]\n _queue.sort()\n _affected_constraints = {}\n while True:\n if not _queue:\n # refill the queue if some constraints have been affected\n _queue = [(constr.estimateCost(self._domains),\n constr) for constr in _affected_constraints]\n if not _queue:\n break\n _queue.sort()\n _affected_constraints.clear()\n if verbose > 2:\n print 'Queue', _queue\n cost, constraint = _queue.pop(0)\n if verbose > 1:\n print 'Trying to entail constraint',\n print constraint, '[cost:%d]' % cost\n entailed = constraint.narrow(self._domains)\n for var in constraint.affectedVariables():\n # affected constraints are listeners of\n # affected variables of this constraint\n dom = self._domains[var]\n if not dom.has_changed():\n continue\n if verbose > 1 :\n print ' -> New domain for variable', var, 'is', dom\n for constr in self._variableListeners[var]:\n if constr is not constraint:\n _affected_constraints[constr] = True\n dom.clear_change()\n if entailed:\n if verbose:\n print \"--> Entailed constraint\", constraint\n self._removeConstraint(constraint)\n if constraint in _affected_constraints:\n del _affected_constraints[constraint]\n \n for domain in self._domains.itervalues():\n if domain.size() != 1:\n return 0\n return 1", "def constraint_for(dist=None, param=None):\n\n constraints = {\n 'atol':\n tfb.Softplus(),\n 'rtol':\n tfb.Softplus(),\n 'concentration':\n tfb.Softplus(),\n 'GeneralizedPareto.concentration': # Permits +ve and -ve concentrations.\n lambda x: tf.math.tanh(x) * 0.24,\n 'concentration0':\n tfb.Softplus(),\n 
'concentration1':\n tfb.Softplus(),\n 'df':\n tfb.Softplus(),\n 'InverseGaussian.loc':\n tfb.Softplus(),\n 'JohnsonSU.tailweight':\n tfb.Softplus(),\n 'PowerSpherical.mean_direction':\n lambda x: tf.math.l2_normalize(tf.math.sigmoid(x) + 1e-6, -1),\n 'ContinuousBernoulli.probs':\n tfb.Sigmoid(),\n 'Geometric.logits': # TODO(b/128410109): re-enable down to -50\n # Capping at 15. so that probability is less than 1, and entropy is\n # defined. b/147394924\n lambda x: tf.minimum(tf.maximum(x, -16.), 15.\n ), # works around the bug\n 'Geometric.probs':\n constrain_between_eps_and_one_minus_eps(),\n 'Binomial.probs':\n tfb.Sigmoid(),\n 'NegativeBinomial.probs':\n tfb.Sigmoid(),\n 'Bernoulli.probs':\n tfb.Sigmoid(),\n 'PlackettLuce.scores':\n tfb.Softplus(),\n 'ProbitBernoulli.probs':\n tfb.Sigmoid(),\n 'RelaxedBernoulli.probs':\n tfb.Sigmoid(),\n 'cutpoints': # Permit values that aren't too large\n lambda x: tfb.Ascending().forward(10. * tf.math.tanh(x)),\n 'log_rate':\n lambda x: tf.maximum(x, -16.),\n 'mixing_concentration':\n tfb.Softplus(),\n 'mixing_rate':\n tfb.Softplus(),\n 'rate':\n tfb.Softplus(),\n 'scale':\n tfb.Softplus(),\n 'scale_diag':\n tfb.Softplus(),\n 'scale_identity_multiplier':\n tfb.Softplus(),\n 'tailweight':\n tfb.Softplus(),\n 'temperature':\n tfb.Softplus(),\n 'total_count':\n lambda x: tf.floor(tfb.Sigmoid()(x / 100.) * 100.) + 1.,\n 'Bernoulli':\n lambda d: dict(d, dtype=tf.float32),\n 'CholeskyLKJ':\n fix_lkj,\n 'LKJ':\n fix_lkj,\n 'Zipf':\n lambda d: dict(d, dtype=tf.float32),\n 'GeneralizedNormal.power':\n tfb.Softplus(),\n }\n\n if param is not None:\n return constraints.get('{}.{}'.format(dist, param),\n constraints.get(param, tfb.Identity()))\n return constraints.get(dist, tfb.Identity())", "def condition_domain_reduction(csp, var) :\n return True", "def condition_domain_reduction(csp, var) :\n return True", "def test_4():\n h = iotbx.pdb.input(source_info=None, lines=test_pdb_4).construct_hierarchy()\n asc = h.atom_selection_cache()\n ncs_inp = iotbx.ncs.input(\n hierarchy=h,\n params=ncs_pars.ncs_search)\n ncs_groups = ncs_inp.get_ncs_restraints_group_list()\n assert len(ncs_groups) == 1\n # group 1\n assert ncs_groups[0].master_iselection.all_eq(\n asc.selection(string = \"chain A\").iselection())\n g1_c = ncs_groups[0].copies\n assert len(g1_c)==1\n assert g1_c[0].iselection.all_eq(\n asc.selection(string = \"chain B\").iselection())", "def validate(self):\n for rosdep_key in self:\n # Ensure all dependencies have definitions\n # i.e.: Ensure we aren't pointing to invalid rosdep keys\n for dependency in self[rosdep_key]['dependencies']:\n if dependency not in self:\n raise KeyError(\n 'Invalid Graph Structure: rosdep key `%s` does not exist in the dictionary of resolutions.'\n % dependency)\n self[dependency]['is_root'] = False\n # Check each entry for cyclical dependencies\n for rosdep_key in self:\n self.detect_cycles(rosdep_key, [])", "def enforce_node_consistency(self):\n # print(\"Entered enforce_node_consistency Function\")\n # print(\"self.domains\")\n # print(self.domains)\n for mystery in self.domains:\n # print(\"!!!!!!!!!!!!\")\n # print(mystery)\n # print(self.domains[mystery])\n keep_list = set()\n while self.domains[mystery]:\n word = self.domains[mystery].pop()\n if(len(word) == mystery.length):\n keep_list.add(word)\n for word in keep_list:\n self.domains[mystery].add(word)\n # print(self.domains[mystery])\n\n # raise NotImplementedError", "def consistency(node, sequence, orientation, overlap):\n from_id, to_id = node\n from_sequence, 
to_sequence = sequence\n from_orn, to_orn = orientation\n if from_orn == '-':\n from_sequence = reverse_and_complement(from_sequence)\n if to_orn == '-':\n to_sequence = reverse_and_complement(to_sequence)\n size_overlap = real_overlap(from_sequence, to_sequence)\n if not size_overlap == overlap:\n GRAPH_LOGGER.debug('Edge between node %s and %s have \\\n \tno consistency between CIGAR overlap end \"real\" overlap', from_id, to_id)\n return False\n\n return True", "def propagate(enqueue_condition_fn, csp, queue=None) :\n if (queue==None):\n queue = csp.get_all_variables()\n dequeued = []\n while len(queue)!=0:\n removedVar = queue[0]\n dequeued.append(removedVar)\n queue = queue[1:]\n for constraint in csp.constraints_between(removedVar,None)[:]:\n var2 = constraint.var2\n val2 = csp.get_assigned_value(var2)\n var2Domain = csp.get_domain(var2)[:]\n removedDomain = csp.get_domain(removedVar)[:]\n if len(removedDomain)==0 or len(var2Domain)==0:\n return None\n for domainVal2 in var2Domain:\n anyNonViolators = False\n for domainVal in removedDomain:\n check = constraint.check(domainVal,domainVal2)\n if check==True:\n anyNonViolators = True\n continue\n if anyNonViolators==False:\n csp.eliminate(var2, domainVal2)\n if len(csp.get_domain(var2))==0:\n return None\n if var2 not in queue and enqueue_condition_fn(csp,var2):\n queue.append(var2)\n return dequeued", "def check_valid_request_ca(self):\n\n self.check_valid_request_common()\n\n alg = self.get_POW().getSignatureAlgorithm()\n bc = self.get_POW().getBasicConstraints()\n eku = self.get_POW().getEKU()\n sia = self.get_POW().getSIA()\n\n if alg != rpki.oids.sha256WithRSAEncryption:\n raise rpki.exceptions.BadPKCS10(\"PKCS #10 has bad signature algorithm for CA: %s\" % alg)\n\n if bc is None or not bc[0] or bc[1] is not None:\n raise rpki.exceptions.BadPKCS10(\"PKCS #10 CA bad basicConstraints\")\n\n if eku is not None:\n raise rpki.exceptions.BadPKCS10(\"PKCS #10 CA EKU not allowed\")\n\n if sia is None:\n raise rpki.exceptions.BadPKCS10(\"PKCS #10 CA SIA missing\")\n\n caRepository, rpkiManifest, signedObject, rpkiNotify = sia\n\n logger.debug(\"check_valid_request_ca(): sia: %r\", sia)\n\n if signedObject:\n raise rpki.exceptions.BadPKCS10(\"PKCS #10 CA SIA must not have id-ad-signedObject\")\n\n if not caRepository:\n raise rpki.exceptions.BadPKCS10(\"PKCS #10 CA SIA must have id-ad-caRepository\")\n\n if not any(uri.startswith(\"rsync://\") for uri in caRepository):\n raise rpki.exceptions.BadPKCS10(\"PKCS #10 CA SIA id-ad-caRepository contains no rsync URIs\")\n\n if any(uri.startswith(\"rsync://\") and not uri.endswith(\"/\") for uri in caRepository):\n raise rpki.exceptions.BadPKCS10(\"PKCS #10 CA SIA id-ad-caRepository does not end with slash\")\n\n if not rpkiManifest:\n raise rpki.exceptions.BadPKCS10(\"PKCS #10 CA SIA must have id-ad-rpkiManifest\")\n\n if not any(uri.startswith(\"rsync://\") for uri in rpkiManifest):\n raise rpki.exceptions.BadPKCS10(\"PKCS #10 CA SIA id-ad-rpkiManifest contains no rsync URIs\")\n\n if any(uri.startswith(\"rsync://\") and uri.endswith(\"/\") for uri in rpkiManifest):\n raise rpki.exceptions.BadPKCS10(\"PKCS #10 CA SIA id-ad-rpkiManifest ends with slash\")\n\n if any(not uri.startswith(\"http://\") and not uri.startswith(\"https://\") for uri in rpkiNotify):\n raise rpki.exceptions.BadPKCS10(\"PKCS #10 CA SIA id-ad-rpkiNotify neither HTTP nor HTTPS\")", "def backtracking_search(self):\n # Make a so-called \"deep copy\" of the dictionary containing the\n # domains of the CSP variables. 
The deep copy is required to\n # ensure that any changes made to 'assignment' does not have any\n # side effects elsewhere.\n assignment = copy.deepcopy(self.domains)\n\n # Run AC-3 on all constraints in the CSP, to weed out all of the\n # values that are not arc-consistent to begin with\n self.inference(assignment, self.get_all_arcs())\n # Call backtrack with the partial assignment 'assignment'\n\n return self.backtrack(assignment)", "def test_3():\n h = iotbx.pdb.input(source_info=None, lines=test_pdb_3).construct_hierarchy()\n asc = h.atom_selection_cache()\n ncs_inp = iotbx.ncs.input(\n hierarchy=h,\n params=ncs_pars.ncs_search)\n ncs_groups = ncs_inp.get_ncs_restraints_group_list()\n assert len(ncs_groups) == 1\n # group 1\n assert ncs_groups[0].master_iselection.all_eq(\n asc.selection(string = \"chain A\").iselection())\n g1_c = ncs_groups[0].copies\n assert len(g1_c)==1\n assert g1_c[0].iselection.all_eq(\n asc.selection(string = \"chain B\").iselection())", "def check_deterministic_constraints(self, x):\n return True", "def check_deterministic_constraints(self, x):\n return True", "def CompilationRelaxations(self) -> int:", "def contractor(self, *args, **kwargs):\n vertices = copy.deepcopy(args[0])\n nrange = len(vertices[0])\n xpts = []\n ypts = []\n for i in range(nrange):\n xpts.append(vertices[0][i].value)\n ypts.append(vertices[1][i].value)\n constraint = copy.deepcopy(args[1])\n \n \n \n \n qxdot,qxddot,qydot,qyddot = self.update_allq(xpts,ypts)\n \n ## the all important computation split (need to abstract this kind of thing)\n ##lhs = (np.sqrt(qxdot*qxdot + qydot*qydot)**3.) *constraint\n lhs = ( ( np.sqrt(qxdot**2 + qydot**2) )**3 )*constraint\n \n # check2 = qxdot*qyddot\n # if check2.width() < 1.e-2:\n # check2.min.value = check2.real.value\n # check2.max.value = check2.real.value\n # t1 = (lhs - check2)/qydot\n \n #\n # qyddot\n #\n check2 = qydot*qxddot\n if check2.width() < 1.e-2 and check2.contains(0.):\n check2.inf = 0.\n check2.sup = 0.\n #if qxdot.contains(0.) and abs(qxdot.min.value)>1.e-6:\n # print 'qxdot = ',qxdot\n # print 'qxdot not invertable, implement other logic please'\n if abs(float(qxdot.inf))<1.e-6:\n qxdot.inf = 1.e-10\n print 'invert qxdot'\n print 'qxdot = ', qxdot\n \n #t1 = (lhs + qydot*qxddot)/(qxdot)\n t1 = (lhs + check2)/(qxdot)\n \n t1 = t1 & qyddot # go ahead and shrink t1 to qyddot - they are logically equivalent\n total_ans = []\n useful_indices = []\n bad_indices = []\n for i in range(len(ypts)): \n min_ans = 0.\n for j in range(len(ypts)):\n if j==i:\n pass\n else:\n min_ans = (ypts[j]*float(self.localBasis[2,j])) + min_ans\n min_ans = t1 - min_ans\n if (abs(float(self.localBasis[2,i])) > 0.0):\n min_ans = min_ans/float(self.localBasis[2,i])\n useful_indices.append(i)\n else:\n bad_indices.append(i)\n total_ans.append(min_ans)\n \n new_ans = vector_AND_(ypts, total_ans)\n for i in useful_indices:\n if new_ans[i].isempty == False: # abs( new_ans[i].width() ) > 0.:\n ypts[i] = ypts[i] & new_ans[i]\n qxdot,qxddot,qydot,qyddot = self.update_allq(xpts,ypts)\n else:\n print 'warning, possible constraint violation, curvature 1'\n \n ## \n ## qxdot\n ##\n check2 = qydot*qxddot\n if check2.width() < 1.e-2 and check2.contains(0.):\n check2.inf = 0.\n check2.sup = 0.\n #if qyddot.contains(0.):\n # print 'qyddot = ',qyddot\n # print 'qyddot not invertable, implement other logic please'\n \n if qyddot.contains(0.) 
and qyddot.width()<1.e-6:\n qxdot.inf = 0.#1.e-10\n print 'invert qyddot'\n print 'qyddot = ',qyddot\n fix = (lhs + check2)*(1./qyddot)#*(qyddot**-1.)\n fix = fix & qxdot # go ahead and shrink fix to qxdot - they are logically equivalent\n total_ans = []\n useful_indices = []\n bad_indices = []\n \n for i in range(len(xpts)): #contract on x[i]\n min_ans = 0.\n for j in range(len(xpts)): # add up all jth pieces of the dot product except i\n if j==i:\n pass\n else:\n \n min_ans = (xpts[j]*float(self.localBasis[1,j] ) ) + min_ans\n min_ans = fix - min_ans\n if (abs(float(self.localBasis[1,i]) ) >0.0 ):\n min_ans = min_ans/float(self.localBasis[1,i])\n useful_indices.append(i)\n else:\n bad_indices.append(i)\n total_ans.append(min_ans)\n \n new_ans = vector_AND_(xpts, total_ans)\n for i in useful_indices:\n if not new_ans[i].isempty: # abs( new_ans[i].width() ) > 0.:\n xpts[i] = xpts[i] & new_ans[i]\n qxdot,qxddot,qydot,qyddot = self.update_allq(xpts,ypts)\n else:\n print 'warning, possible constraint violation, curvature 2'\n \n \n ## switch to the other side\n \n ##\n ## contract on qydot\n ##\n check2 = qxdot*qyddot\n if check2.width() < 1.e-2 and check2.contains(0.):\n check2.inf = 0.\n check2.sup = 0.\n# if qxddot.contains(0.):\n# print 'qxddot = ',qxddot\n# print 'qxddot not invertable, implement other logic please'\n# qxddot.min.value = 0.\n if qxddot.contains(0.):\n qxddot.inf = 0.\n \n print 'invert qxddot'\n print 'qxddot = ',qxddot\n t1 = (lhs - check2)/(-qxddot)#*(-qxddot**-1)\n t1 = t1 & qydot\n total_ans = []\n useful_indices = []\n bad_indices = []\n for i in range(len(ypts)): \n min_ans = 0.\n for j in range(len(ypts)):\n if j==i:\n pass\n else:\n #print 't1 = ',t1\n #print 'ypts[{}] = {}'.format(i,ypts[i])\n #print 'localbasis[{},{}] = {}'.format(1,i,self.localBasis[1,j])\n min_ans = (ypts[j]*float(self.localBasis[1,j])) + min_ans\n min_ans = t1 - min_ans\n if (abs(float(self.localBasis[1,i])) > 0.0):\n min_ans = min_ans/float(self.localBasis[1,i])\n useful_indices.append(i)\n else:\n bad_indices.append(i)\n total_ans.append(min_ans)\n \n new_ans = vector_AND_(ypts, total_ans)\n for i in useful_indices:\n if not new_ans[i].isempty: # abs( new_ans[i].width() ) > 0.:\n ypts[i] = ypts[i] & new_ans[i]\n else:\n print 'warning, possible constraint violation, curvature 3'\n \n ##contract on qxdot\n \n check2 = qxdot*qyddot\n if check2.width() < 1.e-2 and check2.contains(0.):\n check2.inf = 0.\n check2.sup = 0.\n #contract on qxddot\n# if qydot.contains(0.):\n# print 'qydot = ',qxddot\n# print 'qydot not invertable, implement other logic please'\n if qydot.contains(0.):\n qydot.inf = 0.\n print 'invert qydot'\n print 'qydot = ',qydot\n fix = (lhs - qxdot*qyddot)/(-qydot)#*(-qydot**-1)\n fix = fix & qxddot # go ahead and shrink t1 to quddot - they are logically equivalent\n total_ans = []\n useful_indices = []\n bad_indices = []\n for i in range(len(xpts)):\n min_ans = 0.\n for j in range(len(xpts)):\n if j==i:\n pass\n else:\n min_ans = (xpts[j]*float(self.localBasis[2,j] ) ) + min_ans\n min_ans = fix - min_ans\n if (abs(float(self.localBasis[2,i]) ) >0.0 ):\n min_ans = min_ans/float(self.localBasis[2,i])\n useful_indices.append(i)\n else:\n bad_indices.append(i)\n total_ans.append(min_ans)\n \n new_ans = vector_AND_(xpts, total_ans)\n for i in useful_indices:\n if not new_ans[i].isempty: # abs( new_ans[i].width() ) > 0.:\n xpts[i] = xpts[i] & new_ans[i]\n else:\n print 'warning, possible constraint violation, curvature 4'\n \n for i in range(nrange):\n vertices[0][i].value = 
xpts[i]\n vertices[1][i].value = ypts[i]\n return vertices", "def _enforce(self, req, action):\n try:\n self.policy.enforce(req.context, action, {})\n except exception.Forbidden:\n raise HTTPForbidden()", "def P3C(stn, ordering): #UNTESTED!\n if DirectedPathConsistency.convert_to_DPC(stn, ordering):\n for k in range(stn.length):\n x_k = ordering[k]\n for i in range(k):\n x_i = ordering[i]\n for j in range(k):\n x_j = ordering[j]\n if x_j in stn.successor_edges[x_i]:\n if x_k in stn.successor_edges[x_j] and x_k in stn.successor_edges[x_i]:\n stn.successor_edges[x_i][x_k] = min(stn.successor_edges[x_i][x_k], stn.successor_edges[x_i][x_j] + stn.successor_edges[x_j][x_k])\n if x_j in stn.successor_edges[x_k] and x_i in stn.successor_edges[x_k]:\n stn.successor_edges[x_k][x_j] = min(stn.successor_edges[x_k][x_j], stn.successor_edges[x_k][x_i] + stn.successor_edges[x_i][x_j])\n return stn\n else:\n return False", "def part1c_0():\n xs = exampleInput\n T = submission.computeEdgeMarginals(simpleCRF, xs)\n for t in T:\n grader.requireIsEqual( 1.0, sum(t.values()) )", "def contracted(self):\n res = self.copy()\n # remembering which arcs were contracted in order to reconstruct the\n # paths in the original graph later\n arc_mapping = {e: [e] for e, _ in res.arcs()}\n # contract out degree 1 vertices\n for u in list(res):\n if res.out_degree(u) == 1:\n arc = res.out_arcs(u)[0]\n # mark u's inarcs to know they use the arc to be contracted\n for a in res.in_arcs(u):\n arc_mapping[a].extend(arc_mapping[arc])\n # if u is the source, it has no in-arcs to mark the\n # contraction of this out-arc, so we store it in the out-arcs\n # of its out-neighbor.\n if res.in_degree(u) == 0:\n v = res.out_neighborhood(u)[0][0]\n for a in res.out_arcs(v):\n new_path = list(arc_mapping[arc])\n new_path.extend(arc_mapping[a])\n arc_mapping[a] = new_path\n\n # contract the edge\n res.contract_edge(arc, keep_source=False)\n # contract in degree 1 vertices\n for v in list(res):\n if res.in_degree(v) == 1:\n arc = res.in_arcs(v)[0]\n # mark v's outarcs to know they use the arc to be contracted\n for a in res.out_arcs(v):\n new_path = list(arc_mapping[arc])\n new_path.extend(arc_mapping[a])\n arc_mapping[a] = new_path\n # if u is the sink, it has no out-arcs to mark the contraction\n # of this in-arc, so we store it in the in-arcs of its\n # in-neighbor.\n if res.out_degree(v) == 0:\n u = res.in_neighborhood(v)[0][0]\n for a in res.in_arcs(u):\n arc_mapping[a].extend(arc_mapping)\n\n # print(\"{} has in degree 1 from {}\".format(v,u))\n res.contract_edge(arc, keep_source=True)\n return res, arc_mapping", "def inference(self, assignment, queue):\n # Do this as long as there is elements in the queue\n # e.g there is still more arcs to check \n while queue:\n # Pop the first element in the queue\n xi, xj = queue.pop(0)\n # Do the revise check \n if self.revise(assignment, xi, xj):\n # IF zero, CSP has no consistent soluton and AC-3 returns failure \n if len(assignment[xi]) == 0:\n return False\n # If NOT ZERO loop throuh the neighboring arcs of node\n # and append the neighbor and this node to the queue for further checking.\n # We do this so that we keep checking after we do changes and make sure \n # all is gucci gang\n for n in self.get_all_neighboring_arcs(xi):\n if n[0] != xj:\n queue.append((n[0], xi))\n return True", "def find_topo_order(s,graph):\n\n ## initialization\n matrix = graph.get_adjacency()\n n, c = matrix.shape\n sym_matrix = np.empty((n,c), dtype=object)\n # cost_matrix = np.zeros((n,c))\n cache = {}\n\n def 
symbolize(i,j):\n \"given two indices, create a symbolic variable\"\n s = z.Int('edge_{0}{1}'.format(i,j))\n return s\n\n\n def value_of(i,j):\n \"given two indices, return the (i,j)th value in the adjacency matrix\"\n return sym_matrix[i][j]\n\n\n def constraint_1(n,i,j,k):\n y_ij = value_of(i,j)\n y_jk = value_of(j,k)\n y_ik = value_of(i,k)\n\n name = \"c1\" + str((n,i,j,k))\n constraint = (y_ij + y_jk - y_ik) <= 1\n\n # if name not in cache:\n # cache[name] = constraint\n s.assert_and_track(constraint, name)\n\n\n def constraint_2(n,i,j,k):\n y_ij = value_of(i,j)\n y_jk = value_of(j,k)\n y_ik = value_of(i,k)\n\n name = \"c2\" + str((n,i,j,k))\n constraint = (-y_ij - y_jk + y_ik) <= 0\n\n # if name not in cache:\n # cache[name] = constraint\n s.assert_and_track(constraint, name)\n\n\n def constraint_3(symbolic):\n s.add(z.Or([symbolic == 0, symbolic == 1]))\n\n\n def int_formulation(j):\n left = z.Sum([matrix[k][j] * sym_matrix[k][j] for k in range(j)])\n right = z.Sum([matrix[l][j] * (1 - sym_matrix[j][l]) for l in range(j+1, n)])\n\n return [left, right]\n\n\n ## constraint 3, every edge must be a 0 or a 1, we get the 0 or 1 directly\n ## from the adjacency matrix\n ## we do this first so that the sym_matrix is populated\n for n_iter in range(n):\n for j in range(n_iter+1):\n for i in range(j):\n s_edge = symbolize(i,j)\n sym_matrix[i][j] = s_edge\n constraint_3(s_edge)\n\n ## Iteration for triangle inequalities\n for n_iter in range(n):\n for k in range(n_iter+1):\n for j in range(k):\n for i in range(j):\n constraint_1(n_iter,i,j,k)\n constraint_2(n_iter,i,j,k)\n\n\n ## minimization\n o = z.Optimize()\n y = z.Int('y')\n\n y = z.Sum(u.flatten([int_formulation(j) for j in range(n)]))\n o.minimize(y)\n\n result = []\n\n if s.check() == z.sat:\n result = s.model()\n\n return result", "def solve(self):\n print(\"Attempting to solve problem instance with {} constraints\".format(len(self.constraints)))\n self.formulation.solve(solver='SCS')\n print(self.formulation.status)", "def solve(self):\n print(\"Attempting to solve problem instance with {} constraints\".format(len(self.constraints)))\n self.formulation.solve(solver='SCS')\n print(self.formulation.status)", "def solve(self):\n print(\"Attempting to solve problem instance with {} constraints\".format(len(self.constraints)))\n self.formulation.solve(solver='SCS')\n print(self.formulation.status)", "def test_missing_proof(self):\n node, other = self.create_nodes(2)\n node.send_identity(other)\n\n # permit NODE\n authorize = self._mm.create_authorize([(node.my_member, self._community.get_meta_message(u\"protected-full-sync-text\"), u\"permit\"),\n (node.my_member, self._community.get_meta_message(u\"protected-full-sync-text\"), u\"authorize\")])\n node.give_message(authorize, self._mm)\n\n protected_text = node.create_protected_full_sync_text(\"Protected message\", 42)\n node.store([protected_text])\n\n # OTHER pretends to received the protected message and requests the proof\n node.give_message(other.create_missing_proof(node.my_member, 42), other)\n\n # NODE sends dispersy-authorize to OTHER\n _, authorize = other.receive_message(names=[u\"dispersy-authorize\"]).next()\n\n permission_triplet = (node.my_member.mid, u\"protected-full-sync-text\", u\"permit\")\n authorize_permission_triplets = [(triplet[0].mid, triplet[1].name, triplet[2]) for triplet in authorize.payload.permission_triplets]\n self.assertIn(permission_triplet, authorize_permission_triplets)", "def _check_family(self):\n for (s, (b, c)), (cond, ref) in families.items():\n 
if s != self.SYMBOL or len(b) != self._.d:\n continue\n vars = tuple(set(sum(map(variables, b + c), ())))\n sols = _solve([SR(l) == r for l, r\n in zip(self._.b[:-1] + self._.c[1:], b + c)],\n vars)\n if any(checkConditions(cond, sol) for sol in sols\n if is_integral(sol)):\n raise InfeasibleError(refs=ref)", "def constraints(self):\n ...", "def ac3(self, arcs=None):\n # print(\"Entered ac3 Function\")\n revise = False\n if arcs is None:\n arcs = set()\n for arc in self.crossword.overlaps:\n arcs.add(arc)\n\n while arcs:\n arc = arcs.pop()\n # print(\"arc\")\n # print(arc)\n revise = self.revise(arc[0], arc[1])\n if revise:\n arcs.update(self.crossword.neighbors(arc[0]))\n if (self.domains[arc[0]] is None):\n return False\n # print(\"revise\")\n # print(revise)\n # print(\"arc\")\n # print(arc)\n # input()\n\n # print(\"\")\n # print(\"\")\n # print(\"arcs\")\n # print(arcs)\n # print(\"\")\n # print(\"\")\n\n return True\n # raise NotImplementedError", "def __init__(self, variables, domains, neighbors, constraints, C):\r\n super().__init__(())\r\n variables = variables or list(domains.keys())\r\n self.variables = variables\r\n self.domains = domains\r\n self.neighbors = neighbors\r\n self.constraints = constraints\r\n self.curr_domains = None\r\n # visited nodes\r\n self.nassigns = 0\r\n self.conflict_set = {} #dictionary which stores the conflict set of each variable for fc - cbj\r\n self.prev_conflict_set = [] # we store the conflict set from the variable that causes dead-end\r\n self.deadend = None # we save the dead end variable in fc - cbj\r\n # initializating the conflict set array\r\n for x in self.variables:\r\n self.conflict_set[x]=[]\r\n # --------------------------\r\n # keep track of total checks for each algo\r\n self.totchecks=0\r\n # dict for later use in dom / wdeg heuristic\r\n # we initializating weights from constraints to 1\r\n self.weight = {}\r\n for each in C.keys():\r\n self.weight[(each[0],each[1])] = 1", "def test_2():\n h = iotbx.pdb.input(source_info=None, lines=test_pdb_2).construct_hierarchy()\n asc = h.atom_selection_cache()\n ncs_inp = iotbx.ncs.input(\n hierarchy=h,\n params=ncs_pars.ncs_search)\n ncs_groups = ncs_inp.get_ncs_restraints_group_list()\n assert len(ncs_groups) == 1\n # group 1\n assert ncs_groups[0].master_iselection.all_eq(\n asc.selection(string = \"chain A\").iselection())\n g1_c = ncs_groups[0].copies\n assert len(g1_c)==1\n assert g1_c[0].iselection.all_eq(\n asc.selection(string = \"chain B\").iselection())", "def SSC_test():\n \n # generating data\n\n D = 40 # Dimension of ambient space\n k = 4 # Number of subspaces\n d = 1\n N = 90 # Number of points in subspaces\n # Generating N points in d dim. subspaces\n Xi = [np.random.randn(D, d).dot(np.random.randn(d, N))\n for _ in range(k)]\n X = np.concatenate(Xi, axis=1)\n\n # Generating the ground-truth for evaluating clustering results\n lbls = [i * np.ones([1, N]) for i in range(k)]\n s = np.concatenate(lbls, axis=1)\n r = 0 # Enter the projection dimension e.g. 
r = d*n, enter r = 0 to not project\n Cst = 0 # Enter 1 to use the additional affine constraint sum(c) == 1\n OptM = 'L1Noise' # OptM can be {'L1Perfect','L1Noise','Lasso','L1ED'}\n lmbda = 0.001 # Regularization parameter in 'Lasso' or the noise level for 'L1Noise'\n # Number of top coefficients to build the similarity graph, enter K=0 for using the whole coefficients\n K = 0 #max(d1, d2)\n if Cst == 1:\n K = d + 1 # For affine subspaces, the number of coefficients to pick is dimension + 1\n\n Xp = DataProjection(X, r, 'NormalProj')\n \n # testing clustering\n \n '''\n # using wrapper function\n Grps, sc = sparseSubspaceClustering(\n Xp.T, n, ground_truth=s, affine=Cst, OptM=OptM, lam=lmbda)\n Grps = BestMap(sc, Grps)\n Missrate = float(np.sum(sc != Grps)) / sc.size\n print(\"\\n\\nMisclassification rate: {:.4f} %\\n\\n\".format(Missrate * 100))\n '''\n \n '''\n # calling internal tools\n CMat = SparseCoefRecovery(Xp, Cst, OptM, lmbda)\n # Make small values 0\n eps = np.finfo(float).eps\n CMat[np.abs(CMat) < eps] = 0\n\n CMatC, sc, OutlierIndx, Fail = OutlierDetection(CMat, s)\n\n if Fail == False:\n CKSym = BuildAdjacency(CMatC, K)\n Grps = SpectralClustering(CKSym, n)\n Grps = BestMap(sc, Grps)\n Missrate = float(np.sum(sc != Grps)) / sc.size\n print(\"\\n\\nMisclassification rate: {:.4f} %\\n\\n\".format(Missrate * 100))\n else:\n print(\"Something failed\")\n '''\n \n labels, _, __ = KSubspaces(Xp.T, k, d)\n labels = BestMap(s, labels)\n Missrate = float(np.sum(s != labels)) / s.size\n print(\"\\n\\nMisclassification rate: {:.4f} %\\n\\n\".format(Missrate * 100))", "def update_graph_compound_costs(self):\n\n # # # Check if all costs are available\n if not self.compound_costs_solved:\n unsolved_cmp = [key for key, _ in self.compound_costs.items()]\n raise RuntimeError(\"The following cmp have no cost assigned:\\n\" + str(unsolved_cmp) +\n \"\\nReconsider the starting conditions.\")\n # # # Reset unique_iterator_list as graph changes\n self._reset_iterator_memory()\n for node in self.compound_costs.keys():\n # # # Loop over all edges of compound and manipulate weight\n for target_node, attributes in self.graph_handler.graph[node].items():\n required_compound_costs = np.asarray([self.compound_costs[k] for k in attributes['required_compounds']])\n tot_required_compound_costs = np.sum(required_compound_costs)\n # # # Set required compound costs in edge\n self.graph_handler.graph.edges[node,\n target_node]['required_compound_costs'] = tot_required_compound_costs\n # # # Add required compound costs to weight\n self.graph_handler.graph.edges[node, target_node]['weight'] += tot_required_compound_costs", "def validate(self, fgraph):\r\n\r\n if self.destroyers:\r\n ords = self.orderings(fgraph)\r\n\r\n if _contains_cycle(fgraph, ords):\r\n raise InconsistencyError(\"Dependency graph contains cycles\")\r\n else:\r\n #James's Conjecture:\r\n #If there are no destructive ops, then there can be no cycles.\r\n pass\r\n return True", "def _make_valid(self, covar):\n # eliminate nans and infs (replace them with high values on the\n # diagonal and zeros else)\n bs = compat.get_dim_int(covar, 0) # covar.get_shape()[0]\n dim = compat.get_dim_int(covar, -1) # covar.get_shape()[-1]\n covar = tf.where(tf.math.is_finite(covar), covar,\n tf.eye(dim, batch_shape=[bs])*1e6)\n\n # make symmetric\n covar = (covar + tf.linalg.matrix_transpose(covar)) / 2.\n\n # add a bit of noise to the diagonal of covar to prevent\n # nans in the gradient of the svd\n noise = tf.random.uniform(covar.get_shape().as_list()[:-1], 
minval=0,\n maxval=0.001/self.scale**2)\n s, u, v = tf.linalg.svd(covar + tf.linalg.diag(noise))\n # test if the matrix is invertible\n invertible = self._is_invertible(s)\n # test if the matrix is positive definite\n pd = tf.reduce_all(tf.greater(s, 0), axis=-1)\n\n # try making a valid version of the covariance matrix by ensuring that\n # the minimum eigenvalue is at least 1e-4/self.scale\n min_eig = s[..., -1:]\n eps = tf.tile(tf.maximum(1e-4/self.scale - min_eig, 0),\n [1, compat.get_dim_int(s, -1)])\n covar_invertible = tf.matmul(u, tf.matmul(tf.linalg.diag(s + eps), v,\n adjoint_b=True))\n\n # if the covariance matrix is valid, leave it as is, else replace with\n # the modified variant\n covar_valid = tf.where(tf.logical_and(invertible, pd)[:, None, None],\n covar, covar_invertible)\n\n # make symmetric again\n covar_valid = \\\n (covar_valid + tf.linalg.matrix_transpose(covar_valid)) / 2.\n\n return covar_valid", "def legalize_graph(gm: pippy.fx.GraphModule) -> pippy.fx.GraphModule:\n indeg = {node: 0 for node in gm.graph.nodes}\n new_graph = pippy.fx.Graph()\n # Track how many unfulfilled dependencies each node has\n for node in gm.graph.nodes:\n for user in node.users:\n indeg[user] += 1\n queue: collections.deque = collections.deque()\n # Add all nodes with no dependencies to the queue\n for node in gm.graph.nodes:\n if indeg[node] == 0:\n queue.append(node)\n env: Dict[pippy.fx.Node, pippy.fx.Node] = {}\n # Pop nodes from the queue, and add nodes that have had all their\n # dependencies fulfilled\n while len(queue) > 0:\n cur = queue.popleft()\n env[cur] = new_graph.node_copy(cur, lambda x: env[x])\n for user in cur.users:\n indeg[user] -= 1\n if indeg[user] == 0:\n queue.append(user)\n # If the new graph's size is not as large as the old one, then there must be\n # a cycle (i.e. 
some node's dependencies were not satisfied.)\n if len(new_graph.nodes) < len(gm.graph.nodes):\n raise RuntimeError(f\"Input graph has cycles, unable to add {[node for node in indeg if indeg[node] != 0]}\")\n gm.graph = new_graph\n return gm", "def incompatibility_solve_cg(self, useAMS=True):\n \n zero = Expression((\"0.0\", \"0.0\", \"0.0\"), degree=1)\n bc = DirichletBC(self.PN, zero, DirichletBoundary())\n \n T1 = Function(self.PN) # Solution for the curl curl problem\n T2 = Function(self.PN) # Solution for the curl curl problem\n T3 = Function(self.PN) # Solution for the curl curl problem\n\n if useAMS:\n \n # Set operator for the linear solver\n L_X = inner(self.strain_diff_1, curl(self.inc_v0))*dx\n A_X, b_X = assemble_system(self.a_X, L_X, bc)\n self.ksp_X.setOperators(as_backend_type(A_X).mat())\n self.ksp_X.solve(as_backend_type(b_X).vec(), as_backend_type(T1.vector()).vec())\n\n # Show linear solver details\n self.ksp_X.view()\n\n # Solve 2nd system\n L_X = inner(self.strain_diff_2, curl(self.inc_v0))*dx\n A_X, b_X = assemble_system(self.a_X, L_X, bc)\n self.ksp_X.setOperators(as_backend_type(A_X).mat())\n self.ksp_X.solve(as_backend_type(b_X).vec(), as_backend_type(T2.vector()).vec())\n\n # Solve 3nd system\n L_X = inner(self.strain_diff_3, curl(self.inc_v0))*dx\n A_X, b_X= assemble_system(self.a_X, L_X, bc)\n self.ksp_X.setOperators(as_backend_type(A_X).mat())\n self.ksp_X.solve(as_backend_type(b_X).vec(), as_backend_type(T3.vector()).vec())\n \n else:\n\n ### vanilla CG works with potential as RHS\n\n L_X = inner(self.strain_diff_1, curl(self.inc_v0))*dx\n solve(self.a_X == L_X, T1, bc, \n solver_parameters={'linear_solver': 'cg', 'preconditioner': 'jacobi'}) \n\n L_X = inner(self.strain_diff_2, curl(self.inc_v0))*dx\n solve(self.a_X == L_X, T2, bc, \n solver_parameters={'linear_solver': 'cg', 'preconditioner': 'jacobi'}) \n\n L_X = inner(self.strain_diff_3, curl(self.inc_v0))*dx\n solve(self.a_X == L_X, T3, bc, \n solver_parameters={'linear_solver': 'cg', 'preconditioner': 'jacobi'})\n\n return project( self.X_0(curl(T1),curl(T2),curl(T3)), \n self.TFS, solver_type=\"cg\", preconditioner_type=\"ilu\")", "def exercise():\n pi_good = get_pdb_inputs(pdb_str=pdb_str_answer, restraints=False)\n map_data = get_map(xrs=pi_good.xrs)\n xrs_good = pi_good.xrs.deep_copy_scatterers()\n pi_good.ph.write_pdb_file(file_name=\"answer.pdb\",\n crystal_symmetry=xrs_good.crystal_symmetry())\n #\n pi_poor = get_pdb_inputs(pdb_str=pdb_str_poor, restraints=True)\n pi_poor.ph.write_pdb_file(file_name=\"poor.pdb\")\n xrs_poor = pi_poor.xrs.deep_copy_scatterers()\n #\n d = xrs_good.distances(other=xrs_poor)\n print(d.min_max_mean().as_tuple())\n assert flex.max(d)>2\n assert flex.mean(d)>0.7\n #\n xrs_refined = xrs_poor\n for i in range(3):\n ero = individual_sites.easy(\n map_data = map_data,\n xray_structure = xrs_refined,\n pdb_hierarchy = pi_poor.ph,\n geometry_restraints_manager = pi_poor.grm)\n xrs_refined = ero.xray_structure\n # comapre\n d = xrs_good.distances(other=xrs_refined)\n print(d.min_max_mean().as_tuple())\n assert flex.max(d)<0.15\n assert flex.mean(d)<0.03\n ero.pdb_hierarchy.write_pdb_file(file_name=\"refined.pdb\",\n crystal_symmetry=xrs_good.crystal_symmetry())", "def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n\n #Creamos las estructuras de datos necesarias (priority queue y set)\n openNodes = util.PriorityQueue()\n closedNodes = set([])\n\n #Guardamos el nodo inicial\n node = Node(problem.getStartState(), '', 0, None)\n\n #Calculamos funcion heuristica y el 
coste acumulado para sacar la funcion de evaluacion del nodo inicial\n fn = problem.getCostOfActions(node.path) + nullHeuristic(node.name, problem);\n\n #Lo metemos en la cola con su funcion de evaluacion como prioridad\n openNodes.push(node, fn)\n\n #Iteramos para cada nodo\n while True:\n if openNodes.isEmpty():\n break #ERROR: throw exception\n else :\n #sacamos el nodo de arriba de la cola\n node = openNodes.pop()\n if problem.isGoalState(node.name): #Comprobamos si el nodo es Goal. Si lo es terminamos.\n break\n else: #Expandimos los nodos sucesores del nodo si no estan en closed\n if nodeIsClosed(node, closedNodes) is False:\n for successor in problem.getSuccessors(node.name):\n n, p, c = successor\n succNode = Node(n, p, c, node)\n if nodeIsClosed(succNode, closedNodes) is False:\n fn = problem.getCostOfActions(findPath(succNode)) + nullHeuristic(succNode.name, problem);\n openNodes.push(succNode, fn)\n #Metemos el nodo en closed\n closedNodes.add(node)\n\n #Devolvemos el camino al Goal\n return findPath(node)", "def update_flow(self):\n start_nodes = []\n end_nodes = []\n capacities = []\n # (1): add all edges (u, v) with capacity ub-lb\n B = self.get_max_lb()*(self.num_edges() - len(self) + 2)\n for arc in self.arc_info.keys():\n if self.arc_info[arc][\"upper_bound\"] == float('inf'):\n self.arc_info[arc][\"upper_bound\"] = B\n for arc in self.arc_info.keys():\n start_nodes.append(self.arc_info[arc][\"start\"])\n end_nodes.append(self.arc_info[arc][\"destin\"])\n capacities.append(int(self.arc_info[arc][\"upper_bound\"]\\\n - self.arc_info[arc][\"lower_bound\"]))\n # (2): add edge (t, s) with capacity B\n # B = max_lb * (m - n + 2)\n B = self.get_max_lb()*(self.num_edges() - len(self) + 2)\n if B == 0:\n #B = float('inf')\n B = 100000\n start_nodes.append(self.sink())\n end_nodes.append(self.source())\n capacities.append(int(B))\n # (3): for all verts, if exc > 0, add edge (s', v) with capacity exc(v),\n # and if exc < 0, add edge(s', v) with capacity -exc(v)\n s_prime = max(self.vertices) + 1\n t_prime = max(self.vertices) + 2\n print(\"s'={}, t'={}\".format(s_prime, t_prime))\n for v in self:\n #print(\"vert {} in arcs: {}\".format(v,\n # self.in_arcs_lists[v]))\n # compute exc: lower bounds of in - lower bounds of out\n sum_lb_in = 0\n for in_arc in self.in_arcs_lists[v]:\n sum_lb_in += self.arc_info[in_arc][\"lower_bound\"]\n sum_lb_out = 0\n #print(\"vert {} out arcs: {}\".format(v,\n # self.out_arcs_lists[v]))\n for out_arc in self.out_arcs_lists[v]:\n sum_lb_out += self.arc_info[out_arc][\"lower_bound\"]\n exc = sum_lb_in - sum_lb_out\n #print(\"exc is {}\".format(exc))\n if exc > 0:\n start_nodes.append(s_prime)\n end_nodes.append(v)\n capacities.append(int(exc))\n else:\n start_nodes.append(v)\n end_nodes.append(t_prime)\n capacities.append(int(-exc))\n # solve maxflow\n #print(\"s' is {} and t' is {}\".format(s_prime, t_prime))\n max_flow = pywrapgraph.SimpleMaxFlow()\n for u, v, cap in zip(start_nodes, end_nodes, capacities):\n #print(\"Adding edge {}, {} with cap {}\".format(u,v,cap))\n max_flow.AddArcWithCapacity(u, v, cap)\n success = True\n if max_flow.Solve(s_prime, t_prime) == max_flow.OPTIMAL:\n #print('Max flow: {}'.format( max_flow.OptimalFlow()))\n #print(' Arc Flow / Capacity')\n for i in range(max_flow.NumArcs()):\n # print('%1s -> %1s %3s / %3s' % (\n # max_flow.Tail(i),\n # max_flow.Head(i),\n # max_flow.Flow(i),\n # max_flow.Capacity(i)))\n # check that (s', v) edges are saturated (once we find a false,\n # stay false forever)\n if success:\n if 
max_flow.Tail(i) == s_prime:\n success = max_flow.Flow(i) == max_flow.Capacity(i)\n else:\n success = False\n print('There was an issue with the max flow input.')\n if success:\n # update the flows to be the flow found from maxflow problem\n for i in range(max_flow.NumArcs()):\n # if this is an original arc, update the flow\n if max_flow.Tail(i) != s_prime \\\n and max_flow.Head(i) != t_prime \\\n and not (max_flow.Tail(i) == self.sink() \\\n and max_flow.Head(i) == self.source()):\n # update arc\n start = max_flow.Tail(i)\n destin = max_flow.Head(i)\n arc = self.get_arc(start, destin)\n new_flow = self.arc_info[arc][\"lower_bound\"] + max_flow.Flow(i)\n old_flow = self.arc_info[arc][\"weight\"]\n self.arc_info[arc][\"weight\"] = new_flow\n #print(\"Edge {} {} adjusted from {} to {}\".format(\n # start,\n # destin,\n # old_flow,\n # new_flow\n # ))\n self.check_conservation_of_flow() # check that solution is valid\n return True\n else:\n return False", "def test_validate_ksk_proof_of_ownership_4(self):\n self._test_file(\"ksr-root-2016-q3-0.xml\")", "def flow_condition(p_prime, p3, triple):\n\n all_edges = set(self.arc_info.keys())\n not_p_prime = all_edges.difference(set(p_prime))\n #print(\"Not p_prime: {}\".format(not_p_prime))\n not_p3 = all_edges.difference(set(p3))\n #print(\"Not p_3: {}\".format(not_p3))\n p_prime_alone = list(set(p_prime).intersection(not_p3))\n #print(\"p_prime_alone: {}\".format(p_prime_alone))\n p3_alone = list(set(p3).intersection(not_p_prime))\n #print(\"p3 alone: {}\".format(p3_alone))\n overlap = list(set(p3).intersection(p_prime))\n #print(\"overlap alone: {}\".format(overlap))\n\n #print(\"computing L_wprime and U_wprime\")\n L_wprime, U_wprime = compute_bounds(p_prime_alone, triple)\n #print(\"computing L_w3 and U_w3\")\n L_w3, U_w3 = compute_bounds(p3_alone, triple)\n #print(\"computing L_overlap and U_overlap\")\n L_overlap, U_overlap = compute_bounds(overlap, triple)\n #print(\"L_wprime, U_wprime: {} {}\".format(L_wprime, U_wprime))\n #print(\"L_w3, U_w3: {} {}\".format(L_w3, U_w3))\n #print(\"{} <= {}\".format(L_overlap, U_wprime + U_w3))\n #print(\"{} >= {}\".format(U_overlap, L_wprime + L_w3))\n meets_conditions = (L_wprime <= U_wprime) & \\\n (L_w3 <= U_w3) & \\\n (L_overlap <= U_wprime + U_w3) & \\\n (L_wprime + L_w3 <= U_overlap)\n if meets_conditions:\n w_prime, w3 = center_flows(L_wprime, U_wprime,\n L_w3, U_w3,\n L_overlap, U_overlap)\n # change paths\n # first, delete:\n for index in sorted(triple, reverse=True):\n del self.paths[index]\n del self.weights[index]\n # now, add:\n self.paths.append(p3)\n self.paths.append(p_prime)\n self.weights.append(w3)\n self.weights.append(w_prime)\n # update weights on edges\n self.update_edge_weights()\n self.check_flow()\n self.check_paths()\n return(True)\n else:\n return(False)", "def check_proof(self, *, no_gaps=False, compute_only=False):\n self.rpt = report.ProofReport()\n return self.thy.check_proof(self.prf, rpt=self.rpt, no_gaps=no_gaps, compute_only=compute_only)", "def validate(self, fgraph):\r\n\r\n if self.destroyers:\r\n ords = self.orderings(fgraph)\r\n\r\n if _contains_cycle(fgraph, ords):\r\n raise InconsistencyError(\"Dependency graph contains cycles\")\r\n else:\r\n #James's Conjecture:\r\n #If there are no destructive ops, then there can be no cycles.\r\n pass\r\n return True", "def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n path_to_point = {}\n cost_to_point = {}\n\n # Get the start node\n start_node = problem.getStartState()\n fringe_node = [start_node]\n 
path_to_point[start_node] = []\n cost_to_point[start_node] = problem.getCostOfActions(path_to_point[start_node])\n\n goal_found = False\n\n while(not goal_found):\n #for i in range(100): \n nodes_to_expand = set()\n # get max value node in the fringe node\n min_val = float(\"inf\")\n for one_node in fringe_node:\n # Compute the cost to reach a node\n if cost_to_point[one_node] < min_val:\n min_val = cost_to_point[one_node]\n \n for one_node in fringe_node:\n # Compute the cost to reach a node\n if cost_to_point[one_node] == min_val:\n nodes_to_expand.add(one_node)\n fringe_node.remove(one_node)\n\n # Expand the fringe node \n for one_node in nodes_to_expand:\n path_to_parent = path_to_point[one_node]\n for nxt_node in problem.getSuccessors(one_node):\n pos = nxt_node[0]\n mv = nxt_node[1]\n # check if point already present in path to point\n prev_cost = float(\"inf\")\n if pos in cost_to_point:\n prev_cost = cost_to_point[pos]\n new_path = path_to_parent + [mv]\n if prev_cost > problem.getCostOfActions(new_path):\n path_to_point[pos] = new_path\n cost_to_point[pos] = problem.getCostOfActions(new_path)\n fringe_node.append(pos)\n\n # Check if destination is reached in the fringe node\n for one_node in fringe_node:\n if problem.isGoalState(one_node):\n final_node = one_node\n goal_found = True\n break\n \n #print(len(fringe_node))\n print(final_node)\n print(path_to_point[final_node])\n return path_to_point[final_node] \n\n util.raiseNotDefined()", "def test_update_hyperflex_cluster_network_policy(self):\n pass", "def _constraints_external(self):\n pass", "def test_sdp(self):\n a = sp.rand(100, 100, .1, random_state=1)\n a = a.todense()\n X = Variable(100, 100)\n obj = at.norm(X, \"nuc\") + at.norm(X-a, 'fro')\n p = Problem(Minimize(obj))\n p.solve(solver=\"SCS\")", "def consistent(self,assignment):\n return all(con.holds(assignment)\n for con in self.constraints\n if all(v in assignment for v in con.scope))", "def validate_ipsec_site_connection(self, context, ipsec_sitecon,\n ip_version):\n super(CiscoCsrVpnValidator, self)._check_dpd(ipsec_sitecon)\n\n ike_policy = self.service_plugin.get_ikepolicy(\n context, ipsec_sitecon['ikepolicy_id'])\n ipsec_policy = self.service_plugin.get_ipsecpolicy(\n context, ipsec_sitecon['ipsecpolicy_id'])\n vpn_service = self.service_plugin.get_vpnservice(\n context, ipsec_sitecon['vpnservice_id'])\n router = self.l3_plugin._get_router(context, vpn_service['router_id'])\n self.validate_lifetime('IKE Policy', ike_policy)\n self.validate_lifetime('IPSec Policy', ipsec_policy)\n self.validate_ike_version(ike_policy)\n self.validate_mtu(ipsec_sitecon)\n self.validate_public_ip_present(router)\n self.validate_peer_id(ipsec_sitecon)\n LOG.debug(\"IPSec connection validated for Cisco CSR\")", "def mcts(env, x_e, x_p, goal, k_budget, default_policy, T_max=100):\n a = np.array(action_space)\n new_x_p = x_p\n new_x_e = x_e\n tree = MyTree([Node(my_id=0, parent_id=-2, state=x_e, p_state=x_p,\n sum_inv_distance=1. / np.linalg.norm(np.array(x_e) - np.array(x_p)))]) # our tree\n for _ in range(k_budget):\n sum_inv_distance = 0. 
# aggregated sum of distances, before termination happened\n # First, we select the new candidate:\n best_node = tree.return_best() # here we have the best state\n x_e_best = best_node.state\n\n # Second, we perform action, according to our default policy\n u_e = a[default_policy[x_e_best]]\n new_x_e, _ = transition_function(env=env, x=x_e_best, u=u_e)\n # And perform corresponding step for pursuer\n new_x_p = pursuer_transition(env=env, x_e=new_x_e, x_p=new_x_p)\n\n # And we add the node to the tree\n tree.add_node(parent_id=best_node.id, state=new_x_e, action_applied=u_e)\n\n # Third, we launch the simulation\n last_t = T_max\n for t in range(T_max):\n # Step, according to the default policy\n u_e = a[default_policy[new_x_e]]\n new_x_e, _ = transition_function(env=env, x=new_x_e, u=u_e)\n # Step, according to pursuer policy\n new_x_p = pursuer_transition(env=env, x_e=new_x_e, x_p=new_x_p)\n # Accumulate the inversed distance. We need it for reward computation\n if new_x_e != new_x_p:\n sum_inv_distance += 1. / np.linalg.norm(np.array(new_x_e) - np.array(new_x_p))\n # If we reached the goal or was eaten, then stop\n if (new_x_e == new_x_p) or (new_x_e == goal):\n last_t = t\n break\n\n rew = reward(x_e=new_x_e, x_p=new_x_p, sum_inv_distance=sum_inv_distance, goal=goal, t=last_t, T_max=T_max)\n\n # Forth, we update all parent's nodes\n tree.update_tree(node_id=-1, outcome=rew)\n\n u = max([(i, tree.nodes[i].value) for i in tree.nodes[0].children_ids])\n u = tree.nodes[u[0]].action_applied\n return u", "def _check_case_sol_200(sol: int,\n subcase: Subcase,\n fem2: BDF,\n p0: Any,\n isubcase: int, subcases: int,\n log: Any):\n assert 'ANALYSIS' in subcase, 'sol=%s\\n%s' % (sol, subcase)\n\n analysis = subcase.get_parameter('ANALYSIS')[0]\n # BUCKLING\n if 'DESOBJ' in subcase:\n value = subcase.get_parameter('DESOBJ')[0]\n assert value in fem2.dresps, f'value={value} not in dresps'\n else:\n fem2.log.warning('no DESOBJ (DRESPi) in this subcase; is this a buckling preload case?')\n fem2.log.warning('\\n%s' % subcase)\n\n nopt = len(fem2.dvprels) + len(fem2.dvmrels) + len(fem2.dvcrels)\n if nopt == 0:\n fem2.log.error('no DVPRELs/DVMRELs/DVCRELs found')\n\n #--------------------------------------------------------------------------\n # DCONSTR\n if 'DESSUB' not in subcase and 'DESGLB' not in subcase:\n fem2.log.warning('no DESSUB/DESGLB (DCONSTR) in this subcase;'\n ' is this a buckling preload case?')\n log.warning('\\n%s' % subcase)\n\n if 'DESSUB' in subcase:\n value = subcase.get_parameter('DESSUB')[0]\n if value not in fem2.dconstrs:\n msg = 'value=%s not in dconstrs; Allowed DCONSTRs=%s' % (\n value, np.unique(list(fem2.dconstrs.keys())))\n raise RuntimeError(msg)\n if 'DESGLB' in subcase:\n value = subcase.get_parameter('DESGLB')[0]\n if value not in fem2.dconstrs:\n msg = 'value=%s not in dconstrs; Allowed DCONSTRs=%s' % (\n value, np.unique(list(fem2.dconstrs.keys())))\n raise RuntimeError(msg)\n #--------------------------------------------------------------------------\n\n if analysis in ['STATIC', 'STATICS']:\n solution = 101\n check_case(solution, subcase, fem2, p0, isubcase, subcases)\n elif analysis in ['MODE', 'MODES']:\n solution = 103\n check_case(solution, subcase, fem2, p0, isubcase, subcases)\n elif analysis in ['BUCK', 'BUCKLING']:\n solution = 105\n check_case(solution, subcase, fem2, p0, isubcase, subcases)\n elif analysis == 'DFREQ':\n solution = 108\n check_case(solution, subcase, fem2, p0, isubcase, subcases)\n elif analysis == 'MFREQ':\n if 'GUST' in subcase:\n 
solution = 146\n else:\n solution = 111\n check_case(solution, subcase, fem2, p0, isubcase, subcases)\n elif analysis in ['MTRAN', 'MTRANS']:\n solution = 112\n check_case(solution, subcase, fem2, p0, isubcase, subcases)\n elif analysis in ['SAERO', 'DIVERG', 'DIVERGE']:\n solution = 144\n check_case(solution, subcase, fem2, p0, isubcase, subcases)\n elif analysis in ['FLUT', 'FLUTTER']:\n solution = 145\n check_case(solution, subcase, fem2, p0, isubcase, subcases)\n elif analysis == 'DCEIG': # direct complex eigenvalues\n solution = 107\n check_case(solution, subcase, fem2, p0, isubcase, subcases)\n #elif analysis == 'MCEIG': # modal direct complex eigenvalues\n elif analysis == 'HEAT': # heat transfer analysis\n solution = 159\n check_case(solution, subcase, fem2, p0, isubcase, subcases)\n elif analysis == 'MCEIG': # modal complex eigenvalues\n solution = 110\n check_case(solution, subcase, fem2, p0, isubcase, subcases)\n else:\n msg = 'analysis = %s\\nsubcase =\\n%s' % (analysis, subcase)\n raise NotImplementedError(msg)", "def enforce_node_consistency(self):\n for node in self.domains:\n #creates a list of words per node to remove since we cannot remove the elements in a set while it is iterating\n words_to_remove= []\n\n for word in self.domains[node]:\n if len(word) != node.length:\n words_to_remove.append(word)\n\n for word in words_to_remove:\n self.domains[node].remove(word)", "def revise(csp, Xi, Xj, removals, checks=0):\r\n revised = False\r\n for x in csp.curr_domains[Xi][:]:\r\n conflict = True\r\n for y in csp.curr_domains[Xj]:\r\n if csp.constraints(Xi, x, Xj, y):\r\n conflict = False\r\n checks += 1\r\n if not conflict:\r\n break\r\n if conflict:\r\n csp.prune(Xi, x, removals)\r\n revised = True\r\n # we check if domains list for our current variable is empty\r\n # and we increase weight for current variable\r\n if not csp.curr_domains[Xi]:\r\n csp.weight[(Xi,Xj)] += 1\r\n\r\n return revised, checks", "def check(self):\n\n constrains = pm.ls(type='constraint')\n uselessConstrains = []\n\n for const in constrains:\n connections = const.listConnections(scn=True, s=False, d=True)\n if const in connections:\n connections.remove(const)\n\n if len(connections) == 0:\n uselessConstrains.append(const)\n\n if not uselessConstrains:\n self.status = \"OK\"\n else:\n self.status = self.errorMode\n self.errorNodes = uselessConstrains\n for obj in uselessConstrains:\n self.addError(\"%s doesn't have outgoing connections.\" % obj)\n self.errorMessage = \"%s useless constrains\" % (\n len(uselessConstrains))", "def require_direct_connection (self, sg1, sg2):\n self.solver.append(self.d_reach(self.nodes[sg1], self.nodes[sg2]))", "def solve(self):", "def prove_and_commutativity() -> Proof:\n all_lines = []\n all_lines.append(Proof.Line(Formula.parse('(p&q)')))\n all_lines.append(Proof.Line(Formula.parse('q'), AE1_RULE, [0]))\n all_lines.append(Proof.Line(Formula.parse('p'), AE2_RULE, [0]))\n all_lines.append(Proof.Line(Formula.parse('(q&p)'), A_RULE, [1, 2]))\n statement = InferenceRule([Formula.parse('(p&q)')], Formula.parse('(q&p)'))\n all_rules = {A_RULE, AE1_RULE, AE2_RULE}\n return Proof(statement, all_rules, all_lines)", "def solve(self):\n initial_fes = eades(self.graph, self.force_forward_edges)\n initial_fes_vec = self.edge_vector(initial_fes)\n\n # bounds for the objective\n lower_bound = 0\n upper_bound = np.sum(initial_fes_vec @ self.weights)\n\n self.logger.info('Calculating FES for graph with %d edges, max %d feedback edges', self.m, len(initial_fes))\n\n simple_cycles = 
set(induced_cycles(self.graph, initial_fes))\n\n for iteration in itertools.count(1):\n self.logger.info('Baharev iteration %d, %g <= objective <= %g, %d simple cycles', iteration, lower_bound,\n upper_bound, len(simple_cycles))\n\n # Formulate and solve the problem for this iteration:\n y = cp.Variable(self.m, boolean=True, name=\"y\")\n objective = cp.Minimize(cp.sum(y @ self.weights))\n\n cycle_vectors = [self.edge_vector(nx.utils.pairwise(cycle)) for cycle in simple_cycles]\n constraints = [cp.sum(a @ y) >= 1 for a in cycle_vectors]\n constraints.append(cp.sum(y @ self.force_forward_vec) == 0) # no force forward vec may be in the result set\n problem = cp.Problem(objective, constraints)\n resolution = problem.solve(**self.solver_args)\n if problem.status != 'optimal':\n self.logger.warning('Optimization solution is %s. Try solver != %s?', problem.status,\n problem.solver_stats.solver_name)\n self.logger.debug(\n \"Solved optimization problem with %d constraints: %s -> %s (%g + %g seconds, %d iterations, solver %s)\",\n len(constraints), resolution, problem.solution.status,\n problem.solver_stats.solve_time or 0, problem.solver_stats.setup_time or 0,\n problem.solver_stats.num_iters or 0, problem.solver_stats.solver_name)\n current_solution = np.abs(y.value) >= 0.5 # y.value = vector of floats each ≈ 0 or 1\n current_fes = self.edges_for_vector(current_solution)\n self.logger.debug('Iteration %d, resolution: %s, %d feedback edges', iteration, resolution,\n len(current_fes))\n # S, the feedback edge set calculated using the constraint subset, can be an incomplete solution\n # (i.e. cycles remain after removing S from the graph). So lets compare this with the upper bound\n # from the heuristic\n lower_bound = max(lower_bound, objective.value)\n if lower_bound == upper_bound:\n self.logger.info('upper == lower bound == %g, optimal solution found', lower_bound)\n break # y.value is the optimal solution\n\n if resolution > upper_bound:\n self.logger.error('Solution %g > upper bound %g!', resolution, upper_bound)\n break\n\n Gi = self.graph.copy()\n Gi.remove_edges_from(current_fes)\n if nx.is_directed_acyclic_graph(Gi):\n self.logger.info('Graph is acyclic, optimal solution found')\n break # y.value is the optimal solution\n\n # The solution is not yet ideal. So we take G^(i), the graph still containing some feedback edges,\n # calculate a heuristic on it and use the heuristic (= over-estimation) to adjust upper bound and\n # determine additional simple cycles (= constraints)\n Fi = eades(Gi, self.force_forward_edges)\n yi = self.edge_vector(Fi) | current_solution\n zi = np.sum(yi @ self.weights)\n if zi < upper_bound:\n upper_bound = zi\n current_solution = yi\n simple_cycles |= set(induced_cycles(Gi, Fi))\n\n self.solution_vector = current_solution\n self.solution = self.edges_for_vector(current_solution)\n self.objective = objective.value\n self.iterations = iteration\n self.simple_cycles = simple_cycles\n return self.solution", "def relax_checker():\n checker = 2\n global v, vNew, n\n for check in range(0,2):\n for x in range(1,n):\n for y in range(1,n):\n if (x*(n+1) + y) % 2 == check:\n v[x,y] = (v[x-1][y] + v[x+1][y] + v[x][y-1] + v[x][y+1])*0.25", "def backtracking(csp, ac_3=False):\n assigned = []\n unassigned = csp.variables[:]\n for v in unassigned:\n if v.value is not None:\n unassigned.remove(v)\n \n result = recursive_backtracking(csp, assigned,unassigned)\n if result is False:\n print \"fuck\"\n return csp" ]
[ "0.64042765", "0.6084755", "0.60748124", "0.57520914", "0.5737966", "0.56193435", "0.55433697", "0.53763986", "0.5332282", "0.526332", "0.5261379", "0.5261379", "0.52395386", "0.52281404", "0.52064234", "0.5187962", "0.5164979", "0.5130408", "0.5113685", "0.5096188", "0.5075866", "0.5060113", "0.50414926", "0.5037765", "0.5029581", "0.5020807", "0.5008852", "0.49621907", "0.49464136", "0.49422997", "0.49249187", "0.49247643", "0.49240497", "0.4871244", "0.4859838", "0.48490524", "0.4847167", "0.48468465", "0.48314077", "0.48290676", "0.48290676", "0.48287824", "0.48166573", "0.48157418", "0.48135906", "0.47956342", "0.47901836", "0.477923", "0.47757286", "0.4761914", "0.4761914", "0.47562316", "0.4749933", "0.47405928", "0.47385204", "0.47304985", "0.47263408", "0.47218946", "0.4714259", "0.47130236", "0.47130236", "0.47130236", "0.4711462", "0.47105595", "0.47092134", "0.47021374", "0.46961555", "0.46915007", "0.46910962", "0.4688644", "0.46815306", "0.4673786", "0.46731955", "0.46692672", "0.46684507", "0.46682936", "0.46666804", "0.4665976", "0.46654096", "0.46509168", "0.465053", "0.46436673", "0.46430445", "0.46393868", "0.46349546", "0.46342978", "0.46322808", "0.46298113", "0.46251625", "0.46215507", "0.46205077", "0.46160796", "0.4612906", "0.4609148", "0.4598259", "0.4597245", "0.4596895", "0.4594262" ]
0.54098696
8
Update `self.domains` such that each variable is node-consistent. (Remove any values that are inconsistent with a variable's unary constraints; in this case, the length of the word.)
def enforce_node_consistency(self):
    # A variable is node-consistent when every word remaining in its domain
    # satisfies the unary constraint, i.e. matches the variable's length.
    for mystery in self.domains:
        keep_list = set()
        while self.domains[mystery]:
            word = self.domains[mystery].pop()
            if len(word) == mystery.length:
                keep_list.add(word)
        for word in keep_list:
            self.domains[mystery].add(word)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def enforce_node_consistency(self):\n # Loop over each variable (space for word) in the crossword\n # Use copy to prevent domains from being modified while looping\n for var in self.domains.copy():\n # Get all unary constraints for this variable\n for value in self.domains[var].copy():\n # Check if the value is consistent with all unary constraints\n if len(value) != var.length:\n # If not, remove the value from the domain\n self.domains[var].remove(value)\n # No return value is necessary", "def prune(self,domains,constraint):\n left_var = constraint.left[0]\n left_const_mult = constraint.left[1]\n left_val = constraint.left[2]\n\n right_var = constraint.right[0]\n right_const_mult = constraint.right[1]\n right_val = constraint.right[2]\n\n new_domains = deepcopy(domains)\n\n\n # Simple Variable-Value Labeling\n if (left_val == [0] and left_const_mult == [1]) and (right_const_mult == [0]):\n new_domains[left_var[0]] = [right_val[0]]\n \n # Simple Variable-Variable Labeling\n elif (left_val == [0] and left_const_mult == [1]) and (right_val == [0] and right_const_mult == [1]):\n new_set = set(new_domains[left_var[0]]) & set(new_domains[right_var[0]])\n new_domains[left_var[0]] = list(new_set)\n new_domains[right_var[0]] = list(new_set)\n\n else:\n l = 0\n for var,mult in zip(left_var,left_const_mult):\n l += mult*max(domains[var])\n for const in left_val:\n l += const\n\n r = 0\n for var,mult in zip(right_var,right_const_mult):\n r += mult*min(domains[var])\n for const in right_val:\n r += const\n\n # print(l,r)\n # print(new_domains)\n # print(constraint)\n\n for var,mult in zip(left_var,left_const_mult):\n max_var = max(domains[var])\n comp = (r-(l-mult*max_var)) / mult\n for elem in domains[var]:\n if elem < comp:\n new_domains[var].remove(elem)\n\n for var,mult in zip(right_var,right_const_mult):\n min_var = min(domains[var])\n comp = (l-(r-mult*min_var)) / mult\n for elem in domains[var]:\n if elem > comp:\n new_domains[var].remove(elem)\n\n # for i,domain in enumerate(new_domains):\n # if len(domain) == 0:\n # print(i,l,r)\n # print(\"Old:\",domains)\n # print(\"New:\",new_domains)\n # print(domains)\n # print(constraint)\n # print(\"------------------------\")\n # raise SystemError(\"Domain is Empty!!\")\n\n return new_domains", "def enforce_node_consistency(self):\n for node in self.domains:\n #creates a list of words per node to remove since we cannot remove the elements in a set while it is iterating\n words_to_remove= []\n\n for word in self.domains[node]:\n if len(word) != node.length:\n words_to_remove.append(word)\n\n for word in words_to_remove:\n self.domains[node].remove(word)", "def all_different_assignment_propagator(var: str, val: int, domains: Domains, problem_vars: FrozenSet[str]) -> Domains:\r\n reduced_domains = {v: frozenset({val}) if v == var else\r\n domains[v] - {val} if v in problem_vars else\r\n domains[v] for v in domains}\r\n return reduced_domains", "def finalize_variable(self):\n # variables for which there has been a constraint\n constrained_values = []\n for constraint_type in self.constraint:\n for constraint in self.constraint[constraint_type]:\n if constraint_type in ['threshold', 'count', 'only_one']:\n constraint_value = constraint[-1]\n constrained_values.append(constraint_value)\n elif constraint_type == 'time':\n constraint_values = constraint[-2:]\n constrained_values += constraint_values\n # compare constrained values to all populated values\n unconstrained_values = [value for value in self.value if value not in constrained_values]\n\n # TODO: 
make sure constraint interpreter knows 1,0,0 is a special case of just making sure a matching value is seen\n for value in unconstrained_values:\n if 'count' in self.constraint.keys():\n self.constraint['count'].append([[1, 0, 0], value])\n else:\n self.constraint['count'] = [[1, 0, 0], value]\n # default is a single variable count if not otherswise stated\n for value in unconstrained_values:\n self.constraint\n\n ##TODO: if variable is seen in multiple constraints, link those constraints to create a special super constraint of some sort", "def set_all_domains(self, domains_dict) :\n if not set(domains_dict.keys()) <= set(self.variables):\n invalid_vars = filter(lambda v: v not in self.variables, domains_dict.keys())\n raise KeyError(str(invalid_vars) + \" are not variables in this problem.\")\n self.domains = deepcopy(domains_dict)\n return self", "def revise(self, verbose=0):\n if verbose:\n print '** Consistency **'\n\n _queue = [ (constr.estimateCost(self._domains),\n constr) for constr in self._constraints ]\n _queue.sort()\n _affected_constraints = {}\n while True:\n if not _queue:\n # refill the queue if some constraints have been affected\n _queue = [(constr.estimateCost(self._domains),\n constr) for constr in _affected_constraints]\n if not _queue:\n break\n _queue.sort()\n _affected_constraints.clear()\n if verbose > 2:\n print 'Queue', _queue\n cost, constraint = _queue.pop(0)\n if verbose > 1:\n print 'Trying to entail constraint',\n print constraint, '[cost:%d]' % cost\n entailed = constraint.narrow(self._domains)\n for var in constraint.affectedVariables():\n # affected constraints are listeners of\n # affected variables of this constraint\n dom = self._domains[var]\n if not dom.has_changed():\n continue\n if verbose > 1 :\n print ' -> New domain for variable', var, 'is', dom\n for constr in self._variableListeners[var]:\n if constr is not constraint:\n _affected_constraints[constr] = True\n dom.clear_change()\n if entailed:\n if verbose:\n print \"--> Entailed constraint\", constraint\n self._removeConstraint(constraint)\n if constraint in _affected_constraints:\n del _affected_constraints[constraint]\n \n for domain in self._domains.itervalues():\n if domain.size() != 1:\n return 0\n return 1", "def solve_constraint_propagate_reduced_domains(problem) :\n q = [problem]\n extCount = 0\n while len(q)!=0:\n removed = q[0]\n q = q[1:]\n extCount+=1\n if has_empty_domains(removed) or check_all_constraints(removed)==False:\n continue\n if len(removed.unassigned_vars)==0:\n return (removed.assigned_values,extCount)\n \n var = removed.pop_next_unassigned_var()\n extensions = []\n for val in removed.get_domain(var):\n csp_new = removed.copy()\n csp_new.set_assigned_value(var,val)\n domain_reduction(csp_new,[var])\n extensions.append(csp_new)\n \n q = extensions + q\n return (None,extCount)", "def order_domain_values(self, var, assignment):\n #list to store pair data of words and their constraint score\n constraint_list= []\n #function to create list of all neighbors to node var\n neighbors= self.crossword.neighbors(var)\n\n for neighbor in neighbors:\n overlap= self.crossword.overlaps[var, neighbor]\n \n for word_var in self.domains[var]:\n constraint_score= 0\n\n for word_neighbor in self.domains[neighbor]:\n #adds constraint score for each word in the domain of neighbor nodes that are not consistent if word_var is chosen\n if word_var[overlap[0]] != word_neighbor[overlap[1]]:\n constraint_score += 1\n #add the pair data to list of all words\n constraint_list.append([word_var, 
constraint_score])\n #sorts the list in terms of constraint score\n constraint_list.sort(key= lambda x:x[1])\n #creates a list of all words in the same order as constraint_list\n return_list= map(lambda x:x.pop(0), constraint_list)\n return_list= list(return_list)\n return return_list", "def repair(self):\n # self.add_cons_vars([x.constraint for x in self._cons_dict.values()])\n # self.add_cons_vars([x.variable for x in self._var_dict.values()])\n self._push_queue()\n Model.repair(self)\n self.regenerate_constraints()\n self.regenerate_variables()", "def fixDomains(self, energyMin, energyMax, domainToFix):\n\n return 0", "def fixDomains(self, energyMin, energyMax, domainToFix):\n\n return 0", "def solve_constraint_propagate_singleton_domains(problem) :\n q = [problem]\n extCount = 0\n while len(q)!=0:\n removed = q[0]\n q = q[1:]\n extCount+=1\n if has_empty_domains(removed) or check_all_constraints(removed)==False:\n continue\n if len(removed.unassigned_vars)==0:\n return (removed.assigned_values,extCount)\n \n var = removed.pop_next_unassigned_var()\n extensions = []\n for val in removed.get_domain(var):\n csp_new = removed.copy()\n csp_new.set_assigned_value(var,val)\n domain_reduction_singleton_domains(csp_new,[var])\n extensions.append(csp_new)\n \n q = extensions + q\n return (None,extCount)", "def reset(self):\n for var in self.var_list:\n var.value = None\n var.domain = copy.deepcopy(var.init_domain)", "def set_domain(self, var, domain) :\n if var not in self.variables :\n raise KeyError(str(var) + \" is not a variable in this problem.\")\n self.domains[var] = sorted(domain[:])\n return self", "def suppose(self, var, value):\r\n self.support_pruning()\r\n removals = [(var, a) for a in self.curr_domains[var] if a != value]\r\n self.curr_domains[var] = [value]\r\n return removals", "def removedummyvars(self, dummy_var_no):\n self.nodummyvariablelist = [] # Necessary for a list copy\n self.nodummyvariablelist.extend(self.variablelist)\n self.nodummygain = self.originalgain.copy()\n self.nodummyconnection = self.originalconnection.copy()\n for index in range(dummy_var_no):\n self.nodummyvariablelist.pop(0)\n self.nodummygain = np.delete(self.nodummygain, 0, 0)\n self.nodummygain = np.delete(self.nodummygain, 0, 1)\n self.nodummyconnection = np.delete(self.nodummyconnection, 0, 0)\n self.nodummyconnection = np.delete(self.nodummyconnection, 0, 1)\n\n [r, c] = self.nodummyconnection.shape\n self.nodummy_nodes = r", "def support_pruning(self):\r\n if self.curr_domains is None:\r\n self.curr_domains = {v: list(self.domains[v]) for v in self.variables}", "def _infer_domain(self, name, domain, elements):\n if '*' not in domain:\n return domain\n debug('guessing a better domain for {}: {}'.format(name, domain))\n\n # Domain as a list of references to Variables in the File/xr.Dataset\n domain_ = [self[d] for d in domain]\n\n for i, d in enumerate(domain_): # Iterate over dimensions\n e = set(elements[i])\n if d.name != '*' or len(e) == 0: # pragma: no cover\n assert set(d.values).issuperset(e)\n continue # The stated domain matches the data; or no data\n # '*' is given\n if (self._state[name]['attrs']['type_code'] == gdxcc.GMS_DT_PAR and\n self._implicit):\n d = '_{}_{}'.format(name, i)\n debug(('Constructing implicit set {} for dimension {} of {}\\n'\n ' {} instead of {} elements')\n .format(d, name, i, len(e), len(self['*'])))\n self.coords[d] = elements[i]\n d = self[d]\n else:\n # try to find a smaller domain for this dimension\n # Iterate over every Set/Coordinate\n for s in 
self.coords.values():\n if s.ndim == 1 and set(s.values).issuperset(e) and \\\n len(s) < len(d):\n d = s # Found a smaller Set; use this instead\n domain_[i] = d\n\n # Convert the references to names\n inferred = [d.name for d in domain_]\n\n if domain != inferred:\n # Store the result\n self._state[name]['attrs']['domain_inferred'] = inferred\n debug('…inferred {}.'.format(inferred))\n else:\n debug('…failed.')\n\n return inferred", "def del_variables(self, variables):\n variables = [variables] if isinstance(variables, str) else set(variables)\n indices = [\n index\n for index, variable in enumerate(self.variables)\n if variable in variables\n ]\n self.variables = np.delete(self.variables, indices, 0)\n self.cardinality = np.delete(self.cardinality, indices, 0)\n self.inhibitor_probability = [\n prob_array\n for index, prob_array in enumerate(self.inhibitor_probability)\n if index not in indices\n ]", "def no_of_dofs_unconstrained(self, new_no_of_dofs_unconstrained):\n self._no_of_dofs_unconstrained = new_no_of_dofs_unconstrained\n self._update_flag = True", "def prepare_domain_restrictions(self):\n for index, restriction in enumerate(self._domain_restrictions):\n self.add_specific_domain_restriction(index+1, restriction)", "def fixDomains(self, domainMin, domainMax, fixToDomain):\n\n return 0", "def order_domain_values(self, var, assignment):\n # print(\"Entered order_domain_values Function\")\n ordered_variables = []\n # print(\"Var\")\n # print(var)\n # print(\"self.domains[var]\")\n # print(self.domains[var])\n # print(\"self.crossword.neighbor(var)\")\n # print(self.crossword.neighbors(var))\n \n neighbors_to_check = self.crossword.neighbors(var).difference(assignment.keys())\n for word in self.domains[var]:\n\n n = 0\n for neighbor in neighbors_to_check:\n overlap = self.crossword.overlaps[(var, neighbor)]\n for neighbor_word in self.domains[neighbor]:\n if ( word[overlap[0]] is not neighbor_word[overlap[1]] or word is neighbor_word):\n n += 1\n\n\n\n ordered_variables.append( (word, n) )\n ordered_variables.sort(key=self.orderFunc)\n # print(\"ordered_variables\")\n # print(ordered_variables)\n # input()\n return ordered_variables\n # raise NotImplementedError", "def domain_reduction(csp, queue=None) :\n if (queue==None):\n queue = csp.get_all_variables()\n dequeued = []\n while len(queue)!=0:\n removedVar = queue[0]\n dequeued.append(removedVar)\n queue = queue[1:]\n for constraint in csp.constraints_between(removedVar,None)[:]:\n var2 = constraint.var2\n val2 = csp.get_assigned_value(var2)\n var2Domain = csp.get_domain(var2)[:]\n removedDomain = csp.get_domain(removedVar)[:]\n if len(removedDomain)==0 or len(var2Domain)==0:\n return None\n for domainVal2 in var2Domain:\n anyNonViolators = False\n for domainVal in removedDomain:\n check = constraint.check(domainVal,domainVal2)\n if check==True:\n anyNonViolators = True\n continue\n if anyNonViolators==False:\n csp.eliminate(var2, domainVal2)\n if len(csp.get_domain(var2))==0:\n return None\n if var2 not in queue:\n queue.append(var2)\n return dequeued", "def update(self):\n ## Initialize\n self.domain.update()\n self.var = self.domain.var.copy()\n self.out = []\n\n ## Construct var and out, respecting DAG properties\n for fun in self.functions:\n self.var = list(set(self.var).union(set(fun.var).difference(set(self.out))))\n\n self.out = list(set(self.out).union(set(fun.out)))\n\n try:\n self.var_rand = list(self.density.marginals.keys())\n except AttributeError:\n self.var_rand = []\n self.var_det = 
list(set(self.var).difference(self.var_rand))\n\n ## TODO parameters\n\n ## Convenience constants\n self.n_var = len(self.var)\n self.n_var_rand = len(self.var_rand)\n self.n_var_det = len(self.var_det)\n self.n_out = len(self.out)", "def add_variable(self, name, domain):\n self.variables.append(name)\n self.domains[name] = list(domain)\n self.constraints[name] = {}", "def order_domain_values(self, var, assignment):\n # retrieve the domain for the variable\n domain = self.domains[var]\n # initialise a dictionary for sorting the values in the variable's domain\n sorting_dict = {} \n # for each of the values in the variable's domain \n for value in domain:\n # set the constraint counter to zero\n sorting_dict[value] = 0\n # for each of the neighbors of the variable\n for neighbor in self.crossword.neighbors(var):\n # retrieve the overlap indexes\n overlap = self.crossword.overlaps[(neighbor, var)]\n # for each of the overlap's possible values (the overlap's domain)\n for test in self.domains[neighbor]:\n # if the overlap letter is not the same\n if test[overlap[0]] != value[overlap[1]]:\n # this value constrains the neighbor's domain\n sorting_dict[value] += 1\n # sort the dictionary by the value of the sorting key\n sorted_vars = sorted(domain, key=lambda x: sorting_dict[x])\n return sorted_vars", "def domain_reduction_singleton_domains(csp, queue=None) :\n if (queue==None):\n queue = csp.get_all_variables()\n dequeued = []\n while len(queue)!=0:\n removedVar = queue[0]\n dequeued.append(removedVar)\n queue = queue[1:]\n for constraint in csp.constraints_between(removedVar,None)[:]:\n var2 = constraint.var2\n val2 = csp.get_assigned_value(var2)\n var2Domain = csp.get_domain(var2)[:]\n removedDomain = csp.get_domain(removedVar)[:]\n if len(removedDomain)==0 or len(var2Domain)==0:\n return None\n for domainVal2 in var2Domain:\n anyNonViolators = False\n for domainVal in removedDomain:\n check = constraint.check(domainVal,domainVal2)\n if check==True:\n anyNonViolators = True\n continue\n if anyNonViolators==False:\n csp.eliminate(var2, domainVal2)\n if len(csp.get_domain(var2))==0:\n return None\n if var2 not in queue and len(csp.get_domain(var2))==1:\n queue.append(var2)\n return dequeued", "def min_error_removed_linear_constraints(self):\n n_e_vars = len(self.removed_linear_constraints) * 2\n\n # Add a pair of (continuous) variables e+ >= 0 and e- >= 0, for each (removed) conflicting constraint\n eplus_vars = self.add_variables(n_variables=n_e_vars / 2, lb=0, var_type=\"continuous\")\n eminus_vars = self.add_variables(n_variables=n_e_vars / 2, lb=0, var_type=\"continuous\")\n\n print self.n_tuple_variables\n print len(eplus_vars)\n print len(eminus_vars)\n assert isinstance(self.problem, cplex.Cplex)\n print \"n binaries\", self.problem.variables.get_num_binary()\n print \"n all\", self.problem.variables.get_num()\n print \"n integers\", self.problem.variables.get_num_integer()\n\n # Set objective coefficients of e variables all to 1 (if minimization, otherwise -1)\n if self.problem.objective.get_sense() == cplex.Cplex.objective.sense.minimize:\n self.problem.objective.set_linear(izip(chain(eplus_vars, eminus_vars), repeat(1, n_e_vars)))\n else:\n self.problem.objective.set_linear(izip(chain(eplus_vars, eminus_vars), repeat(-1, n_e_vars)))\n\n adding_constraints = list()\n\n # For minimizing error in SUM(attr) for each attr in the query package\n for i, lc in enumerate(self.removed_linear_constraints):\n def get_coeff_function(_ugc):\n yield 1\n yield -1\n for coeff in 
self.get_aggregate_constraint_coefficients(_ugc.aggr, _ugc.attr):\n yield coeff\n\n def get_vars_function(_i):\n yield eplus_vars[_i]\n yield eminus_vars[_i]\n for var in self.tuple_variables:\n yield var\n\n lc = LinearConstraint(\n cid=self.new_constraint_id(),\n vals_func=(get_coeff_function, (lc.ugc,)),\n vars_func=(get_vars_function, (i,)),\n op=operator.eq,\n rhs=lc.rhs)\n\n print \"VALS\", lc.get_coeff_function\n print \"VARS\", lc.get_vars_function\n\n adding_constraints.append(lc)\n\n self.add_linear_constraints(adding_constraints)", "def free_variables(self):\n\n free_vars = set()\n self.free_variables_helper(free_vars)\n return free_vars\n # Task 7.6", "def unconstrain_fixed(self):\n unconstrained = self.unconstrain(__fixed__)\n self._highest_parent_._set_unfixed(self, unconstrained)\n return unconstrained", "def clear_domain (cls, base, domain, log=logging.getLogger(\"CLEAN\")):\n base_domain = cls.detect_domains(nffg=base)\n if domain not in base_domain:\n log.warning(\"No node was found in %s with domain: %s for cleanup! \"\n \"Leave NFFG unchanged...\" % (base, domain))\n return base\n for infra in base.infras:\n deletable_ports = set()\n deletable_nfs = set()\n # Skip nodes from other domains\n if infra.domain != domain:\n continue\n # Iterate over out edges from the current BB node\n for infra_id, node_id, link in base.real_out_edges_iter(infra.id):\n # Mark connected NF for deletion\n if base[node_id].type in (NFFG.TYPE_NF,):\n deletable_nfs.add(node_id)\n # Mark related dynamic port for deletion\n deletable_ports.add(link.src)\n if deletable_nfs:\n log.debug(\"Initiated NFs marked for deletion: %s on node: %s\" %\n (deletable_nfs, infra.id))\n # Remove NFs\n base.network.remove_nodes_from(deletable_nfs)\n if deletable_ports:\n log.debug(\"Dynamic ports marked for deletion: %s on node: %s\" %\n (deletable_ports, infra.id))\n # Remove dynamic ports\n for p in deletable_ports:\n base[infra.id].ports.remove(p)\n # Delete flowrules from ports\n for port in base[infra.id].ports:\n port.clear_flowrules()\n return base", "def all_different_propagator(domains: Domains, all_diff_vars: FrozenSet[str]) -> Domains:\r\n assignment = {v: list(domains[v])[0] for v in domains if len(domains[v]) == 1}\r\n reduced_domains = domains\r\n for var in assignment:\r\n if var in all_diff_vars:\r\n reduced_domains = all_different_assignment_propagator(var, assignment[var], reduced_domains, all_diff_vars)\r\n return reduced_domains if reduced_domains == domains else all_different_propagator(reduced_domains, all_diff_vars)", "def solve_constraint_propagate_reduced_domains(problem) :\n agenda=[problem]\n extension=0\n current_prob=agenda.pop(0)\n extension+=1\n\n #check failure\n if has_empty_domains(current_prob) or (not check_all_constraints(current_prob)):\n return (None, extension)\n\n #check success\n all_assigned=True\n variables = current_prob.get_all_variables()\n for var in variables:\n if current_prob.get_assigned_value(var)==None:\n all_assigned=False\n break\n if all_assigned:\n return (current_prob.assigned_values,extension)\n\n #iteration\n next_un_var=current_prob.pop_next_unassigned_var()\n next_domain=current_prob.get_domain(next_un_var)\n new_probs=[]\n for val in next_domain:\n temp=current_prob.copy()\n new=temp.set_assigned_value(next_un_var,val)\n\n queue=[next_un_var]\n domain_reduction(new,queue)\n\n new_probs.append(new)\n agenda=new_probs+agenda\n while (len(agenda)!=0):\n new_prob = agenda.pop(0)\n result=solve_constraint_propagate_reduced_domains(new_prob)\n 
extension+=result[1]\n if not result[0] is None:\n return (result[0],extension)\n return (None,extension)", "def _reset(lp):\n if hasattr(lp, \"solverModel\"):\n delattr(lp, \"solverModel\")\n for v in lp.variables():\n if hasattr(v, \"_xprs\"):\n delattr(v, \"_xprs\")\n for c in lp.constraints.values():\n if hasattr(c, \"_xprs\"):\n delattr(c, \"_xprs\")", "def select_unassigned_variable(self, assignment):\n var_list= []\n #add unassigned variabled to a list along with the number of words left in its domain\n for var in self.domains:\n if var not in assignment:\n var_list.append((var, len(self.domains[var])))\n #sort this list by the number of words left in its domain\n var_list.sort(key= lambda x:x[1])\n\n #list for variables that are tied for least words left in domain\n equal_vars= [list(var_list[0])]\n for i in range(len(var_list)):\n #adds variables with same number of words left in domain\n if var_list[0][1] == var_list[i][1] and var_list[i] != var_list[0]:\n equal_vars.append(list(var_list[i]))\n\n \n #change the encoded information for words left in domain to the number of neighbors the variable had (highest degree)\n for i in range(len(equal_vars)):\n equal_vars[i][1]= len(self.crossword.neighbors(equal_vars[i][0]))\n\n #sort the list by the highest degree\n equal_vars.sort(key= lambda x:x[1])\n \n #return var with highest degree\n return equal_vars[0][0]", "def check_for_undefined_identifiers(tree, domains):\n for u in tree:\n if u.type == 'var' and u.value not in domains:\n var = u.value\n raise ValueError(\n ('Undefined variable \"{var}\" missing from '\n 'symbol table:\\n\\t{doms}\\n'\n 'in subformula:\\n\\t{f}').format(\n var=var, f=tree.to_recursive_ast(), doms=domains))\n\n if u.type not in {'str', 'num'}:\n continue\n\n # is a Const or Num\n var, c = pair_node_to_var(tree, u)\n\n if c.type == 'str':\n dom = domains[var]\n\n if not isinstance(dom, list):\n raise Exception(\n ('String constant \"{c}\" assigned to non-string '\n 'variable \"{var}\" with domain:\\n\\t{dom}').format(\n var=var, c=c, dom=dom))\n\n if c.value not in domains[var.value]:\n raise ValueError(\n ('String constant \"{c}\" is not in the domain '\n 'of variable \"{var}\"').format(var=var, c=c))\n\n if c.type == 'num':\n dom = domains[var]\n\n if not isinstance(dom, tuple):\n raise Exception(\n ('Number: {c}, assigned to non-integer ' + str(c) +\n 'variable \"{var}\" with domain:\\n\\t{dom}').format(\n var=var, c=c, dom=dom))\n\n if not dom[0] <= c.value <= dom[1]:\n raise Exception(\n ('Integer variable \"{var}\", is assigned the '\n 'value: {c}, that is out of its domain:'\n '{dom[0]} ... 
{dom[1]}').format(\n var=var, c=c, dom=dom))", "def remove_variables(self):\n self.variables = []", "def add_domains_restriction(self, domain_restriction):\n self._domain_restricion = domain_restriction\n self._size_var = self._get_size_var()\n self._nr_of_bits = self._get_nr_of_bits()", "def __init__(self, crossword):\n self.crossword = crossword\n self.domains = {\n var: self.crossword.words.copy()\n for var in self.crossword.variables\n }", "def __init__(self, crossword):\n self.crossword = crossword\n self.domains = {\n var: self.crossword.words.copy()\n for var in self.crossword.variables\n }", "def __init__(self, crossword):\n self.crossword = crossword\n self.domains = {\n var: self.crossword.words.copy()\n for var in self.crossword.variables\n }", "def remove_unary_constraint(self, var):\n\n del self.__constraints[var]", "def initial_S(domains):\n\n return set([('0',)*len(domains)])", "def __init__(self, variables, domains, neighbors, constraints, C):\r\n super().__init__(())\r\n variables = variables or list(domains.keys())\r\n self.variables = variables\r\n self.domains = domains\r\n self.neighbors = neighbors\r\n self.constraints = constraints\r\n self.curr_domains = None\r\n # visited nodes\r\n self.nassigns = 0\r\n self.conflict_set = {} #dictionary which stores the conflict set of each variable for fc - cbj\r\n self.prev_conflict_set = [] # we store the conflict set from the variable that causes dead-end\r\n self.deadend = None # we save the dead end variable in fc - cbj\r\n # initializating the conflict set array\r\n for x in self.variables:\r\n self.conflict_set[x]=[]\r\n # --------------------------\r\n # keep track of total checks for each algo\r\n self.totchecks=0\r\n # dict for later use in dom / wdeg heuristic\r\n # we initializating weights from constraints to 1\r\n self.weight = {}\r\n for each in C.keys():\r\n self.weight[(each[0],each[1])] = 1", "def __forward_check(self, assigned_var, assigned_value, unassigned_vars):\n for unassigned_neighbor in self.__unassigned_neighbors(assigned_var, unassigned_vars):\n consistent_values = self.__consistent_domain_values(assigned_var, assigned_value, unassigned_neighbor)\n if len(consistent_values) == 0:\n return False\n else:\n unassigned_neighbor.domain = consistent_values\n return True", "def revise(self, x, y):\n # return set a default return value of False\n ret_val = False\n # define a tuple of the two variables without their domains\n var_tup = (x, y)\n # define lists of the variable's domains\n x_values = self.domains[x].copy()\n y_values = self.domains[y].copy()\n # if the two variables exist in overlaps\n if var_tup in self.crossword.overlaps:\n # if that overlap is not None\n if self.crossword.overlaps.get(var_tup) is not None:\n # assign the overlap\n overlap = self.crossword.overlaps[var_tup]\n # generate the list of letters that x has to match with\n y_matches = [val[overlap[1]] for val in y_values]\n # for each of x's domain values\n for value in x_values:\n # if that value cannot match with y's domain values\n if value[overlap[0]] not in y_matches:\n # remove that value from the domain\n self.domains[x].remove(value)\n # set a flag for return value\n ret_val = True\n # return True if any changes were made\n return ret_val", "def forwardcheck(var, val, assignment, user_dict):\n\t\tif curr_domains:\n\t\t\tfor (meal, restaurant) in curr_deleted[var]:\n\t\t\t\tcurr_domains[meal].append(restaurant)\n\t\t\tcurr_deleted[var] = []\n\n\t\t\tfor meal in neighbors[var]:\n\t\t\t\tif meal not in assignment:\n\t\t\t\t\tfor 
restaurant in curr_domains[meal][:]:\n\t\t\t\t\t\tnum_cats = count_categories(assignment.values())\n\t\t\t\t\t\tif not constraints_match(num_cats, user_dict):\n\t\t\t\t\t\t#if not user_solution_checker(user_dict, meal, restaurant, assignment):\n\t\t\t\t\t\t\tcurr_domains[meal].remove(restaurant)\n\t\t\t\t\t\t\tcurr_deleted[var].append((meal, restaurant))", "def all_different(variables) :\n constraints=[]\n for index,var in enumerate(variables):\n for sub_index in range(index+1,len(variables)):\n var1=var\n var2=variables[sub_index]\n new_constraint=Constraint(var1,var2,constraint_different)\n constraints.append(new_constraint)\n return constraints", "def update(self):\n\n terms_toRemove = []\n\n for termIndex, [term_constantFactor, term_unknowns_attributeAddresses] in enumerate(self.LHS):\n\n # Check if coefficient is 0 - then no need to process any of the unknowns since term will be 0 anyways\n if term_constantFactor == 0:\n terms_toRemove.append(termIndex)\n continue # continue to next term, no need to resolve the unknowns of this term since the product will be 0 anyways\n\n # Check if any unknowns became known\n unknowns_toRemove = []\n for unknown_attributeAddress in term_unknowns_attributeAddresses:\n attribute = getattr_fromAddress(*unknown_attributeAddress)\n if isNumeric(attribute):\n # object.attribute which had previously been identified as unknown now has a value, add it to the constant factor product and remove from the unknowns\n self.LHS[termIndex][0] *= attribute # multiply it with the constant factor product\n unknowns_toRemove.append([termIndex, unknown_attributeAddress])\n for termIndex, unknown_attributeAddress in unknowns_toRemove: # remove unknowns which have become known in the end\n # removing in the end not to tamper with the iteration of the above loop\n self.LHS[termIndex][1].remove(unknown_attributeAddress)\n\n # Move constants to RHS\n if self.LHS[termIndex][1] == []:\n # if term has no unknowns, it is a constant, move to RHS\n self.RHS -= self.LHS[termIndex][0]\n self.LHS.pop(termIndex)\n\n for termIndex in reversed(terms_toRemove): # reversed - otherwise would tamper with indices of items identified for removal\n self.LHS.pop(termIndex)\n\n self._gatherUnknowns()", "def __init__(self, variables, constraints):\n self.__variables = variables\n self.__constraints = constraints\n\n self.__make_node_consistent()", "def compute_bounds(self):\n # Note: linear_constraints object has been been populated at this stage\n L_zero_var = []\n \n for constraint in self._linear_constraints.L_linear_constraints:\n lhs_string = constraint[0]\n rhs_string = constraint[1]\n if float(rhs_string)==0:\n #print \"rhs=0: forcing the variables to zero\"\n L_vars = re.split(r'[+-]',lhs_string)\n \n for var in L_vars:\n modform_var = var.strip()\n \n # forcing all the variables in this constraint to be zero\n self._linear_constraints.modform_space.D_PuLP_variables[modform_var] = pulp.LpVariable(modform_var, lowBound=0, upBound=0)\n #print \"var forced to zero: \", modform_var\n L_zero_var.append(modform_var)\n else: #if float(rhs)==0\n continue\n \n if len(L_zero_var)>0:\n print \"\\n####### Variables forced to zero (rhs = 0) ##########\"\n print \"variables forced to zero: \", set(L_zero_var)\n \n feasible_lc = flc.FeasibleLinearConstraints(self._linear_constraints)\n \n feasible_lc.get_feasible_linear_constraints()\n \n feasible_linear_constraints = feasible_lc.feasible_linear_constraints\n \n lp_solver = lps.LinearProgrammingSolver(feasible_linear_constraints)\n \n D_lower_bounds = {}; 
D_upper_bounds = {}\n \n for v in [self._linear_constraints.modform_space.D_PuLP_variables[k] for k in sorted(self._linear_constraints.modform_space.D_PuLP_variables.keys(), key=gbfunc.natural_keys)]:\n \n if str(v) in L_zero_var:\n D_lower_bounds[str(v)] = '0'\n D_upper_bounds[str(v)] = '0'\n continue\n #end if str(v) in L_zero_var\n \n objective_function_PuLP = v\n \n list_values_minimize = lp_solver.linear_programming_solver(objective_function_PuLP, pulp.LpMinimize)\n D_lower_bounds[str(v)] = \"%.3f\"%round(pulp.value(v),3)\n \n list_values_maximize = lp_solver.linear_programming_solver(objective_function_PuLP, pulp.LpMaximize)\n D_upper_bounds[str(v)] = \"%.3f\"%round(pulp.value(v),3)\n\n #end for v in ..\n\n return((D_lower_bounds, D_upper_bounds))", "def select_unassigned_variable(csp):\n smallest = -1\n largest = 0\n multiple = False\n returned = None\n\n for unass in csp.variables:\n if not unass.is_assigned():\n if len(unass.domain) < smallest or smallest == -1:\n smallest = len(unass.domain)\n multiple = False\n returned = unass\n if len(unass.domain) == smallest:\n multiple = True\n\n if multiple == False:\n return returned\n else:\n for unass in csp.variables:\n if not unass.is_assigned():\n if len(unass.domain) == smallest:\n if len(csp.constraints[unass]) > largest:\n largest = len(csp.constraints[unass])\n returned = unass\n return returned\n\n\n\n\n\n # TODO implement this\n pass", "def in_domain_set(self, var1, var2, var3, var4, var5):\n var6 = 0\n var7 = var4 - 1\n var8 = -1\n var10 = 2\n while var6 <= var7:\n var8 = (var6 + var7) / 2\n var9 = var1[var5 + 1] - ord(var3[var8][1])\n if var9 > 0:\n var6 = var8 + 1\n elif var9 < 0:\n var7 = var8 - 1\n else:\n var9 = var1[var5] - ord(var3[var8][0])\n if var9 > 0:\n var6 = var8 + 1\n else:\n if var9 >= 0:\n break\n var7 = var8 - 1\n if var6 > var7:\n return 0\n else:\n while var10 < var2 and var1[var5 + var10] == ord(var3[var8][var10]):\n var10 = var10 + 1\n return 1 if var10 == var2 else 0", "def __order_domain_values(self, var, assignment):\n values_to_inconsistencies = {}\n unassigned_vars = self.__unassigned_variables(assignment)\n unassigned_neighbors = self.__unassigned_neighbors(var, unassigned_vars)\n for value in var.domain:\n inconsistent_value_count = 0\n for unassigned_neighbor in unassigned_neighbors:\n consistent_domain_values = self.__consistent_domain_values(var, value, unassigned_neighbor)\n inconsistencies = unassigned_neighbor.domain.difference(consistent_domain_values)\n inconsistent_value_count += len(inconsistencies)\n values_to_inconsistencies[value] = inconsistent_value_count\n\n ordered_values = sorted(values_to_inconsistencies, key=values_to_inconsistencies.get)\n return ordered_values", "def unconstrain(self, regexp):\r\n matches = self.grep_param_names(regexp)\r\n\r\n # tranformed contraints:\r\n for match in matches:\r\n self.constrained_indices = [i[i <> match] for i in self.constrained_indices]\r\n\r\n # remove empty constraints\r\n tmp = zip(*[(i, t) for i, t in zip(self.constrained_indices, self.constraints) if len(i)])\r\n if tmp:\r\n self.constrained_indices, self.constraints = zip(*[(i, t) for i, t in zip(self.constrained_indices, self.constraints) if len(i)])\r\n self.constrained_indices, self.constraints = list(self.constrained_indices), list(self.constraints)\r\n\r\n # fixed:\r\n self.fixed_values = [np.delete(values, np.nonzero(np.sum(indices[:, None] == matches[None, :], 1))[0]) for indices, values in zip(self.fixed_indices, self.fixed_values)]\r\n self.fixed_indices = 
[np.delete(indices, np.nonzero(np.sum(indices[:, None] == matches[None, :], 1))[0]) for indices in self.fixed_indices]\r\n\r\n # remove empty elements\r\n tmp = [(i, v) for i, v in zip(self.fixed_indices, self.fixed_values) if len(i)]\r\n if tmp:\r\n self.fixed_indices, self.fixed_values = zip(*tmp)\r\n self.fixed_indices, self.fixed_values = list(self.fixed_indices), list(self.fixed_values)\r\n else:\r\n self.fixed_indices, self.fixed_values = [], []", "def _move_domain_attributes_into_domain(model):\n if onnx is None:\n raise ModuleNotFoundError(\"Installation of ONNX is required.\")\n\n model = copy.deepcopy(model)\n for n in model.graph.node:\n for a in n.attribute:\n mark_for_removal = False\n if a.name == \"domain\":\n n.domain = a.s\n mark_for_removal = True\n if mark_for_removal:\n n.attribute.remove(a)\n return model", "def __settle_ttl(self):\n self.ttl = {}\n for v in self.g.nodes():\n indices = [self.universe.index((v, w))\n for w in self.g[v] if (v, w) in self.universe]\n self.ttl[v] = max(indices) if len(indices) else 0", "def _defineNAAuxVars(self, aux_vars):\n # Initialise aux var itesms as empty lists unless already defined when\n # setting up independent variables\n for item in (\"ANAME\", \"AMISS\", \"ASCAL\", \"A\"):\n if not item in self.na_dict:\n self.na_dict[item] = [] \n\n for var in aux_vars:\n name = xarray_utils.getBestName(var)\n self.na_dict[\"ANAME\"].append(name)\n miss = xarray_utils.getMissingValue(var)\n miss = self._resolve_float(miss)\n\n self.na_dict[\"AMISS\"].append(miss)\n self.na_dict[\"ASCAL\"].append(1)\n # Populate the variable list with the array\n self.na_dict[\"A\"].append(xarray_utils.getArrayAsList(var, missing_value=miss))\n\n self.na_dict[\"NAUXV\"] = len(self.na_dict[\"A\"])", "def __minimum_remaining_values(self, unassigned_vars):\n min_var = None\n for var in unassigned_vars:\n if min_var is None:\n min_var = var\n elif len(var.domain) < len(min_var.domain):\n min_var = var\n return min_var", "def infer_assignment(self):\r\n self.support_pruning()\r\n return {v: self.curr_domains[v][0]\r\n for v in self.variables if 1 == len(self.curr_domains[v])}", "def __init__(self, variables, constraints):\n self.variables = variables\n self.constraints = constraints\n for c in constraints:\n c.var1.peers.append(c.var2)\n c.var2.peers.append(c.var1)", "def solve_constraint_propagate_singleton_domains(problem) :\n agenda=[problem]\n extension=0\n current_prob=agenda.pop(0)\n extension+=1\n\n #check failure\n if has_empty_domains(current_prob) or (not check_all_constraints(current_prob)):\n return (None, extension)\n\n #check success\n all_assigned=True\n variables = current_prob.get_all_variables()\n for var in variables:\n if current_prob.get_assigned_value(var)==None:\n all_assigned=False\n break\n if all_assigned:\n return (current_prob.assigned_values,extension)\n\n #iteration\n next_un_var=current_prob.pop_next_unassigned_var()\n next_domain=current_prob.get_domain(next_un_var)\n new_probs=[]\n for val in next_domain:\n temp=current_prob.copy()\n new=temp.set_assigned_value(next_un_var,val)\n\n queue=[next_un_var]\n domain_reduction_singleton_domains(new,queue)\n\n new_probs.append(new)\n agenda=new_probs+agenda\n while (len(agenda)!=0):\n new_prob = agenda.pop(0)\n result=solve_constraint_propagate_reduced_domains(new_prob)\n extension+=result[1]\n if not result[0] is None:\n return (result[0],extension)\n return (None,extension)", "def mutate_fix_var_filter(item_counts):\n assert isinstance(item_counts, Counter)\n for i in 
list(item_counts.keys()):\n if isinstance(i, Literal):\n i_n3 = i.n3()\n if len(i_n3) > config.MAX_LITERAL_SIZE:\n logger.debug(\n 'excluding very long literal %d > %d from mutate_fix_var:\\n'\n '%s...',\n len(i_n3), config.MAX_LITERAL_SIZE, i_n3[:128]\n )\n del item_counts[i]\n elif i.datatype in (XSD['float'], XSD['double']) \\\n and six.text_type(i).lower() in ('nan', 'inf'):\n logger.debug('excluding %s due to Virtuoso Bug', i_n3)\n del item_counts[i]\n elif isinstance(i, URIRef):\n # noinspection PyBroadException\n try:\n i.n3()\n except Exception: # sadly RDFLib doesn't raise a more specific one\n # it seems some SPARQL endpoints (Virtuoso) are quite liberal\n # during their import process, so it can happen that we're\n # served broken URIs, which break when re-inserted into SPARQL\n # later by calling URIRef.n3()\n logger.warning(\n 'removed invalid URI from mutate_fix_var:\\n%r',\n i\n )\n del item_counts[i]\n elif isinstance(i, BNode):\n # make sure that BNodes stay variables\n logger.info('removed BNode from mutate_fix_var')\n del item_counts[i]\n else:\n logger.warning(\n 'exlcuding unknown result type from mutate_fix_var:\\n%r',\n i\n )\n del item_counts[i]", "def update_domain(self, centre=[0.5,0.5]):\n centre = np.array(centre)\n assert(centre.size == 2)\n \n # First, copy dead dude information back to original prey block\n i_dead = self.prey['alive'] == 0\n self.prey_original['alive'][self.prey[i_dead]['index']] = 0\n \n # Remove dead prey items\n self.prey = self.prey[~i_dead]\n \n # Recenter and recompute tree\n self.prey['position'] = np.remainder(self.prey['position'] + \n np.array([0.5, 0.5]) - centre, 1.)\n \n if self.prey.size != 0:\n self.tree = scipy.spatial.cKDTree(self.prey['position'])", "def _defineNAVars(self, vars):\n self.na_dict[\"NV\"] = len(vars)\n self.na_dict[\"VNAME\"] = []\n self.na_dict[\"VMISS\"] = []\n self.na_dict[\"VSCAL\"] = []\n self.na_dict[\"V\"] = []\n\n for var in vars:\n name = xarray_utils.getBestName(var)\n self.na_dict[\"VNAME\"].append(name)\n miss = xarray_utils.getMissingValue(var)\n miss = self._resolve_float(miss)\n\n self.na_dict[\"VMISS\"].append(miss)\n self.na_dict[\"VSCAL\"].append(1)\n\n # Populate the variable list with the array\n # Make sure missing values are converted to real values using the required missing value\n self.na_dict[\"V\"].append(xarray_utils.getArrayAsList(var, missing_value=miss, handle_datetimes=True))\n\n # Create independent variable info\n if not \"X\" in self.na_dict:\n\n # Set up lists ready to populate with values\n self.na_dict[\"NXDEF\"] = []\n self.na_dict[\"NX\"] = []\n\n self.ax0 = xarray_utils.get_coord_by_index(var, 0)\n\n self.na_dict[\"X\"] = [xarray_utils.getArrayAsList(self.ax0)]\n self.na_dict[\"XNAME\"] = [xarray_utils.getBestName(self.ax0)]\n\n if len(self.ax0) == 1:\n self.na_dict[\"DX\"] = [0]\n else:\n # Set default increment as gap between first two\n incr = xarray_utils.get_interval(self.ax0, 0, 1)\n\n self.na_dict[\"DX\"] = [incr]\n # Now overwrite it as zero if non-uniform interval in axis\n\n for i in range(1, len(self.ax0)):\n if xarray_utils.get_interval(self.ax0, i-1, i) != incr:\n self.na_dict[\"DX\"] = [0]\n break\n\n # If 1D only then \"X\" should only be a list and not list of lists\n if self.na_dict[\"FFI\"] in (1001, 1010, 1020):\n self.na_dict[\"X\"] = self.na_dict[\"X\"][0]\n\n # If FFI is 1020 need to reduce axis down to reduced values as most are implied\n if self.na_dict[\"FFI\"] == 1020: \n vals = self.na_dict[\"X\"]\n self.na_dict[\"X\"] = 
vals[0:len(vals):self.na_dict[\"NVPM\"]] \n\n # Now add the rest of the axes to the self.na_dict objects \n for axis in xarray_utils.getAxisList(var)[1:]:\n self._appendAxisDefinition(axis)\n\n # If FFI is 2110 then need to modify the \"NX\" and \"X\" lists to cope with odd shape\n # Also need to add NX to auxiliary variables\n if self.na_dict[\"FFI\"] == 2110:\n new_x = []\n new_nx = []\n ax2_values = xarray_utils.get_coord_by_index(var, 1).data.tolist()\n\n for i in self.ax0[:]:\n new_x.append([i, ax2_values])\n new_nx.append(len(ax2_values))\n\n # Re-assign to new lists\n self.na_dict[\"NX\"] = new_nx\n self.na_dict[\"X\"] = new_x \n\n # Now auxiliary variable info here with independent var info\n # First aux var is NX\n self.na_dict[\"A\"] = [self.na_dict[\"NX\"][:]]\n ind_var_name = self.na_dict[\"XNAME\"][0]\n self.na_dict[\"ANAME\"] = [\"Number of '%s' values recorded in subsequent data records\" % ind_var_name]\n self.na_dict[\"AMISS\"] = [-9999.999]\n self.na_dict[\"ASCAL\"] = [1.0]\n\n # If FFI is 2310 then need to modify na_dict items for that\n elif self.na_dict[\"FFI\"] == 2310:\n new_x = []\n new_nx = []\n new_dx = []\n ax2_values = xarray_utils.get_coord_by_index(var, 1).data.tolist()\n incr = xarray_utils.get_interval(ax2_values, 0, 1)\n\n for i in self.ax0[:]:\n new_x.append([i, ax2_values])\n new_nx.append(len(ax2_values))\n new_dx.append(incr)\n\n # Re-assign to new lists\n self.na_dict[\"NX\"] = new_nx\n self.na_dict[\"X\"] = new_x\n self.na_dict[\"DX\"] = new_dx\n\n # Now auxiliary variable info here with independent var info\n # First three aux vars are NX, X0 and DX\n self.na_dict[\"A\"] = []\n self.na_dict[\"A\"].append(self.na_dict[\"NX\"][:])\n self.na_dict[\"A\"].append([i[1][0] for i in self.na_dict[\"X\"]])\n self.na_dict[\"A\"].append(self.na_dict[\"DX\"][:])\n\n ind_var_name = self.na_dict[\"XNAME\"][0]\n self.na_dict[\"ANAME\"] = [\"Number of '%s' values recorded in subsequent data records\" % ind_var_name,\n \"'%s' value for first data point\" % ind_var_name,\n \"'%s' increment\" % ind_var_name]\n self.na_dict[\"AMISS\"] = [-9999.999, -9999.999, -9999.999]\n self.na_dict[\"ASCAL\"] = [1.0, 1.0, 1.0]", "def __addValueConstraints(self):\n for x in range(self.width):\n for y in range(self.height):\n g = self.grid[(x, y)]\n self.solver.add(\n Or([g == Magnets.EMPTY, g == Magnets.PLUS, g == Magnets.MINUS]))\n if x > 0:\n left = self.grid[(x-1, y)]\n self.solver.add(Or([g != left, g == Magnets.EMPTY]))\n if y > 0:\n up = self.grid[(x, y-1)]\n self.solver.add(Or([g != up, g == Magnets.EMPTY]))", "def constraints(self, x):\n pass", "def redef_vars(self):\r\n\r\n # Try using redefined source / boundary terms\r\n if self.redefined == True:\r\n self._redef_via_predef_eqn()\r\n else: # If they haven't been set you'll get an exception.\r\n self._redef_sp1_vars()", "def fix_variables(m, variables):\r\n\r\n for var_name, values in variables.items():\r\n for var_index, var_value in values.items():\r\n m.__getattribute__(var_name)[var_index].fix(var_value)\r\n\r\n return m", "def add_all_different_constraint(self, variables):\n for (i, j) in self.get_all_possible_pairs(variables, variables):\n if i != j:\n self.add_constraint_one_way(i, j, lambda x, y: x != y)", "def unfix_variables(m, variables):\r\n\r\n for var_name, values in variables.items():\r\n for var_index, var_value in values.items():\r\n m.__getattribute__(var_name)[var_index].unfix(var_value)\r\n\r\n return m", "def _parse_modelspace(self) :\n\t\tlogging.debug(\"Parsing modelspace hard 
constraints\")\t\n\t\n\t\tself.modelspace = {}\n\t\t\n\t\tfor varname in ['alpha','beta','g','h'] : \n\t\t\tself._parse_var_modelspace(varname)", "def domain_size(self):\n all_vars = self.all_variables()\n if not all_vars:\n return 0\n return np.prod([v.size for v in all_vars])", "def estimateCost(self, domains):\n return reduce(operator.mul,\n [domains[var].size() for var in self._variables])", "def forward_checking(csp, var, value, assignment, removals):\r\n csp.support_pruning()\r\n check=0\r\n for B in csp.neighbors[var]:\r\n if B not in assignment:\r\n for b in csp.curr_domains[B][:]:\r\n check+=1\r\n if not csp.constraints(var, value, B, b):\r\n csp.prune(B, b, removals)\r\n # we have a failure\r\n # we check if domains list for variable B is not empty\r\n # and increase weight of B,var by 1\r\n if not csp.curr_domains[B]:\r\n csp.weight[(B,var)] += 1\r\n return False,check\r\n return True,check", "def revise(self, x, y):\n # print(\"Entered revise Function\") \n keep_list_x = set()\n keep_list_y = set()\n domain_x = self.domains[x].copy()\n domain_y = self.domains[y].copy()\n overlaps = self.crossword.overlaps\n overlap = overlaps[(x, y)]\n # print(self.domains[x].copy)\n revision = False\n if overlap is not None:\n while domain_x:\n word_x = domain_x.pop()\n # print(word_x)\n while domain_y:\n word_y = domain_y.pop()\n # print(word_y)\n keep_list_y.add(word_y)\n if word_x[overlap[0]] != word_y[overlap[1]]:\n keep_list_x.add(word_x)\n for word in keep_list_y:\n domain_y.add(word)\n \n remove_list = self.domains[x].difference(keep_list_x)\n # print(\"DOMAIN\")\n # print(self.domains[x])\n # print(\"KEEP\")\n # print(keep_list_x)\n # print(\"REMOVE\")\n # print(remove_list)\n for word in remove_list:\n self.domains[x].remove(word)\n revision = True\n\n return revision\n # raise NotImplementedError", "def regenerate_constraints(self):\n\n # Let us not forget to remove fields that migh be empty by now\n if hasattr(self, '_cons_kinds'):\n for k in self._cons_kinds:\n attrname = camel2underscores(k)\n try:\n delattr(self, attrname)\n except AttributeError:\n pass # The attribute may not have been set up yet\n\n _cons_kinds = defaultdict(DictList)\n\n for k, v in self._cons_dict.items():\n _cons_kinds[v.__class__.__name__].append(v)\n\n for k in _cons_kinds:\n attrname = camel2underscores(k)\n setattr(self, attrname, _cons_kinds[k])\n\n self._cons_kinds = _cons_kinds", "def eliminate(self, var, val) :\n values = self.domains.get(var, [])\n found = val in values\n if found:\n values.remove(val)\n self.domains[var] = values\n return found", "def revise(self, x, y):\n revision= False\n #creates a list of words in the domain of node x to remove since we cannot remove the elements in a set while it is iterating\n words_to_remove= []\n #function which returns data of where the two nodes intersect/overlap\n overlap= self.crossword.overlaps[x,y]\n\n if overlap is not None:\n for word_x in self.domains[x]:\n consistent= False\n\n for word_y in self.domains[y]:\n #a word in the domain of x is consistent if there is any word in the domain of y that has the same letter in the intersect\n if word_x[overlap[0]] == word_y[overlap[1]]:\n consistent= True\n #if the word is not consistent it is added to a list to be removed later\n if consistent == False:\n words_to_remove.append(word_x)\n revision= True\n #inconsistent words are removed from the domain of x\n for word in words_to_remove:\n self.domains[x].remove(word)\n\n return revision", "def _parse_var_modelspace(self,varname) :\n\n\t\tmodelspace = 
self.ss.constraint.modelspace\n\t\tparams = getattr(modelspace,varname)\n\t\tnvars = len(self.ss.variables) # num of variables\n\n\t\tif varname in ('alpha','beta') : \n\t\t\tkeys = params.keys()\n\t\t\tvar_range = (params['defaultLowerBound'],\\\n\t\t\t\tparams['defaultUpperBound'])\n\t\t\tself.modelspace[varname] = [var_range]*nvars\n\t\t\tfor key in keys : \n\t\t\t\tif re.match(varname+'_\\d+',key)\t:\n\t\t\t\t\tidx = int(key.split('_')[1])\t\t\t\t\n\t\t\t\t\tself.modelspace[varname][idx-1] = params[key]\n\n\t\telif varname in ('g','h') :\n\t\t\tkeys = params.keys()\n\t\t\tvar_range = (params['defaultLowerBound'],\\\n\t\t\t\tparams['defaultUpperBound'])\n\n\t\t\t# This step is purely there cuz [[var_range]*nvars]*nvars\n\t\t\t# does not work\n\t\t\tvarlist = []\n\t\t\tfor ii in range(nvars) : \n\t\t\t\tvarlist.append([var_range]*nvars)\n\t\t\tself.modelspace[varname] = varlist\n\t\t\tfor key in keys : \n\t\t\t\tif re.match(varname+'_\\d+_\\d+',key)\t:\n\t\t\t\t\tidr,idc = map(int,(key.split('_')[1:3]))\n\t\t\t\t\tself.modelspace[varname][idr-1][idc-1] = params[key]\n\t\t\n\t\telse :\n\t\t\tlogging.error(\"Unrecognized varname %s quitting..\" \\\n\t\t\t%(varname))\n\t\t\tsys.exit(1)", "def update(self):\n self._g, self._B = self._constraint_assembler.preallocate_g_and_B(self._no_of_dofs_unconstrained,\n self._dofidxs(),\n self._no_of_constraints_by_object())", "def clean(self):\n self.unique_combinations = {}\n self.reverse_combinations = []\n self.label_count = None", "def all_different(variables) :\n constraints = []\n for i in xrange(len(variables)):\n var1 = variables[i]\n for j in xrange(i+1,len(variables)):\n var2 = variables[j]\n if var1!=var2:\n constraints.append(Constraint(var1,var2,constraint_different))\n return constraints", "def eliminate_from_neighbors(csp, var) :\n reduced = []\n val = csp.get_assigned_value(var)\n replacement = []\n for constraint in csp.constraints_between(var,None):\n var2 = constraint.var2\n domainCopy = csp.domains[var2][:]\n numLeft = len(domainCopy)\n if (val!=None):\n for i in xrange(len(domainCopy)):\n possibleVal2 = domainCopy[i]\n check = constraint.check(val,possibleVal2)\n if (check==False):\n didEliminate = csp.eliminate(var2,possibleVal2)\n if (didEliminate):\n numLeft-=1\n if var2 not in reduced:\n reduced.append(var2)\n if numLeft==0:\n return None\n return sorted(reduced)", "def reset(self):\n self.__init__(self.subDomainnumMonomers, self.dim, self.b, self.subDomainNc, self.keepCL, position = self.positions)", "def test_redundant_set_field(self):\n SF1, SF2 = (\"SET_FIELD\", (\"IPV4_DST\", 1)), (\"SET_FIELD\", (\"IPV4_DST\", 2))\n SF3, SF4 = (\"SET_FIELD\", (\"IPV4_DST\", 3)), (\"SET_FIELD\", (\"IPV4_DST\", 4))\n OUT = (\"OUTPUT\", 1)\n n1 = normalise([\n Rule(priority=10,\n instructions=inst_from_acts([SF2, OUT])),\n Rule(priority=0)\n ])\n n2 = normalise([\n Rule(priority=10,\n instructions=inst_from_acts([SF1, SF2, OUT])),\n Rule(priority=0)\n ])\n n3 = normalise([\n Rule(priority=10,\n instructions=inst_from_acts([SF3, SF2, OUT])),\n Rule(priority=0)\n ])\n n4 = normalise([\n Rule(priority=10,\n instructions=inst_from_acts([SF4, SF3, SF1, SF2, OUT])),\n Rule(priority=0)\n ])\n n5 = normalise([\n Rule(priority=10,\n instructions=inst_from_acts([SF2, SF2, SF2, SF2, OUT])),\n Rule(priority=0)\n ])\n self.assertTrue(check_equal(n1, n2))\n self.assertTrue(check_equal(n1, n3))\n self.assertTrue(check_equal(n1, n4))\n self.assertTrue(check_equal(n1, n5))\n\n # Sanity check\n n6 = normalise([\n Rule(priority=10,\n 
instructions=inst_from_acts([SF4, SF3, SF1, SF1, OUT])),\n Rule(priority=0)\n ])\n self.assertFalse(check_equal(n1, n6))", "def __checkNrVars(self):\n variables = set()\n for q in self.__quantifierList:\n for var in q.getVariableNames():\n variables.add(\"%s\" % var)\n for c in self.__clauseList:\n for var in c.getVariableNames():\n variables.add(\"%s\" % var)\n \n return len(variables)", "def normalize(self):\n for key in self.corpus.keys():\n sum_count = 0\n words = []\n counts = []\n for k, v in self.corpus[key].items():\n sum_count += v\n words.append(k)\n counts.append(v)\n prob = [float(count)/sum_count for count in counts]\n\n self.corpus[key] = [words, prob]", "def domain_reduction_singleton_domains(csp, queue=None) :\n if queue==None:\n queue=csp.get_all_variables()\n dequeued=[]\n while len(queue)!=0:\n current_var=queue.pop(0)\n dequeued.append(current_var)\n eliminated=eliminate_from_neighbors(csp,current_var)\n if(eliminated==None):\n return None\n pre_add_list=[]\n add_list=[]\n for var in eliminated:\n exist=False\n for varr in queue:\n if var == varr:\n exist=True\n break\n if not exist:\n pre_add_list.append(var)\n for var in pre_add_list:\n if len(csp.get_domain(var))==1:\n add_list.append(var)\n queue=queue+add_list\n return dequeued", "def resetDomain(self) -> None:\r\n self._hiddenValue.clear()", "def remove_from_possible_domain(self, value):\n self.possible_domain -= {value}", "def relaxed(self):\n return DataSpace(self.intervals.relaxed,\n {k: v.relaxed for k, v in self.parts.items()})", "def domain(self, value: ArrayLike):\n\n value = as_float_array(value, self.dtype)\n\n if not np.all(np.isfinite(value)):\n runtime_warning(\n f'\"{self.name}\" new \"domain\" variable is not finite: {value}, '\n f\"unpredictable results may occur!\"\n )\n else:\n attest(\n np.all(value[:-1] <= value[1:]),\n \"The new domain value is not monotonic! 
\",\n )\n\n if value.size != self._range.size:\n self._range = np.resize(self._range, value.shape)\n\n self._domain = value\n self._function = None # Invalidate the underlying continuous function.", "def fill_noncontinous_variables(self, samples):\n init_points_count = samples.shape[0]\n for (idx, var) in enumerate(self.space.space_expanded):\n if isinstance(var, DiscreteVariable) or isinstance(var, CategoricalVariable) :\n sample_var = np.atleast_2d(np.random.choice(var.domain, init_points_count))\n samples[:,idx] = sample_var.flatten()\n\n # sample in the case of bandit variables\n elif isinstance(var, BanditVariable):\n # Bandit variable is represented by a several adjacent columns in the samples array\n idx_samples = np.random.randint(var.domain.shape[0], size=init_points_count)\n bandit_idx = np.arange(idx, idx + var.domain.shape[1])\n samples[:, bandit_idx] = var.domain[idx_samples,:]", "def constraints(self):\n constraints = np.concatenate( (np.ravel(self.noise_var_constraint), \n self.kern.constraints), axis=0)\n return constraints", "def _check_var_conflicts(s, variables):\n # check conflicts with variable names\n vars_redefined = {x for x in s if x in variables}\n if vars_redefined:\n raise Exception('Variables redefined: {v}'.format(v=vars_redefined))\n # check conflicts with values of arbitrary finite data types\n for var, domain in variables.items():\n # not arbitrary finite type ?\n if not isinstance(domain, list):\n continue\n # var has arbitrary finite type\n conflicting_values = {x for x in s if x in domain}\n if conflicting_values:\n raise Exception(\n 'Values redefined: {v}'.format(v=conflicting_values))", "def reset_solver(self):\n self.total_iterations = 0\n self.active_constraints_index = 0\n self.active_constraints_set = False\n return", "def _var_check(self):\n missing = set()\n for v in self.variables:\n if getattr(self, v) is None:\n missing.add(v)\n self.missing = missing" ]
[ "0.80468005", "0.69576985", "0.6794537", "0.63951313", "0.63650477", "0.6052798", "0.5844512", "0.58284026", "0.5795998", "0.5759449", "0.5712677", "0.5712677", "0.56901187", "0.5685086", "0.5629894", "0.5607322", "0.55948013", "0.55813795", "0.5536879", "0.5519875", "0.5494521", "0.54654443", "0.5440014", "0.54168254", "0.53963923", "0.5385804", "0.53682745", "0.5338689", "0.5337776", "0.5336905", "0.53149104", "0.5271483", "0.5265992", "0.52628976", "0.52542883", "0.52416867", "0.5235673", "0.52273244", "0.52269095", "0.52173066", "0.51867783", "0.51867783", "0.51867783", "0.5182429", "0.51792455", "0.5178828", "0.51309794", "0.51292676", "0.5128453", "0.5094395", "0.50843066", "0.5072701", "0.50661236", "0.50603664", "0.50499946", "0.504201", "0.50325483", "0.503147", "0.5021358", "0.50180703", "0.5006751", "0.50060207", "0.5001768", "0.49764863", "0.4966225", "0.49545977", "0.49487555", "0.49483198", "0.4939007", "0.4935526", "0.49293053", "0.49210578", "0.48905253", "0.48862892", "0.48660722", "0.48576662", "0.48541096", "0.48516136", "0.48512217", "0.48463303", "0.4845861", "0.48334938", "0.4825549", "0.48211968", "0.48202008", "0.48161077", "0.4815412", "0.48140687", "0.48095167", "0.48082733", "0.4808098", "0.48079064", "0.48022622", "0.4801603", "0.479937", "0.47885904", "0.4787182", "0.4784969", "0.47791657", "0.47726718" ]
0.673076
3
Make variable `x` arc consistent with variable `y`. To do so, remove values from `self.domains[x]` for which there is no possible corresponding value for `y` in `self.domains[y]`. Return True if a revision was made to the domain of `x`; return False if no revision was made.
def revise(self, x, y): # print("Entered revise Function") keep_list_x = set() keep_list_y = set() domain_x = self.domains[x].copy() domain_y = self.domains[y].copy() overlaps = self.crossword.overlaps overlap = overlaps[(x, y)] # print(self.domains[x].copy) revision = False if overlap is not None: while domain_x: word_x = domain_x.pop() # print(word_x) while domain_y: word_y = domain_y.pop() # print(word_y) keep_list_y.add(word_y) if word_x[overlap[0]] != word_y[overlap[1]]: keep_list_x.add(word_x) for word in keep_list_y: domain_y.add(word) remove_list = self.domains[x].difference(keep_list_x) # print("DOMAIN") # print(self.domains[x]) # print("KEEP") # print(keep_list_x) # print("REMOVE") # print(remove_list) for word in remove_list: self.domains[x].remove(word) revision = True return revision # raise NotImplementedError
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def revise(self, x, y):\n # return set a default return value of False\n ret_val = False\n # define a tuple of the two variables without their domains\n var_tup = (x, y)\n # define lists of the variable's domains\n x_values = self.domains[x].copy()\n y_values = self.domains[y].copy()\n # if the two variables exist in overlaps\n if var_tup in self.crossword.overlaps:\n # if that overlap is not None\n if self.crossword.overlaps.get(var_tup) is not None:\n # assign the overlap\n overlap = self.crossword.overlaps[var_tup]\n # generate the list of letters that x has to match with\n y_matches = [val[overlap[1]] for val in y_values]\n # for each of x's domain values\n for value in x_values:\n # if that value cannot match with y's domain values\n if value[overlap[0]] not in y_matches:\n # remove that value from the domain\n self.domains[x].remove(value)\n # set a flag for return value\n ret_val = True\n # return True if any changes were made\n return ret_val", "def revise(self, x, y):\n revision= False\n #creates a list of words in the domain of node x to remove since we cannot remove the elements in a set while it is iterating\n words_to_remove= []\n #function which returns data of where the two nodes intersect/overlap\n overlap= self.crossword.overlaps[x,y]\n\n if overlap is not None:\n for word_x in self.domains[x]:\n consistent= False\n\n for word_y in self.domains[y]:\n #a word in the domain of x is consistent if there is any word in the domain of y that has the same letter in the intersect\n if word_x[overlap[0]] == word_y[overlap[1]]:\n consistent= True\n #if the word is not consistent it is added to a list to be removed later\n if consistent == False:\n words_to_remove.append(word_x)\n revision= True\n #inconsistent words are removed from the domain of x\n for word in words_to_remove:\n self.domains[x].remove(word)\n\n return revision", "def checkDomainRange(self, x, y):\n if not self.domain.checkSame(x):\n raise ValueError(\"Provided x vector does not match operator domain\")\n if not self.range.checkSame(y):\n raise ValueError(\"Provided y vector does not match operator range\")", "def ac3(self, arcs=None):\n if arcs == None:\n #creates a queue of arcs to update\n arcs= []\n for node1 in self.domains:\n for node2 in self.domains:\n if node1 != node2:\n #for each pair of nodes that intersect, add them as a tuple pair to a list of arcs\n if self.crossword.overlaps[node1,node2] != None: \n arcs.append((node1,node2))\n\n while arcs != []:\n x= arcs[0][0]\n y= arcs[0][1]\n\n if self.revise(x, y):\n #if the domain of node x is empty after revision, this problem has no solution\n if len(self.domains[x]) == 0:\n return False\n #if the arc is updated successfully, node x may no longer be arc consistent in respect to other nodes that it may have been before\n #we must then add the arcs between the revised x and all of its neighbors(except y as we have just checked it) to the queue\n for neighbor in self.crossword.neighbors(x):\n if neighbor != y:\n arcs.append((neighbor, x))\n #remove arcs from queue after revision\n arcs.pop(0)\n else:\n arcs.pop(0)\n \n return True", "def __forward_check(self, assigned_var, assigned_value, unassigned_vars):\n for unassigned_neighbor in self.__unassigned_neighbors(assigned_var, unassigned_vars):\n consistent_values = self.__consistent_domain_values(assigned_var, assigned_value, unassigned_neighbor)\n if len(consistent_values) == 0:\n return False\n else:\n unassigned_neighbor.domain = consistent_values\n return True", "def make_arc_consistent(Xj, Xk, csp):\r\n 
# csp.curr_domains[Xj] = []\r\n for val1 in csp.domains[Xj]:\r\n keep = False # Keep or remove val1\r\n for val2 in csp.domains[Xk]:\r\n if csp.constraints(Xj, val1, Xk, val2):\r\n # Found a consistent assignment for val1, keep it\r\n keep = True\r\n break\r\n\r\n if not keep:\r\n # Remove val1\r\n csp.prune(Xj, val1, None)\r\n\r\n return csp.curr_domains[Xj]", "def seg_x_in_y(self, x: str, y: str) -> bool:\n return len(set(x + y)) == len(y)", "def dans_cercle(self, r, x, y):\r\n self.r_num(r)\r\n valid = (isinstance(x, int) or isinstance(x, float)) and \\\r\n (isinstance(y, int) or isinstance(y, float))\r\n if valid:\r\n if sqrt(x**2+y**2)<self.r:\r\n return True\r\n else:\r\n return False\r\n else:\r\n raise TypeError", "def enforce_node_consistency(self):\n # Loop over each variable (space for word) in the crossword\n # Use copy to prevent domains from being modified while looping\n for var in self.domains.copy():\n # Get all unary constraints for this variable\n for value in self.domains[var].copy():\n # Check if the value is consistent with all unary constraints\n if len(value) != var.length:\n # If not, remove the value from the domain\n self.domains[var].remove(value)\n # No return value is necessary", "def arcConsistency(self, constraint):\n # start out assuming the constraint is satisfied\n satisfied = True\n # if the tail is assigned then we don't need to do anything\n if (constraint.tail.value != \"none\"):\n # the arc is consistent\n return satisfied\n # if the head is assigned a value then we compare the tail domain to the assigned value\n if (constraint.head.value != \"none\"):\n # make a copy of the tail domain to loop through\n tailDomain = constraint.tail.domain[:]\n # loop through all values in the tail domain\n for tailValue in tailDomain:\n # if this value doesn't satisfy the constraint then remove the value from the domain\n if (not constraint.satisfied(tailValue, constraint.head.value)):\n # record that the constraint wasn't satisfied\n satisfied = False\n # remove the value from the domain\n constraint.tail.domain.remove(tailValue)\n # return whether or not the constraint was satisfied\n return satisfied\n # if the head is not assigned a value then we compare the tail domain to each value in the head domain\n # start assuming the tail domain has not been modified\n domainModified = False\n # make a copy of the tail domain to loop through\n tailDomain = constraint.tail.domain[:]\n # loop through all values in the tail domain\n for tailValue in tailDomain:\n # start out assuming the constraint is not satisfied\n satisfied = False\n # loop through all values in the head domain\n for headValue in constraint.head.domain:\n # does this value satisfy the constraint\n if (constraint.satisfied(tailValue, headValue)):\n # record that the constraint wasn't satisfied\n satisfied = True\n # if we didn't find a value in the head that works with the tail value\n if (not satisfied):\n # remove the tail value from the domain\n constraint.tail.domain.remove(tailValue)\n # mark that we removed something from the tail domain\n domainModified = True\n # return whether or not the constraint was satisfied\n return (not domainModified)", "def x_in_y(self, x: int, y: int) -> bool:\n return len(set(self.MAPPING[x] + self.MAPPING[y])) == len(self.MAPPING[y])", "def ac3(self, arcs=None):\n if arcs is None:\n arcs = [arc for arc in self.crossword.overlaps if arc is not None]\n while len(arcs) != 0:\n (x, y) = arcs.pop()\n if self.revise(x, y):\n if len(self.domains[x]) == 0:\n return False\n # 
if the domain of x is not empty, enqueue neighbors\n for neighbor in self.crossword.neighbors(x):\n if neighbor is not None and not neighbor == y:\n arcs.append((neighbor, x))\n return True", "def safe(self, x: float, y: float) -> bool:\n return self._run_on_server('safe', {\n 'x': x,\n 'y': y\n })", "def unite(self, x: int, y: int):\n\n x = self.find(x)\n y = self.find(y)\n if x == y:\n return False\n if self.root[x] > self.root[y]:\n x, y = y, x\n self.root[x] += self.root[y]\n self.root[y] = x\n return True", "def union(self, x, y):\n xr, yr = self.find(x), self.find(y)\n if xr == yr:\n return False\n\n if self.sz[xr] < self.sz[yr]:\n xr, yr = yr, xr\n\n self.par[yr] = xr\n self.sz[xr] += self.sz[yr]\n self.sz[yr] = self.sz[xr]\n\n return True", "def fill_space(self, x, y, random, matches_allowed):\n rotation = random.randint(0, 3) * 90\n for _ in range(4):\n try:\n choices = self.choose_and_flip_extra_dominoes(\n random)\n for domino, is_flipped in choices:\n if self.cycles_remaining <= 0:\n return False\n self.cycles_remaining -= 1\n domino.rotate_to(rotation)\n self.add(domino, x, y)\n self.add_count += 1\n has_even_gaps = self.hasEvenGaps()\n if not has_even_gaps:\n self.remove(domino)\n break\n else:\n if is_flipped:\n domino.flip()\n if not matches_allowed and domino.hasMatch():\n pass\n else:\n if self.fill(random,\n matches_allowed,\n reset_cycles=False):\n return True\n self.remove(domino)\n except BadPositionError:\n pass\n rotation = (rotation + 90) % 360\n return False", "def verify(self, y):\n left = self.sgroup.exponentiate(self.a, y)\n right = (self.x * self.sgroup.exponentiate(self.b, self.c)) % self.sgroup.p\n is_ok = (left == right)\n return is_ok", "def _check_domain_additional(cls, domain: D) -> bool:\n action_space = domain.get_action_space().unwrapped()\n observation_space = domain.get_observation_space().unwrapped()\n\n if not isinstance(action_space, Iterable) and not isinstance(action_space, gym.spaces.Tuple):\n action_space = [action_space]\n if not isinstance(observation_space, Iterable) and not isinstance(observation_space, gym.spaces.Tuple):\n observation_space = [observation_space]\n\n flat_action_space = list(flatten(action_space))\n flat_observation_space = list(flatten(observation_space))\n\n print(flat_action_space)\n print(flat_observation_space)\n\n valide_action_space = True\n for x in flat_action_space:\n valide_action_space = isinstance(x,(gym.spaces.Tuple, gym.spaces.Discrete, gym.spaces.Box))\n \n validate_observation_space = True\n for x in flat_observation_space:\n validate_observation_space = isinstance(x,(gym.spaces.Tuple, gym.spaces.Discrete, gym.spaces.Box))\n \n return valide_action_space and validate_observation_space", "def ensure_monotonic(y, x, xlim=None, strict=False, solve=True):\n ylim = map(float, (y.subs({x: xlim[0]}),\n y.subs({x: xlim[1]}))\n )\n if ylim[0] > ylim[1]:\n ylim = (ylim[1], ylim[0])\n incr = False\n elif ylim[0] == ylim[1]:\n return False, None, None\n else:\n incr = True\n\n if solve:\n dydx = y.diff(x)\n d2ydx2 = dydx.diff(x)\n xs = sympy.solve(dydx, x)\n for v in xs:\n if xlim:\n if v < xlim[0] or v > xlim[1]: continue\n if strict: return False, None, None\n if d2ydx2.subs({x: v}) != 0:\n return False, None, None\n return True, ylim, incr", "def continuous(self, x, y, X, Y):\n hor = fabs(x - X) == SSIZE and y == Y\n ver = fabs(y - Y) == SSIZE and x == X\n return (hor and not ver) or (ver and not hor)", "def is_almost_equal(self, x ,y ,epsilon=1*10**(-8)):\n \treturn abs(x-y) <= epsilon", "def suppose(self, var, 
value):\r\n self.support_pruning()\r\n removals = [(var, a) for a in self.curr_domains[var] if a != value]\r\n self.curr_domains[var] = [value]\r\n return removals", "def has_arc(self, a, b):\n return self.matrix[a][b] != 0", "def evaluate(self, y):\n const_projection = np.zeros(len(self.index_array))\n for i in range(len(self.index_array)):\n const_projection[i] = y[self.index_array[i]]\n for a in self.assignments:\n equal_flag = True\n for i in range(len(const_projection)):\n if const_projection[i] != a[i]:\n equal_flag = False\n break\n if equal_flag:\n return True\n return False", "def fp_eq(x: float, y: float) -> bool:\n return fabs(x-y) < 10**-12", "def test_equality(self):\n\n s3 = space(curvature=1/5)\n for k in (0, -1, 1, 1.75, 0.325, 1/7, -1.75, -0.325, -1/7):\n s1 = space(fake_curvature=k)\n s2 = space(fake_curvature=k)\n self.assertTrue(s1 == s2)\n self.assertTrue(hash(s1) == hash(s2))\n self.assertTrue(str(s1) == str(s2))\n self.assertTrue(repr(s1) == repr(s2))\n self.assertTrue(s1 != s3)", "def almost_equal_values(x, y, precision):\n return round(x - y, precision) == 0", "def is_allowed(self):\n if self.structure.variable_dependant_path:\n # fill in empty part of the path with the current path\n if len(self.structure.variable_dependant_path) == 3:\n dependant_var_path = (\n self.path[0],\n ) + self.structure.variable_dependant_path\n elif len(self.structure.variable_dependant_path) == 2:\n dependant_var_path = (\n self.path[0],\n self.path[1],\n ) + self.structure.variable_dependant_path\n elif len(self.structure.variable_dependant_path) == 1:\n dependant_var_path = (\n self.path[0],\n self.path[1],\n self.path[2],\n ) + self.structure.variable_dependant_path\n else:\n dependant_var_path = None\n\n # get dependency\n dependant_var = None\n mf_data = self._simulation_data.mfdata\n if dependant_var_path in mf_data:\n dependant_var = mf_data[dependant_var_path]\n\n # resolve dependency\n if self.structure.variable_value_when_active[0] == \"Exists\":\n exists = self.structure.variable_value_when_active[1]\n if dependant_var and exists.lower() == \"true\":\n return True\n elif not dependant_var and exists.lower() == \"false\":\n return True\n else:\n return False\n elif not dependant_var:\n return False\n elif self.structure.variable_value_when_active[0] == \">\":\n min_val = self.structure.variable_value_when_active[1]\n if dependant_var > float(min_val):\n return True\n else:\n return False\n elif self.structure.variable_value_when_active[0] == \"<\":\n max_val = self.structure.variable_value_when_active[1]\n if dependant_var < float(max_val):\n return True\n else:\n return False\n return True", "def _check(self, x, y):\n n = self.n\n # x direction\n xline = self.arr[y]\n if not self.x_regexes[y].match(xline):\n return False\n\n # y direction\n ypos = x + max(0, y + 1 - n)\n yline = []\n x1, y1 = ypos, 0\n while x1 >= 0 and y1 < 2 * n - 1:\n if x1 < len(self.arr[y1]):\n yline.append(self.arr[y1][x1])\n if y1 >= n - 1:\n x1 -= 1\n y1 += 1\n\n if not self.y_regexes[ypos].match(yline):\n return False\n\n # z direction\n zpos = x + max(0, n - 1 - y)\n zline = []\n x1, y1 = zpos, 2 * n - 2\n while x1 >= 0 and y1 >= 0:\n if x1 < len(self.arr[y1]):\n zline.append(self.arr[y1][x1])\n if y1 <= n - 1:\n x1 -= 1\n y1 -= 1\n\n if not self.z_regexes[zpos].match(zline):\n return False\n\n return True", "def movimiento_valido(self, x, y):\n\n f, c = self.det_casilla(x, y)\n\n casilla = self._matriz[f][c]\n\n if casilla.ficha == None:\n\n #self._matriz[f][c] = casilla\n\n return True", "def 
is_equal_approx(x, y, epsilon=1e-6):\r\n # Check absolute precision.\r\n if -epsilon <= x - y <= epsilon:\r\n return True\r\n\r\n # Is x or y too close to zero?\r\n if -epsilon <= x <= epsilon or -epsilon <= y <= epsilon:\r\n return False\r\n\r\n # Check relative precision.\r\n return (-epsilon <= (x - y) / x <= epsilon\r\n or -epsilon <= (x - y) / y <= epsilon)", "def _almost_equal(x, y):\n pass", "def victory_checker() -> bool:\r\n conflict_check()\r\n for x in range(shape):\r\n for y in range(shape):\r\n if conflict_space[x, y] != 0:\r\n return False\r\n if separation_crawler(False):\r\n return False\r\n return True", "def solve(x, r, c):\r\n\r\n\tlg, sm = max(r, c), min(r, c)\r\n\r\n\tprint >> dbg, \"{}x{} {}-omino\".format(r, c, x)\r\n\r\n\tif (r*c) % x != 0:\r\n\t\tprint >> dbg, \"can't divide\"\r\n\t\treturn False\r\n\r\n\telif x == 2:\r\n\t\treturn True\r\n\r\n\telif x > lg:\r\n\t\tprint >> dbg, \"1*x cannot be contained\"\r\n\t\treturn False\r\n\r\n\telif (x + 1) // 2 > sm:\r\n\t\tprint >> dbg, \"an L shape doesn't fit\"\r\n\t\treturn False\r\n\r\n\telif x >= 7:\r\n\t\tprint >> dbg, \"omino exists with a hole\"\r\n\t\treturn False\r\n\r\n\telif x >= 2 * sm:\r\n\t\tprint >> dbg, \"baseline wider than height to force orientation, enough to reach top\"\r\n\t\treturn False\r\n\r\n\telif x == 5 and sm == 3 and lg == 5:\r\n\t\tprint >> dbg, \"W segments fillable space\"\r\n\t\treturn False\r\n\r\n\r\n\treturn True", "def check_deterministic_constraints(self, x):\n return True", "def check_deterministic_constraints(self, x):\n return True", "def orthogonal_to(self, other: 'Concept') -> bool:\n meet = self._extent & other._extent\n return (not not meet and meet != self._extent and meet != other._extent\n and (self._extent | other._extent) != self.lattice.supremum._extent)", "def nearlyEqual(self, x, y):\n return abs(x-y) < self.absoluteerrorrange", "def __contains__(self, x: ArrayLike) -> bool:\n\n return bool(\n np.all(\n np.where(\n np.logical_and(\n x >= np.min(self._domain), # pyright: ignore\n x <= np.max(self._domain), # pyright: ignore\n ),\n True,\n False,\n )\n )\n )", "def test_equality_function(self):\r\n self.assertFalse(directories_equal(self.version1_nodrafts, self.version0_nodrafts))\r\n self.assertFalse(directories_equal(self.version1_drafts_extra_branch, self.version1_drafts))", "def __eq__(self, x):\n assert isinstance(x, AxisDistance), 'incorrect type of arg x: should be type AxisDistance, is type {}'.format(type(x))\n return self.__cmp__(x) == 0", "def circular_nonterminating_orbit(x, f):\n return x == f(collision_point_nonterminating_orbit(x, f))", "def is_leap_valid(self, final_x, final_y) -> bool:\n return True", "def __eq__(A, B):\n if not isinstance(A, type(B)):\n return NotImplemented\n return A.domain == B.domain and A.rep == B.rep", "def set_release(self, x: int, y: int) -> bool:\n\t\tis_click_valid = False\n\t\trelease_position = Point(x, y)\n\t\tif self.state == MouseState.CLICKED:\n\t\t\tself.state = MouseState.RELEASED\n\t\t\tif self.click_position.distance_to(release_position) <= self.eps:\n\t\t\t\tis_click_valid = True\n\t\treturn is_click_valid", "def finalize(self, x):\r\n (x1,x2) = x\r\n if x1 == x2 or self.hash(self. key, x1) == None:\r\n return False\r\n return self.hash(self. 
key, x1) == self.hash(self.key, x2)", "def connected(self, x, y):\n\n return self.__find_root(x) == self.__find_root(y)", "def isCrossingCircle(self, other):\n vector = Vector.createFromTwoPoints(self.center, other.center)\n return vector.norm < self.radius + other.radius", "def _trim_domain(self, domain_size=None):\n # Label external pores for trimming below\n self['pore.external'] = False\n if len(domain_size) == 1: # Spherical\n # Trim external Delaunay pores\n r = sp.sqrt(sp.sum(self['pore.coords']**2, axis=1))\n Ps = (r > domain_size)*self['pore.delaunay']\n self['pore.external'][Ps] = True\n # Trim external Voronoi pores\n Ps = ~self['pore.external']*self['pore.delaunay']\n Ps = self.find_neighbor_pores(pores=Ps)\n Ps = ~self.tomask(pores=Ps)*self['pore.voronoi']\n self['pore.external'][Ps] = True\n elif len(domain_size) == 2: # Cylindrical\n # Trim external Delaunay pores outside radius\n r = sp.sqrt(sp.sum(self['pore.coords'][:, [0, 1]]**2, axis=1))\n Ps = (r > domain_size[0])*self['pore.delaunay']\n self['pore.external'][Ps] = True\n # Trim external Delaunay pores above and below cylinder\n Ps1 = self['pore.coords'][:, 2] > domain_size[1]\n Ps2 = self['pore.coords'][:, 2] < 0\n Ps = self['pore.delaunay']*(Ps1 + Ps2)\n self['pore.external'][Ps] = True\n # Trim external Voronoi pores\n Ps = ~self['pore.external']*self['pore.delaunay']\n Ps = self.find_neighbor_pores(pores=Ps)\n Ps = ~self.tomask(pores=Ps)*self['pore.voronoi']\n self['pore.external'][Ps] = True\n elif len(domain_size) == 3: # Rectilinear\n # Trim external Delaunay pores\n Ps1 = sp.any(self['pore.coords'] > domain_size, axis=1)\n Ps2 = sp.any(self['pore.coords'] < [0, 0, 0], axis=1)\n Ps = self['pore.delaunay']*(Ps1 + Ps2)\n self['pore.external'][Ps] = True\n # Trim external Voronoi pores\n Ps = ~self['pore.external']*self['pore.delaunay']\n Ps = self.find_neighbor_pores(pores=Ps)\n Ps = ~self.tomask(pores=Ps)*self['pore.voronoi']\n self['pore.external'][Ps] = True\n\n # Begin process of removing, adjusting, and labeling pores\n self['pore.surface'] = False\n self['throat.surface'] = False\n\n # Label Delaunay pores on the surface\n Ps = self.pores('external', mode='not')\n Ps = self.find_neighbor_pores(pores=Ps)\n Ps = self.filter_by_label(pores=Ps, labels='delaunay')\n self['pore.surface'][Ps] = True\n self['pore.external'][Ps] = False # So they aren't deleted below\n\n # Label Voronoi pores on surface\n Ps = self.pores('external')\n Ps = self.find_neighbor_pores(pores=Ps)\n Ps = self.filter_by_label(pores=Ps, labels='voronoi')\n self['pore.surface'][Ps] = True\n\n # Label Voronoi and interconnect throats on surface\n Ps = self.pores('surface')\n Ts = self.find_neighbor_throats(pores=Ps, mode='intersection')\n self['throat.surface'][Ts] = True\n\n # Trim external pores\n Ps = self.pores('external')\n self.trim(pores=Ps)\n\n # Trim throats between Delaunay surface pores\n Ps = self.pores(labels=['surface', 'delaunay'], mode='intersection')\n Ts = self.find_neighbor_throats(pores=Ps, mode='intersection')\n self.trim(throats=Ts)\n\n # Move Delaunay surface pores to centroid of Voronoi facet\n Ps = self.pores(labels=['surface', 'delaunay'], mode='intersection')\n for P in Ps:\n Ns = self.find_neighbor_pores(pores=P)\n Ns = self.filter_by_label(pores=Ns, labels='voronoi')\n coords = sp.mean(self['pore.coords'][Ns], axis=0)\n self['pore.coords'][P] = coords\n\n self['pore.internal'] = ~self['pore.surface']\n self['throat.internal'] = ~self['throat.surface']\n\n # Clean-up\n del self['pore.external']", "def 
realEqual(x,y,eps=10e-10):\n return abs(x-y) < eps", "def __eq__(self, y):\n if isinstance(y, Annotation):\n if (self._name == y._name) and (self._value == y._value):\n\n if self.start <= y.start:\n\n if self.end <= y.end:\n y._start = self.start\n self._end = y.end\n return True\n elif self.end > y.end:\n y._start = self.start\n y._end = self.end\n return True\n \n elif self.start > y.start:\n \n if self.end <= y.end:\n self._start = y._start\n self._end = y.end\n return True\n elif self.end > y.end:\n self._start = y._start\n y._end = self.end\n return True\n return False", "def _post_dominate(self, reversed_graph, n1, n2):\n\n ds = networkx.dominating_set(reversed_graph, n1)\n return n2 in ds", "def condition_domain_reduction(csp, var) :\n return True", "def condition_domain_reduction(csp, var) :\n return True", "def in_domain_set(self, var1, var2, var3, var4, var5):\n var6 = 0\n var7 = var4 - 1\n var8 = -1\n var10 = 2\n while var6 <= var7:\n var8 = (var6 + var7) / 2\n var9 = var1[var5 + 1] - ord(var3[var8][1])\n if var9 > 0:\n var6 = var8 + 1\n elif var9 < 0:\n var7 = var8 - 1\n else:\n var9 = var1[var5] - ord(var3[var8][0])\n if var9 > 0:\n var6 = var8 + 1\n else:\n if var9 >= 0:\n break\n var7 = var8 - 1\n if var6 > var7:\n return 0\n else:\n while var10 < var2 and var1[var5 + var10] == ord(var3[var8][var10]):\n var10 = var10 + 1\n return 1 if var10 == var2 else 0", "def _isproperdist(X):\n X = np.asarray(X)\n if not np.allclose(np.sum(X), 1) or not np.all(X>=0) or not np.all(X<=1):\n return False\n else:\n return True", "def prune(self,domains,constraint):\n left_var = constraint.left[0]\n left_const_mult = constraint.left[1]\n left_val = constraint.left[2]\n\n right_var = constraint.right[0]\n right_const_mult = constraint.right[1]\n right_val = constraint.right[2]\n\n new_domains = deepcopy(domains)\n\n\n # Simple Variable-Value Labeling\n if (left_val == [0] and left_const_mult == [1]) and (right_const_mult == [0]):\n new_domains[left_var[0]] = [right_val[0]]\n \n # Simple Variable-Variable Labeling\n elif (left_val == [0] and left_const_mult == [1]) and (right_val == [0] and right_const_mult == [1]):\n new_set = set(new_domains[left_var[0]]) & set(new_domains[right_var[0]])\n new_domains[left_var[0]] = list(new_set)\n new_domains[right_var[0]] = list(new_set)\n\n else:\n l = 0\n for var,mult in zip(left_var,left_const_mult):\n l += mult*max(domains[var])\n for const in left_val:\n l += const\n\n r = 0\n for var,mult in zip(right_var,right_const_mult):\n r += mult*min(domains[var])\n for const in right_val:\n r += const\n\n # print(l,r)\n # print(new_domains)\n # print(constraint)\n\n for var,mult in zip(left_var,left_const_mult):\n max_var = max(domains[var])\n comp = (r-(l-mult*max_var)) / mult\n for elem in domains[var]:\n if elem < comp:\n new_domains[var].remove(elem)\n\n for var,mult in zip(right_var,right_const_mult):\n min_var = min(domains[var])\n comp = (l-(r-mult*min_var)) / mult\n for elem in domains[var]:\n if elem > comp:\n new_domains[var].remove(elem)\n\n # for i,domain in enumerate(new_domains):\n # if len(domain) == 0:\n # print(i,l,r)\n # print(\"Old:\",domains)\n # print(\"New:\",new_domains)\n # print(domains)\n # print(constraint)\n # print(\"------------------------\")\n # raise SystemError(\"Domain is Empty!!\")\n\n return new_domains", "def fn(x):\n if x not in graph: return x == destination\n if color[x]: return color[x] == 1\n color[x] = -1 \n for xx in graph[x]: \n if not fn(xx): return False \n color[x] = 1\n return True", "def reversible(self) -> 
bool:\n xy_row = np.column_stack(\n (\n np.linspace(\n -self.imgsz[0] / (2 * self.f[0]),\n self.imgsz[0] / (2 * self.f[0]),\n int(self.imgsz[0]),\n ),\n np.zeros(int(self.imgsz[0])),\n )\n )\n dxy = self._distort(xy_row)\n continuous_row = np.all(dxy[1:, 0] >= dxy[:-1, 0])\n xy_col = np.column_stack(\n (\n np.zeros(int(self.imgsz[1])),\n np.linspace(\n -self.imgsz[1] / (2 * self.f[1]),\n self.imgsz[1] / (2 * self.f[1]),\n int(self.imgsz[1]),\n ),\n )\n )\n dxy = self._distort(xy_col)\n continuous_col = np.all(dxy[1:, 1] >= dxy[:-1, 1])\n return continuous_row and continuous_col", "def replace_version(self, other, logger):\n\n if other.library_name != self.library_name:\n logger.debug(\n 'not replacable: {} != {} ()'\n .format(other.library_name, self.library_name)\n )\n return False\n elif int(other.major_version) != int(self.major_version):\n logger.debug(\n 'not replacable: {} != {} ({})'\n .format(\n int(self.major_version),\n int(other.major_version),\n other.filename,\n )\n )\n return False\n elif float(other.minor_version) >= float(self.minor_version):\n logger.debug(\n 'not replacable: {} >= {} ({})'\n .format(\n other.minor_version,\n self.minor_version,\n other.filename,\n )\n )\n return False\n else:\n return True", "def test_point_is_not_in_arc_range(p):\n arc = ConstructionArc((0, 0), 1, -90, 90)\n assert arc._is_point_in_arc_range(Vec2(p)) is False", "def symetrisch(x, y):\n if ((x % 10) == (y // 10)) and ((x // 10) == (y % 10)):\n return True\n else:\n return False", "def is_goal_unreachable(self, x, y, theta):\n self.current_x = x\n self.current_y = y\n self.wp_goal_unreachable = Point(self.current_x,self.current_y)\n self.dist_btw_follow_goal_unreachable = abs(self.wp_goal_unreachable.distance_to(self.wp_follow))\n #print self.is_left_line\n #print self.dist_btw_follow_goal_unreachable\n if self.dist_btw_follow_goal_unreachable < self.TOLERANCE and self.is_left_line == 1:\n print \"goal unreachable\"\n return True\n else:\n return False", "def nearlyEqual(self, x, y):\n return self.absoluteerror(x).nearlyEqual(x, y)", "def can_safely_release(*repo_paths):\n if repo_has_uncommitted():\n return False\n if repo_has_incoming(*repo_paths):\n return False\n if repo_has_outgoing():\n return continue_with_outgoing()\n return True", "def remove_arcs(stts, domain, neighbs, statei, statej):\n\t\t\tgone = False\n\t\t\tfor restaurantx in domain[statei]:\n\t\t\t\tarc_checker = (map(lambda restauranty: not check_sol(states, statei, restaurantx, statej, restauranty), domain[statej]))\n\t\t\t\tif arc_checker == [False for x in range(len(arc_checker))]:\n\t\t\t\t\tdomain[statei].remove(restaurantx)\n\t\t\t\t\tgone = True\n\t\t\treturn gone", "def _check_youngest(self, src_repo, dst_repo):\n try:\n if not (os.path.exists(src_repo)):\n logging.warn('repository %s does not exist.' % src_repo)\n return False\n if not os.path.isdir(src_repo):\n logging.warn('repository %s should be directory.' % src_repo)\n return False \n if not (os.path.exists(dst_repo)):\n return True \n if not os.path.isdir(dst_repo):\n logging.warn('%s exists as a file, it should not.' 
% dst_repo)\n return False \n src_rev = subprocess.check_output(\n [self._svnlook, self._SVN_LOOK_YOUNGEST, src_repo],\n shell=True).strip()\n dst_rev = subprocess.check_output(\n [self._svnlook, self._SVN_LOOK_YOUNGEST, dst_repo],\n shell=True).strip()\n if not (src_rev.isdigit() and dst_rev.isdigit()):\n logging.error(\"%s %s don't return pure numeric revision number.\" %\n (self._svnlook, self._SVN_LOOK_YOUNGEST ))\n return False\n if int(src_rev) > int(dst_rev):\n logging.debug('%s is younger than %s' % (src_repo, dst_repo))\n return True\n return False\n except subprocess.CalledProcessError:\n logging.exception('%s %s running fails.' % \n (self._svnlook, self._SVN_LOOK_YOUNGEST))\n return False", "def _check_guts_eq(attr, old, new, last_build):\n if old != new:\n print \"building because %s changed\" % attr\n return True\n return False", "def almost_equals(self, other):\n import math\n ox, oy = other\n dx = self[0] - ox\n dy = self[1] - oy\n return (dx*dx + dy*dy) < pygonal.EPSILON2", "def check_in(x, y, R=Re):\n r = np.sqrt(x ** 2 + y ** 2)\n return r <= R", "def revise(csp, Xi, Xj, removals, checks=0):\r\n revised = False\r\n for x in csp.curr_domains[Xi][:]:\r\n conflict = True\r\n for y in csp.curr_domains[Xj]:\r\n if csp.constraints(Xi, x, Xj, y):\r\n conflict = False\r\n checks += 1\r\n if not conflict:\r\n break\r\n if conflict:\r\n csp.prune(Xi, x, removals)\r\n revised = True\r\n # we check if domains list for our current variable is empty\r\n # and we increase weight for current variable\r\n if not csp.curr_domains[Xi]:\r\n csp.weight[(Xi,Xj)] += 1\r\n\r\n return revised, checks", "def update_domain(self, centre=[0.5,0.5]):\n centre = np.array(centre)\n assert(centre.size == 2)\n \n # First, copy dead dude information back to original prey block\n i_dead = self.prey['alive'] == 0\n self.prey_original['alive'][self.prey[i_dead]['index']] = 0\n \n # Remove dead prey items\n self.prey = self.prey[~i_dead]\n \n # Recenter and recompute tree\n self.prey['position'] = np.remainder(self.prey['position'] + \n np.array([0.5, 0.5]) - centre, 1.)\n \n if self.prey.size != 0:\n self.tree = scipy.spatial.cKDTree(self.prey['position'])", "def consistent(self, assignment):\n # print(\"Entered consistent Function\")\n # print(\"assignment\")\n # print(assignment)\n\n overlaps = self.crossword.overlaps\n value_set = set()\n for variable in assignment: \n #checking overlaps with neighbors\n neighbors = self.crossword.neighbors(variable)\n for neighbor in neighbors:\n overlap = overlaps[(variable, neighbor)]\n if (neighbor in assignment):\n # print(\"var 1 overlap letter\")\n # print(assignment[variable][overlap[0]])\n # print(\"var 2 overlap letter\")\n # print(assignment[neighbor][overlap[1]])\n if (assignment[variable][overlap[0]] is not assignment[neighbor][overlap[1]]):\n return False\n \n # print(\"neighbors\")\n # print(neighbors)\n\n #checking that the assignment is the correct length for the variable\n if (variable.length != len(assignment[variable])):\n return False\n\n #the set to check for distinct variables later\n value_set.add(assignment[variable])\n\n #Checking that all variables are distinct\n #these should be the same length unless two or more variables share an value\n if( len(value_set) is not len(assignment)): \n return False\n \n return True\n\n # raise NotImplementedError", "def _numpy_checker(x, y):\r\n x, y = x[0], y[0]\r\n if (x.dtype != y.dtype or x.shape != y.shape\r\n or numpy.any(numpy.abs(x - y) > 1e-10)):\r\n raise Exception(\"Output mismatch.\", 
{'performlinker': x, 'clinker': y})", "def compare_geometrycollection(config, geometry_x, geometry_y):\n if config in BLIST:\n # print('arc distance: %s' % str(arc_distance(x, y)))\n return arc_distance(geometry_x, geometry_y) < EPOCH_CURVE_RELATIVE\n # return True\n # else:\n # print('arc distance: %s' % str(arc_distance(x, y)))\n # return False\n # else:\n\n if not config in BLIST:\n arct = wkt.loads(geometry_x)\n pgis = wkt.loads(geometry_y)\n result = arct.equals(pgis)\n return result\n\n return False", "def is_collision_at(self, x, y):\n return self._on_post(x, y)", "def fixDomains(self, domainMin, domainMax, fixToDomain):\n\n return 0", "def test_for_discontinuity(a_n,b_n,c_n,d_n,x_n,x_n_plus_1,y_n_plus_1):\n\ty_n_final = a_n + b_n*(x_n_plus_1-x_n) + c_n*(x_n_plus_1-x_n)**2 + d_n*(x_n_plus_1-x_n)**3\n\tresult = abs(y_n_final-y_n_plus_1)<0.001\n\treturn(result)", "def forwardcheck(var, val, assignment, user_dict):\n\t\tif curr_domains:\n\t\t\tfor (meal, restaurant) in curr_deleted[var]:\n\t\t\t\tcurr_domains[meal].append(restaurant)\n\t\t\tcurr_deleted[var] = []\n\n\t\t\tfor meal in neighbors[var]:\n\t\t\t\tif meal not in assignment:\n\t\t\t\t\tfor restaurant in curr_domains[meal][:]:\n\t\t\t\t\t\tnum_cats = count_categories(assignment.values())\n\t\t\t\t\t\tif not constraints_match(num_cats, user_dict):\n\t\t\t\t\t\t#if not user_solution_checker(user_dict, meal, restaurant, assignment):\n\t\t\t\t\t\t\tcurr_domains[meal].remove(restaurant)\n\t\t\t\t\t\t\tcurr_deleted[var].append((meal, restaurant))", "def __ne__(self, other):\n return self.x != other.x or self.y != other.y", "def check_coord_in_range(self, x, y):\n return 0 <= x < self.cols and 0 <= y < self.lines", "def check_equal(x, y):\r\n #I put the import here to allow using theano without scipy.\r\n import scipy.sparse as sp\r\n x, y = x[0], y[0]\r\n\r\n # TODO: bug in current scipy, two sparse matrices are never equal,\r\n # remove when moving to 0.7\r\n if sp.issparse(x):\r\n x = x.todense()\r\n if sp.issparse(y):\r\n y = y.todense()\r\n\r\n if isinstance(x, numpy.ndarray) and isinstance(y, numpy.ndarray):\r\n if (x.dtype != y.dtype\r\n or x.shape != y.shape\r\n or numpy.any(abs(x - y) > 1e-10)):\r\n raise Exception(\"Output mismatch.\",\r\n {'performlinker': x, 'clinker': y})\r\n else:\r\n if x != y:\r\n raise Exception(\"Output mismatch.\",\r\n {'performlinker': x, 'clinker': y})", "def forward_checking(csp, var, value, assignment, removals):\r\n csp.support_pruning()\r\n check=0\r\n for B in csp.neighbors[var]:\r\n if B not in assignment:\r\n for b in csp.curr_domains[B][:]:\r\n check+=1\r\n if not csp.constraints(var, value, B, b):\r\n csp.prune(B, b, removals)\r\n # we have a failure\r\n # we check if domains list for variable B is not empty\r\n # and increase weight of B,var by 1\r\n if not csp.curr_domains[B]:\r\n csp.weight[(B,var)] += 1\r\n return False,check\r\n return True,check", "def _isArcTangentToArc(self, px, py, cx1, cy1, cx2, cy2):\n p = QPointF(px, py)\n v1 = QVector2D(p - QPointF(cx1, cy1)).normalized()\n v2 = QVector2D(p - QPointF(cx1, cy1)).normalized()\n if abs(v1.dotProduct(v1, v2)) - 1.0 <= 1e-6:\n # TODO: handle case where arc turns back into the other arc\n return True\n else:\n return False", "def remove_point(self, x):\n\n idx = np.sum(np.abs(self.proposed_points - x), axis=1).argmin()\n if np.sum(np.abs(self.proposed_points[idx, :] - x)) < 1e-10:\n self.proposed_points = np.delete(self.proposed_points, idx, axis=0)\n return True\n return False", "def remove_point(self, x):\n\n idx = 
np.sum(np.abs(self.proposed_points - x), axis=1).argmin()\n if np.sum(np.abs(self.proposed_points[idx, :] - x)) < 1e-10:\n self.proposed_points = np.delete(self.proposed_points, idx, axis=0)\n return True\n return False", "def is_equal_to(self, another_labyrinth):\n \n if self.equals_list_nodes(self.list_empty_nodes, another_labyrinth.list_empty_nodes) and \\\n self.equals_list_nodes(self.list_wall_nodes, another_labyrinth.list_wall_nodes) and \\\n self.start_point.position_is_equal_to(another_labyrinth.start_point) and \\\n self.exit_point.position_is_equal_to(another_labyrinth.exit_point):\n return True\n \n else:\n return False", "def check_deviation(x, y_comp, y_ref, rtol, x_range=None):\n if x_range is not None:\n condition = (x >= x_range[0]) * (x <= x_range[1])\n y_ref = y_ref[condition]\n y_comp = y_comp[condition]\n return np.allclose(y_comp, y_ref, atol=0, rtol=rtol)", "def torch_the_same(X, Y, eps=1e-8):\n return (X - Y).abs().min() < eps", "def remove_point(self, x):\n idx = np.sum(np.abs(self.proposed_points - x), axis=1).argmin()\n if np.sum(np.abs(self.proposed_points[idx, :] - x)) < 1e-10:\n self.proposed_points = np.delete(self.proposed_points, idx, axis=0)\n return True\n return False", "def _isLineTanToArc(self, x1, y1, x2, y2, cx, cy, d):\n p = QPointF(x2, y2)\n # line start -> end\n v1 = QVector2D(p - QPointF(x1, y1)).normalized()\n # arc center -> arc start\n v2 = QVector2D(p - QPointF(cx, cy)).normalized()\n if abs(v1.dotProduct(v1, v2)) <= 1e-6:\n # TODO: handle case where arc turns back into the line\n return True\n else:\n return False", "def __contains__(self, other):\n x, y = other\n return self.radius >= sqrt((x - self.x) ** 2 + (y - self.y) ** 2)", "def almost_equal(x, y):\n return abs(x-y) < FP_PREC", "def has_converged(x, y, epsilon=EPSILON):\n for a, b in itertools.izip(x, y):\n if abs(a - b) > epsilon:\n return False\n return True", "def has_converged(x, y, epsilon=EPSILON):\n for a, b in itertools.izip(x, y):\n if abs(a - b) > epsilon:\n return False\n return True", "def check_nondiff_rop(self, y):\r\n raised = False\r\n try:\r\n tmp = tensor.Rop(y, self.x, self.v)\r\n except ValueError:\r\n raised = True\r\n if not raised:\r\n self.fail((\r\n 'Op did not raise an error even though the function'\r\n ' is not differentiable'))", "def complement_of(self, other: 'Concept') -> bool:\n return (not self._extent & other._extent\n and (self._extent | other._extent) == self.lattice.supremum._extent)", "def compare_geometry(config, geometry_x, geometry_y):\n if geometry_x.upper().endswith('EMPTY') and geometry_y.upper().endswith(\n 'EMPTY'):\n return True\n\n if config in BLIST:\n return arc_distance(geometry_x, geometry_y) < EPOCH_CURVE_RELATIVE\n # return True\n # else:\n # print('arc distance: %s' %\n # str(arc_distance(geometry_x, geometry_y)))\n # return False\n\n if not config in BLIST:\n arct = wkt.loads(geometry_x)\n pgis = wkt.loads(geometry_y)\n result = arct.equals_exact(pgis, EPOCH)\n return result\n\n return False", "def _cryptovariables_equal(x, y):\n\n return (\n _hmac_sha256(CRYPTOVARIABLE_EQUALITY_COMPARISON_NONCE, x) ==\n _hmac_sha256(CRYPTOVARIABLE_EQUALITY_COMPARISON_NONCE, y))", "def forward_check(self, values):\n made_changes = True\n while made_changes:\n pre_solved_vals = len([box for box in values.keys() if len(values[box]) == 1])\n values = self.remove(values)\n # Check how many boxes have a determined value, to compare\n post_solved_vals = len([box for box in values.keys() if len(values[box]) == 1])\n # If no new values were added, stop 
the loop.\n made_changes = pre_solved_vals != post_solved_vals\n # If any box have an invalid domain, puzzle has no solution.\n if len([box for box in values.keys() if len(values[box]) == 0]):\n return False\n return values" ]
[ "0.7254103", "0.62908906", "0.567439", "0.5597688", "0.53487086", "0.5329222", "0.5265961", "0.51735663", "0.5155395", "0.5139437", "0.51281583", "0.5127078", "0.50768644", "0.49855748", "0.49679717", "0.49677834", "0.49434954", "0.49179846", "0.4910346", "0.48950416", "0.48812333", "0.48795614", "0.48634267", "0.48520654", "0.4828561", "0.4819834", "0.4778261", "0.4777642", "0.47768545", "0.4769784", "0.47574493", "0.47487372", "0.47478953", "0.47213757", "0.47193474", "0.47193474", "0.47190732", "0.47150022", "0.47115207", "0.47097096", "0.4709513", "0.470859", "0.47039434", "0.470349", "0.4695849", "0.46897888", "0.46788767", "0.46246284", "0.46229237", "0.46208733", "0.46139187", "0.4609456", "0.46032324", "0.46032324", "0.4593893", "0.45910853", "0.45750654", "0.45748073", "0.45676404", "0.4564944", "0.45554978", "0.45542592", "0.45483184", "0.45394278", "0.4539318", "0.45331413", "0.45285207", "0.4527907", "0.45254478", "0.45177978", "0.45163232", "0.45160234", "0.45088807", "0.4507976", "0.4505035", "0.44934505", "0.44926542", "0.44866803", "0.44857937", "0.44845927", "0.44805753", "0.44797412", "0.44787222", "0.4476222", "0.44683272", "0.44683272", "0.4467919", "0.4464472", "0.44568482", "0.4450823", "0.4448934", "0.44449678", "0.44403434", "0.4436195", "0.4436195", "0.44346756", "0.44345766", "0.4434489", "0.44328052", "0.4425906" ]
0.61482406
2
Update `self.domains` such that each variable is arc consistent. If `arcs` is None, begin with initial list of all arcs in the problem. Otherwise, use `arcs` as the initial list of arcs to make consistent. Return True if arc consistency is enforced and no domains are empty; return False if one or more domains end up empty.
def ac3(self, arcs=None): # print("Entered ac3 Function") revise = False if arcs is None: arcs = set() for arc in self.crossword.overlaps: arcs.add(arc) while arcs: arc = arcs.pop() # print("arc") # print(arc) revise = self.revise(arc[0], arc[1]) if revise: arcs.update(self.crossword.neighbors(arc[0])) if (self.domains[arc[0]] is None): return False # print("revise") # print(revise) # print("arc") # print(arc) # input() # print("") # print("") # print("arcs") # print(arcs) # print("") # print("") return True # raise NotImplementedError
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ac3(self, arcs=None):\n if arcs == None:\n #creates a queue of arcs to update\n arcs= []\n for node1 in self.domains:\n for node2 in self.domains:\n if node1 != node2:\n #for each pair of nodes that intersect, add them as a tuple pair to a list of arcs\n if self.crossword.overlaps[node1,node2] != None: \n arcs.append((node1,node2))\n\n while arcs != []:\n x= arcs[0][0]\n y= arcs[0][1]\n\n if self.revise(x, y):\n #if the domain of node x is empty after revision, this problem has no solution\n if len(self.domains[x]) == 0:\n return False\n #if the arc is updated successfully, node x may no longer be arc consistent in respect to other nodes that it may have been before\n #we must then add the arcs between the revised x and all of its neighbors(except y as we have just checked it) to the queue\n for neighbor in self.crossword.neighbors(x):\n if neighbor != y:\n arcs.append((neighbor, x))\n #remove arcs from queue after revision\n arcs.pop(0)\n else:\n arcs.pop(0)\n \n return True", "def ac3(self, arcs=None):\n if arcs is None:\n arcs = [arc for arc in self.crossword.overlaps if arc is not None]\n while len(arcs) != 0:\n (x, y) = arcs.pop()\n if self.revise(x, y):\n if len(self.domains[x]) == 0:\n return False\n # if the domain of x is not empty, enqueue neighbors\n for neighbor in self.crossword.neighbors(x):\n if neighbor is not None and not neighbor == y:\n arcs.append((neighbor, x))\n return True", "def ac3(csp, arcs=None):\n\n queue_arcs = deque(arcs if arcs is not None else csp.constraints.arcs())\n while queue_arcs:\n var1, var2 = queue_arcs.popleft()\n\n # Propagate changes in var1.domain to neighbors\n if revise(csp, var1, var2):\n if len(var1.domain) == 0:\n return False\n for (v, neighbor) in csp.constraints[var1].arcs():\n if (neighbor != var2):\n queue_arcs.append((v, neighbor))\n return True", "def arcConsistency(self, constraint):\n # start out assuming the constraint is satisfied\n satisfied = True\n # if the tail is assigned then we don't need to do anything\n if (constraint.tail.value != \"none\"):\n # the arc is consistent\n return satisfied\n # if the head is assigned a value then we compare the tail domain to the assigned value\n if (constraint.head.value != \"none\"):\n # make a copy of the tail domain to loop through\n tailDomain = constraint.tail.domain[:]\n # loop through all values in the tail domain\n for tailValue in tailDomain:\n # if this value doesn't satisfy the constraint then remove the value from the domain\n if (not constraint.satisfied(tailValue, constraint.head.value)):\n # record that the constraint wasn't satisfied\n satisfied = False\n # remove the value from the domain\n constraint.tail.domain.remove(tailValue)\n # return whether or not the constraint was satisfied\n return satisfied\n # if the head is not assigned a value then we compare the tail domain to each value in the head domain\n # start assuming the tail domain has not been modified\n domainModified = False\n # make a copy of the tail domain to loop through\n tailDomain = constraint.tail.domain[:]\n # loop through all values in the tail domain\n for tailValue in tailDomain:\n # start out assuming the constraint is not satisfied\n satisfied = False\n # loop through all values in the head domain\n for headValue in constraint.head.domain:\n # does this value satisfy the constraint\n if (constraint.satisfied(tailValue, headValue)):\n # record that the constraint wasn't satisfied\n satisfied = True\n # if we didn't find a value in the head that works with the tail value\n if (not 
satisfied):\n # remove the tail value from the domain\n constraint.tail.domain.remove(tailValue)\n # mark that we removed something from the tail domain\n domainModified = True\n # return whether or not the constraint was satisfied\n return (not domainModified)", "def ac3(csp, arcs=None):\n #print \"============BEGIN==================\"\n\n queue_arcs = deque(arcs if arcs is not None else csp.constraints.arcs())\n\n \"\"\"\n print \"QUEUE ARCS\"\n for x in queue_arcs:\n print x\n print \"fin queue arcs\"\n print \"\\nconstraints\"\n for x in csp.constraints:\n print x\n print \"end constraints\"\n \"\"\"\n while queue_arcs:\n (v1, v2) = queue_arcs.pop()\n #print str(v1) + \"---\"+ str(v2)\n if revise(csp, v1, v2):\n if not v1.domain:\n return False\n #print str(v1)+ \"LOOK HEREREREREREREAFVSD\"\n for c in csp.constraints[v1]:\n #print \"WTF IS THE ARC\" + str(c)\n if c.var2 != v1 and c.var2 != v2:\n queue_arcs.append((c.var2,v1))\n\n \"\"\"print \"AC3 IS RETURNING TRUE\"\n for x in queue_arcs:\n print x\"\"\"\n\n return True\n \n\n # TODO implement this\n pass", "def is_consistent(self, constraints):\n for constraint in constraints:\n if not constraint.is_satisfied_with(self):\n return False\n return True", "def __forward_check(self, assigned_var, assigned_value, unassigned_vars):\n for unassigned_neighbor in self.__unassigned_neighbors(assigned_var, unassigned_vars):\n consistent_values = self.__consistent_domain_values(assigned_var, assigned_value, unassigned_neighbor)\n if len(consistent_values) == 0:\n return False\n else:\n unassigned_neighbor.domain = consistent_values\n return True", "def make_arc_consistent(Xj, Xk, csp):\r\n # csp.curr_domains[Xj] = []\r\n for val1 in csp.domains[Xj]:\r\n keep = False # Keep or remove val1\r\n for val2 in csp.domains[Xk]:\r\n if csp.constraints(Xj, val1, Xk, val2):\r\n # Found a consistent assignment for val1, keep it\r\n keep = True\r\n break\r\n\r\n if not keep:\r\n # Remove val1\r\n csp.prune(Xj, val1, None)\r\n\r\n return csp.curr_domains[Xj]", "def checkIncToSets(_session, _el, _sets, _arc_type):\n for set in _sets:\n if _session.search_one_shot(_session.sc_constraint_new(sc_constants.CONSTR_3_f_a_f,\n set,\n sc.SC_ARC | _arc_type,\n _el), True, 3) is None:\n return False\n \n return True", "def _check_domain_additional(cls, domain: D) -> bool:\n action_space = domain.get_action_space().unwrapped()\n observation_space = domain.get_observation_space().unwrapped()\n\n if not isinstance(action_space, Iterable) and not isinstance(action_space, gym.spaces.Tuple):\n action_space = [action_space]\n if not isinstance(observation_space, Iterable) and not isinstance(observation_space, gym.spaces.Tuple):\n observation_space = [observation_space]\n\n flat_action_space = list(flatten(action_space))\n flat_observation_space = list(flatten(observation_space))\n\n print(flat_action_space)\n print(flat_observation_space)\n\n valide_action_space = True\n for x in flat_action_space:\n valide_action_space = isinstance(x,(gym.spaces.Tuple, gym.spaces.Discrete, gym.spaces.Box))\n \n validate_observation_space = True\n for x in flat_observation_space:\n validate_observation_space = isinstance(x,(gym.spaces.Tuple, gym.spaces.Discrete, gym.spaces.Box))\n \n return valide_action_space and validate_observation_space", "def consistent(self):\n return all((constraint.consistent() for constraint in self.constraints))", "def check_reco_dist_consistency(self, dist_list):\n logging.trace(\" Verifying correct normalisation of resolution function.\")\n # Obtain list of all 
distributions. The sum of their relative weights\n # should yield 1.\n frac_sum = np.zeros_like(dist_list[0]['fraction'])\n for dist_dict in dist_list:\n frac_sum += dist_dict['fraction']\n if not recursiveEquality(frac_sum, np.ones_like(frac_sum)):\n err_msg = (\"Total normalisation of resolution function is off\"\n \" (fractions do not add up to 1).\")\n raise ValueError(err_msg)\n return True", "def any_holds(self, domains, const, env, other_vars, ind=0, assigned_vars=[]):\r\n # All the variables in the scope of the constraints has been assigned.\r\n if ind == len(other_vars): \r\n return const.holds(env)\r\n else:\r\n var = other_vars[ind]\r\n for val in domains[var]:\r\n env[var] = val\r\n if is_all_unique(env, [var] + assigned_vars) and self.any_holds(domains, const, env, other_vars, ind + 1, [var] + assigned_vars):\r\n return True\r\n return False", "def isarc(a):\n if not isinstance(a,list):\n return False\n n = len(a)\n if n < 2 or n > 3:\n return False\n if not (ispoint(a[0]) and isvect(a[1])):\n return False\n if a[1][3] not in (-1,-2): # is psuedovector marked?\n return False\n r =a[1][0] \n if r < 0:\n # is radius less than zero? if so, not a valid arc\n return False\n if n == 3 and ( not ispoint(a[2]) or abs(mag(a[2])-1.0) > epsilon):\n # if plane-definition vector is included but is non-unitary,\n # it's not a valid arc\n return False\n \n return True", "def remove_arcs(stts, domain, neighbs, statei, statej):\n\t\t\tgone = False\n\t\t\tfor restaurantx in domain[statei]:\n\t\t\t\tarc_checker = (map(lambda restauranty: not check_sol(states, statei, restaurantx, statej, restauranty), domain[statej]))\n\t\t\t\tif arc_checker == [False for x in range(len(arc_checker))]:\n\t\t\t\t\tdomain[statei].remove(restaurantx)\n\t\t\t\t\tgone = True\n\t\t\treturn gone", "def can_left_arc(c, correct_arcs):\n try:\n return Arc(c.buffer[0], c.sentence[c.stack[-1]].deprel, c.stack[-1]) in correct_arcs\n except IndexError:\n return False", "def assignment_complete(self, assignment):\n if len(assignment) == len(self.domains):\n return True\n\n else:\n return False", "def check_paths(self):\n for path in self.paths:\n # check that arc starts at s\n arc = path[0]\n arc_start = self.arc_info[arc][\"start\"]\n assert(arc_start == self.source()), \"Path does not start at s\"\n # check that internal arcs are valid\n for (i, arc) in enumerate(path[:-1]):\n next_arc = path[i + 1]\n arc_destin = self.arc_info[arc][\"destin\"]\n next_arc_start = self.arc_info[next_arc][\"start\"]\n assert (arc_destin == next_arc_start), \"Invalid path\"\n arc = path[-1]\n arc_end = self.arc_info[arc][\"destin\"]\n assert(arc_end == self.sink()), \"Path does not end at t\"", "def can_right_arc(c, correct_arcs):\n try:\n return Arc(c.stack[-1], c.sentence[c.buffer[0]].deprel, c.buffer[0]) in correct_arcs \\\n and has_all_children(c.buffer[0], c, correct_arcs)\n except IndexError:\n return False", "def is_solvable(self):\n if self._is_solvable is None:\n if self.order() % 2 != 0:\n return True\n ds = self.derived_series()\n terminator = ds[len(ds) - 1]\n gens = terminator.generators\n degree = self.degree\n identity = _af_new(list(range(degree)))\n if all(g == identity for g in gens):\n self._is_solvable = True\n return True\n else:\n self._is_solvable = False\n return False\n else:\n return self._is_solvable", "def __isOnDomainList(self, rules, domain):\n for rule in rules:\n if rule.startswith(\".\"):\n if domain.endswith(rule):\n return True\n \n withoutDot = rule[1:]\n if domain == withoutDot:\n return True\n else:\n 
domainEnding = domain[-(len(rule) + 1):]\n if (\n domainEnding and\n domainEnding[0] == \".\" and\n domain.endswith(rule)\n ):\n return True\n \n if rule == domain:\n return True\n \n return False", "def consistent(self, assignment):\n # print(\"Entered consistent Function\")\n # print(\"assignment\")\n # print(assignment)\n\n overlaps = self.crossword.overlaps\n value_set = set()\n for variable in assignment: \n #checking overlaps with neighbors\n neighbors = self.crossword.neighbors(variable)\n for neighbor in neighbors:\n overlap = overlaps[(variable, neighbor)]\n if (neighbor in assignment):\n # print(\"var 1 overlap letter\")\n # print(assignment[variable][overlap[0]])\n # print(\"var 2 overlap letter\")\n # print(assignment[neighbor][overlap[1]])\n if (assignment[variable][overlap[0]] is not assignment[neighbor][overlap[1]]):\n return False\n \n # print(\"neighbors\")\n # print(neighbors)\n\n #checking that the assignment is the correct length for the variable\n if (variable.length != len(assignment[variable])):\n return False\n\n #the set to check for distinct variables later\n value_set.add(assignment[variable])\n\n #Checking that all variables are distinct\n #these should be the same length unless two or more variables share an value\n if( len(value_set) is not len(assignment)): \n return False\n \n return True\n\n # raise NotImplementedError", "def is_complete(self, variables):\n for var in variables:\n if not self.has_assignment_for(var):\n return False\n\n return True", "def _check_consistency(self, item, path, targets):\n for neighbor in self._edges[path[-1]]:\n if neighbor in path:\n continue\n elif self._nodes[neighbor][item] in (EMPTY, VISITED):\n continue\n\n remaining = set(targets)\n if neighbor in targets:\n remaining.remove(neighbor)\n if len(remaining) == 0:\n return True\n\n if self._check_consistency(item, path + [neighbor], remaining):\n return True\n\n return False", "def enforce_node_consistency(self):\n # Loop over each variable (space for word) in the crossword\n # Use copy to prevent domains from being modified while looping\n for var in self.domains.copy():\n # Get all unary constraints for this variable\n for value in self.domains[var].copy():\n # Check if the value is consistent with all unary constraints\n if len(value) != var.length:\n # If not, remove the value from the domain\n self.domains[var].remove(value)\n # No return value is necessary", "def all(self):\n for v in self.sects.values():\n if not np.all(v):\n return False\n if self.is_full():\n return True\n else:\n return np.all(self.defval)", "def has_arc(self, a, b):\n return self.matrix[a][b] != 0", "def check_sa_ea_for_each_branch(self, conn_components):\n parallel_cut_sa = list(set(self.initial_start_activities).union(\n infer_start_activities_from_prev_connections_and_current_dfg(self.initial_dfg, self.dfg, self.activities,\n include_self=False)).intersection(\n self.activities))\n parallel_cut_ea = list(set(self.initial_end_activities).union(\n infer_end_activities_from_succ_connections_and_current_dfg(self.initial_dfg, self.dfg, self.activities,\n include_self=False)).intersection(\n self.activities))\n\n if conn_components is None:\n return False\n\n for comp in conn_components:\n comp_sa_ok = False\n comp_ea_ok = False\n\n for sa in parallel_cut_sa:\n if sa in comp:\n comp_sa_ok = True\n break\n for ea in parallel_cut_ea:\n if ea in comp:\n comp_ea_ok = True\n break\n\n if not (comp_sa_ok and comp_ea_ok):\n return False\n\n return True", "def do_access_ranges_overlap_conservative(\n self, 
insn1, insn1_dir, insn2, insn2_dir, var_name):\n\n insn1_arange = self._get_access_range_for_var(insn1, insn1_dir, var_name)\n insn2_arange = self._get_access_range_for_var(insn2, insn2_dir, var_name)\n\n if insn1_arange is False or insn2_arange is False:\n return False\n if insn1_arange is True or insn2_arange is True:\n return True\n\n return not (insn1_arange & insn2_arange).is_empty()", "def check_consistency(self):\n assert len(self.shape) == len(self.qhape) == len(self.dirs)\n # Qnums must be unique within a qim and correspond one-to-one with\n # dimensions in dim.\n assert all(\n (\n len(dim) == len(qim) == len(set(qim))\n for dim, qim in zip(self.shape, self.qhape)\n )\n )\n assert all(d == 1 or d == -1 for d in self.dirs)\n assert all(q == self._qod_func(q) for q in sum(self.qhape, []))\n # Check that every sect has a valid key and the correct shape and\n # dtype.\n for k, v in self.sects.items():\n assert v.dtype == self.dtype\n assert self.is_valid_key(k)\n block_shp_real = v.shape\n qnum_inds = tuple(\n self.qhape[i].index(qnum) for i, qnum in enumerate(k)\n )\n block_shp_claimed = tuple(\n [self.shape[i][j] for i, j in enumerate(qnum_inds)]\n )\n assert block_shp_claimed == block_shp_real\n if self.invar and (self.charge != 0 or not self.isscalar()):\n assert self.defval == 0\n return True", "def check_angle_of_arcs(self):\n\n if self.thin_arc_start_angle >= 3600:\n self.thin_arc_start_angle %= 360\n self.thin_arc_start_angle += 360\n\n elif self.thin_arc_start_angle <= -3600:\n self.thin_arc_start_angle %= 360\n self.thin_arc_start_angle -= 360\n\n if self.thin_arc_end_angle >= 3600:\n self.thin_arc_end_angle %= 360\n self.thin_arc_end_angle += 360\n\n elif self.thin_arc_end_angle <= -3600:\n self.thin_arc_end_angle %= 360\n self.thin_arc_end_angle -= 360\n\n if self.thick_arc_start_angle >= 3600:\n self.thick_arc_start_angle %= 360\n self.thick_arc_start_angle += 360\n\n elif self.thick_arc_start_angle <= -3600:\n self.thick_arc_start_angle %= 360\n self.thick_arc_start_angle -= 360\n\n if self.thick_arc_end_angle >= 3600:\n self.thick_arc_end_angle %= 360\n self.thick_arc_end_angle += 360\n\n elif self.thick_arc_end_angle <= -3600:\n self.thick_arc_end_angle %= 360\n self.thick_arc_end_angle -= 360", "def consistent(self,assignment):\n return all(con.holds(assignment)\n for con in self.constraints\n if all(v in assignment for v in con.scope))", "def is_valid(self) -> bool:\n if self.total <= 1:\n # Definitely valid (i.e. no conflict) if 0 or 1. In practice, this\n # function probably won't be called if there are 0 fixes, but 0 is\n # valid; it simply means \"no fixes to apply\".\n return True\n if self.total == 2:\n # This is only OK for this special case. We allow this because\n # the intent is clear (i.e. 
no conflict): Insert something *before*\n # the segment and something else *after* the segment.\n return self.create_before == 1 and self.create_after == 1\n # Definitely bad if > 2.\n return False # pragma: no cover", "def complete(self):\n return all((constraint.satisfied() for constraint in self.constraints))", "def can_auralise(self):\n\n if not self.sources:\n raise ValueError('No sources available')\n\n if not self.receivers:\n raise ValueError('No receivers available')\n\n if not self.atmosphere:\n raise ValueError('No atmosphere available.')\n\n if not self.geometry:\n raise ValueError('No geometry available.')\n\n return True", "def scale_arc_constraints(blk):\n for arc in blk.component_data_objects(Arc, descend_into=True):\n arc_block = arc.expanded_block\n if arc_block is None: # arc not expanded or port empty?\n _log.warning(\n f\"{arc} has no constraints. Has the Arc expansion transform \"\n \"been applied?\"\n )\n continue\n warning = (\n \"Automatic scaling for arc constraints is supported for \"\n \"only the Equality rule. Variable {name} on Port {port} was \"\n \"created with a different rule, so the corresponding constraint \"\n \"on {arc_name} will not be scaled.\"\n )\n port1 = arc.ports[0]\n port2 = arc.ports[1]\n for name in port1.vars.keys():\n if not port1.is_equality(name):\n _log.warning(\n warning.format(name=name, port=port1.name, arc_name=arc.name)\n )\n continue\n if not port2.is_equality(name):\n _log.warning(\n warning.format(name=name, port=port2.name, arc_name=arc.name)\n )\n continue\n con = getattr(arc_block, name + \"_equality\")\n for i, c in con.items():\n if i is None:\n sf = min_scaling_factor([port1.vars[name], port2.vars[name]])\n else:\n sf = min_scaling_factor([port1.vars[name][i], port2.vars[name][i]])\n constraint_scaling_transform(c, sf)", "def solved(self):\n if not self.all_variables_assigned():\n return False\n for constraint in self.constraints:\n if not constraint.satisfied(*[self.var_dict[name] for name in constraint.var_names]):\n return False\n return True", "def isAcyclic(self, adjacencyList):\n\n def cyclic(fNode, visited, stack):\n if fNode not in visited:\n visited.add(fNode)\n assert fNode not in stack\n stack.append(fNode)\n for tNode in adjacencyList[fNode]:\n if cyclic(tNode, visited, stack):\n return True\n assert stack.pop() == fNode\n return fNode in stack\n\n visited = set()\n for i in range(len(adjacencyList)):\n if cyclic(i, visited, []):\n return False\n return True", "def consistency(node, sequence, orientation, overlap):\n from_id, to_id = node\n from_sequence, to_sequence = sequence\n from_orn, to_orn = orientation\n if from_orn == '-':\n from_sequence = reverse_and_complement(from_sequence)\n if to_orn == '-':\n to_sequence = reverse_and_complement(to_sequence)\n size_overlap = real_overlap(from_sequence, to_sequence)\n if not size_overlap == overlap:\n GRAPH_LOGGER.debug('Edge between node %s and %s have \\\n \tno consistency between CIGAR overlap end \"real\" overlap', from_id, to_id)\n return False\n\n return True", "def has_groups(self, resolvables, all=True):\n total_checks = 0\n\n for group in resolvables:\n if self.has_group(group):\n total_checks += 1\n\n if not all:\n return True\n\n return True if all and total_checks == len(resolvables) else False", "def is_valid(self):\n sum_prob_per_var = {}\n for rule in self.rules:\n var, prob = rule.variable, rule.probability\n if prob < 0:\n return False\n sum_prob_per_var[var] = sum_prob_per_var.get(var, 0) + prob\n return all(sum_prob == 1.0 for sum_prob in 
sum_prob_per_var.values())", "def has_atomic_overlaps(self):\n atomic_overlaps = self._get_atomic_overlaps()\n return len(atomic_overlaps) > 0", "def has_all_children(t_id, c, correct_arcs):\n return {arc for arc in correct_arcs if arc.h == t_id} <= c.arcs", "def maybe_distal(self):\n return bool(set(self.locations) & set(StandardTerminology.DISTAL_LOCATIONS))", "def isColliding(self, distances_norm):\n isColl = distances_norm < self.d_coll\n\n # A particle does not collide with itself\n for i in range(len(isColl)):\n np.fill_diagonal(isColl[i], 0)\n return isColl", "def is_full(self):\n core_full = self.drone.complete() and self.subject.complete()\n if self.peds is None:\n return core_full\n else:\n return core_full and all([p.complete() for p in self.peds.values()])", "def can_relax_constraints(self):\n if len(self.mand_classroom_constraints) == 0:\n if len(self.high_classroom_constraints) > 0:\n return True\n else:\n for cc in self.low_classroom_constraints:\n if cc.can_relax_constraints():\n return True\n\n if len(self.mand_timeblock_ids) == 0:\n if len(self.high_timeblock_ids) > 0:\n return True\n\n return False", "def _ready_to_decrypt(self, shares: Dict[GuardianId, DecryptionShare]) -> bool:\n # If all guardian shares are represented including if necessary\n # the missing guardians reconstructed shares, the decryption can be made\n return len(shares) == self._context.number_of_guardians", "def assert_continuous(*curves: CubicBezierCurve) -> bool:\n if not curves:\n raise ValueError(\"CurveChecker.assert_continuous() cannot be called on an empty list\")\n\n previous_curve = curves[0]\n for curve in curves[1:]:\n if previous_curve.p1 != curve.p0:\n return False\n previous_curve = curve\n return True", "def assert_differentiable(*curves: CubicBezierCurve) -> bool:\n if not curves:\n raise ValueError(\"CurveChecker.assert_differentiable() cannot be called on an empty list\")\n\n if not assert_continuous(*curves):\n return False\n\n for curve0, curve1 in zip(curves, curves[1:]):\n if not assert_collinear(curve0.c1, curve1.p0, curve1.c0):\n return False\n return True", "def has_start_stop_acqtamps(self):\n try:\n if not all([isinstance(x, datetime) for x in self.start_acq]):\n raise Exception(\"Invalid value encountered in start_acq\")\n if not all([isinstance(x, datetime) for x in self.stop_acq]):\n raise Exception(\"Invalid value encountered in stop_acq\")\n if not all([len(self) == len(x) for x in [self.start_acq,\\\n self.stop_acq]]):\n raise Exception(\"Lengths of arrays do not match...\")\n return True\n except Exception as e:\n print((repr(e)))\n return False", "def is_complete(self, assignment):\n for a in self.agents:\n if self.calc_agent_budget(a, assignment):\n return False\n for t in self.tasks:\n if self.calc_task_budget(t, assignment):\n return False\n return True", "def validate(self, fgraph):\r\n\r\n if self.destroyers:\r\n ords = self.orderings(fgraph)\r\n\r\n if _contains_cycle(fgraph, ords):\r\n raise InconsistencyError(\"Dependency graph contains cycles\")\r\n else:\r\n #James's Conjecture:\r\n #If there are no destructive ops, then there can be no cycles.\r\n pass\r\n return True", "def update(self):\n self.haveDistrict = len(self.districts()) > 0", "def is_consistent(self, item):\n targets = set(ident for ident, node in self._nodes.iteritems() \\\n if node[item] == OCCUPIED)\n return self._check_consistency(item, [self.current], targets)", "def check_all_constraints(csp) :\n constraints=csp.get_all_constraints()\n for constraint in constraints:\n var1 = 
constraint.var1\n var2 = constraint.var2\n val1=csp.get_assigned_value(var1)\n val2=csp.get_assigned_value(var2)\n if val1!=None and val2!=None:\n if not constraint.check(val1,val2):\n return False\n return True", "def _check_if_satisfiable(self):\n # Search for a satisfying assignment\n all_variables = self.all_variables()\n\n # Try to find some assignment of the constrained vars\n counter = count()\n next_count = next(counter)\n queue = [(0, 0, next_count, {})]\n\n while queue:\n num_attempts, _, _, assignments = hq.heappop(queue)\n num_attempts += 1\n # Full assignment?\n # keep out of loop for empty constraint edge case\n if len(assignments) == len(all_variables):\n return True\n for v in sorted(all_variables - set(assignments.keys())):\n if isinstance(v, DiscreteVariable):\n possible_assignments = self.get_possible_assignments(v)\n else:\n possible_assignments = [v.sample() \\\n for _ in range(10*(1+num_attempts))]\n for assignment in possible_assignments:\n new_assignments = assignments.copy()\n new_assignments[v] = assignment\n # Constraint violated\n if not self.check(new_assignments):\n continue\n # Finish early\n if len(new_assignments) == len(all_variables):\n return True\n next_count = next(counter)\n hq.heappush(queue, (num_attempts, -len(new_assignments),\n -next_count, new_assignments))\n\n if next_count > gc.max_satisfy_tries:\n import ipdb; ipdb.set_trace()\n break\n\n return False", "def validate_missing_guardians(\n self, guardian_keys: List[ElectionPublicKey]\n ) -> bool:\n # Check this guardian's collection of public keys\n # for other guardians that have not announced\n missing_guardians: Dict[GuardianId, ElectionPublicKey] = {\n guardian_key.owner_id: guardian_key\n for guardian_key in guardian_keys\n if guardian_key.owner_id not in self._available_guardians\n }\n\n # Check that the public keys match for any missing guardians already reported\n # note this check naively assumes that the first guardian to annouce is telling the truth\n # but for this implementation it is simply a sanity check on the input data.\n # a consuming application should implement better validation of the guardian state\n # before announcing a guardian is available for decryption.\n for guardian_id, public_key in missing_guardians.items():\n if guardian_id in self._missing_guardians:\n if self._missing_guardians[guardian_id] != public_key:\n log_warning(\n (\n f\"announce guardian: {guardian_id} \"\n f\"expected public key mismatch for missing {guardian_id}\"\n )\n )\n return False\n else:\n self._missing_guardians[guardian_id] = missing_guardians[guardian_id]\n return True", "def check_consistent(self):\n # * END LIST The end list itself must be consistent.\n # ** Each end must be of understood type\n # ** Each end must have a valid sequence or no sequence\n # ** There must be no more than one instance of each name\n # ** WARN if there are ends with no namecounts\n # * TILE LIST\n # ** each tile must be of understood type (must parse)\n # ** ends in the tile list must be consistent (must merge)\n # ** there must be no more than one tile with each name\n # self.tiles.check_consistent()\n endsfromtiles = self.tiles.glues_from_tiles()\n\n # ** WARN if any end that appears does not have a complement used or vice versa\n # ** WARN if there are tiles with no name\n # * TILE + END\n # ** The tile and end lists must merge validly\n # (checks sequences, adjacents, types, complements)\n self.glues | endsfromtiles\n\n # ** WARN if tilelist has end references not in ends\n # ** WARN if merge is not equal 
to the endlist\n # ** WARN if endlist has ends not used in tilelist\n # * ADAPTERS / SEEDS\n # SEED stuff was here", "def validate(self, fgraph):\r\n\r\n if self.destroyers:\r\n ords = self.orderings(fgraph)\r\n\r\n if _contains_cycle(fgraph, ords):\r\n raise InconsistencyError(\"Dependency graph contains cycles\")\r\n else:\r\n #James's Conjecture:\r\n #If there are no destructive ops, then there can be no cycles.\r\n pass\r\n return True", "def _paths_are_consistent_with_hash_prefixes(self, paths,\n path_hash_prefixes):\n\n # Assume that 'paths' and 'path_hash_prefixes' are inconsistent until\n # proven otherwise.\n consistent = False\n\n if len(paths) > 0 and len(path_hash_prefixes) > 0:\n for path in paths:\n path_hash = self._get_target_hash(path)\n # Assume that every path is inconsistent until proven otherwise.\n consistent = False\n\n for path_hash_prefix in path_hash_prefixes:\n if path_hash.startswith(path_hash_prefix):\n consistent = True\n break\n\n # This path has no matching path_hash_prefix. Stop looking further.\n if not consistent: break\n\n return consistent", "def validate(self):\n if len(self.independent_nodes) > 0:\n try:\n self.topological_sort()\n return True\n except ValueError:\n return False\n return False", "def _check_all(self, groups):\n for group in groups:\n if not self._check_group(group):\n return False\n return True", "def _has_all_host_addresses(self, addresses):\n for s_id, s_size in enumerate(self.subnets[1:]):\n for m in range(s_size):\n # +1 to s_id since first subnet is 1\n if str((s_id + 1, m)) not in addresses:\n return False\n return True", "def is_alld(self):\n g = self.get_gene().get_seq()\n for i in range(1, len(g)):\n if g[i] is 'c':\n return False\n return True", "def balanced(self):\n tmp = self.distro.values()\n tmp.sort()\n bal = [tmp == [2, 3, 4, 4],\n tmp == [2, 3, 3, 5],\n tmp == [3, 3, 3, 4]]\n if any(bal):\n return True\n else:\n return False", "def check_deterministic_constraints(self, x):\n return True", "def check_deterministic_constraints(self, x):\n return True", "def is_solution(self, csp):\n return self.is_consistent(csp.get_constraints()) and self.is_complete(csp.get_variables())", "def leaves_are_consistent(self):\n self.log(u\"Checking if leaves are consistent\")\n leaves = self.leaves()\n if len(leaves) < 1:\n self.log(u\"Empty leaves => return True\")\n return True\n min_time = min([l.interval.begin for l in leaves])\n self.log([u\" Min time: %.3f\", min_time])\n max_time = max([l.interval.end for l in leaves])\n self.log([u\" Max time: %.3f\", max_time])\n self.log(u\" Creating SyncMapFragmentList...\")\n smf = SyncMapFragmentList(\n begin=min_time,\n end=max_time,\n rconf=self.rconf,\n logger=self.logger\n )\n self.log(u\" Creating SyncMapFragmentList... done\")\n self.log(u\" Sorting SyncMapFragmentList...\")\n result = True\n not_head_tail = [l for l in leaves if not l.is_head_or_tail]\n for l in not_head_tail:\n smf.add(l, sort=False)\n try:\n smf.sort()\n self.log(u\" Sorting completed => return True\")\n except ValueError:\n self.log(u\" Exception while sorting => return False\")\n result = False\n self.log(u\" Sorting SyncMapFragmentList... 
done\")\n return result", "def _isArcTangentToArc(self, px, py, cx1, cy1, cx2, cy2):\n p = QPointF(px, py)\n v1 = QVector2D(p - QPointF(cx1, cy1)).normalized()\n v2 = QVector2D(p - QPointF(cx1, cy1)).normalized()\n if abs(v1.dotProduct(v1, v2)) - 1.0 <= 1e-6:\n # TODO: handle case where arc turns back into the other arc\n return True\n else:\n return False", "def enforce_node_consistency(self):\n # print(\"Entered enforce_node_consistency Function\")\n # print(\"self.domains\")\n # print(self.domains)\n for mystery in self.domains:\n # print(\"!!!!!!!!!!!!\")\n # print(mystery)\n # print(self.domains[mystery])\n keep_list = set()\n while self.domains[mystery]:\n word = self.domains[mystery].pop()\n if(len(word) == mystery.length):\n keep_list.add(word)\n for word in keep_list:\n self.domains[mystery].add(word)\n # print(self.domains[mystery])\n\n # raise NotImplementedError", "def static_collision_check(self, paths, obstacles):\r\n\t\tcollision_check_array = np.zeros(len(paths), dtype=bool)\r\n\t\tfor i in range(len(paths)):\r\n\t\t\tcollision_free = True\r\n\t\t\tpath = paths[i]\r\n\r\n\t\t\t# Iterate over the points in the path.\r\n\t\t\tfor j in range(len(path[0])):\r\n\t\t\t\t\r\n\t\t\t\tcircle_locations = np.zeros((1, 2))\r\n\r\n\t\t\t\tcircle_locations[0, 0] = path[0][j] + self._circle_offset*cos(path[2][j])\r\n\t\t\t\tcircle_locations[0, 1] = path[1][j] + self._circle_offset*sin(path[2][j])\r\n\t\t\t\t\r\n\t\t\t\tfor k in range(len(obstacles)):\r\n\t\t\t\t\tcollision_dists = scipy.spatial.distance.cdist(np.array([obstacles[k]]), circle_locations)\r\n\t\t\t\t\tcollision_dists = np.subtract(collision_dists, self._circle_radii)\r\n\t\t\t\t\tcollision_free = collision_free and not np.any(collision_dists < 0)\r\n\r\n\t\t\t\t\tif not collision_free:\r\n\t\t\t\t\t\tbreak\r\n\t\t\t\tif not collision_free:\r\n\t\t\t\t\tbreak\r\n\r\n\t\t\tcollision_check_array[i] = collision_free\r\n\r\n\t\treturn collision_check_array", "def is_allc(self):\n g = self.get_gene().get_seq()\n for i in range(1, len(g)):\n if g[i] is 'd':\n return False\n return True", "def ARC_update(input, address):\n \n t_update_1 = datetime.now()\n \n client_arclink = Client_arclink()\n \n events, address_events = quake_info(address, 'info')\n len_events = len(events)\n \n Stas_arc = []\n \n for i in range(0, len_events):\n \n target_path = address_events\n Sta_arc = ARC_available(input, events[i], target_path[i], event_number = i)\n Stas_arc.append(Sta_arc)\n \n print 'ArcLink-Availability for event: ' + str(i+1) + str('/') + \\\n str(len_events) + ' --->' + 'DONE'\n \n if input['get_continuous'] == 'Y':\n for j in range(1, len_events):\n Stas_arc.append(Sta_arc)\n print 'ArcLink-Availability for event: ' + str(j+1) + str('/') + \\\n str(len_events) + ' --->' + 'DONE'\n break\n \n Stas_req = []\n \n for k in range(0, len_events):\n Sta_all = Stas_arc[k]\n Stas_req.append(rm_duplicate(Sta_all, \\\n address = os.path.join(address_events[k])))\n \n return Stas_req", "def graphConsistency(self, feature):\n # get a list of all constraints in which feature appears in the head\n headConstraints = self.getHeadConstraints(feature.name)\n # make a copy of the constraints list - we will treat this like a stack\n constraintList = headConstraints[:]\n # loop through all the constraints\n while len(constraintList) > 0:\n if (len(constraintList) % 100 == 0):\n print \"\\tconsistency checking constraints = \" + str(len(constraintList))\n # grab a constraint off the stack\n constraint = constraintList.pop()\n # check the constraint for arc 
consistency\n consistent = self.arcConsistency(constraint)\n # if we removed all the values from the domain of the tail then we need to backtrack\n if (len(constraint.tail.domain) == 0):\n return False\n # if the arc wasn't consistent then we need to add back all the constraints\n # with a head equal to the tail of the changed constraint to the queue\n constraintsAdded = 0\n if (not consistent):\n # get a list of constraints where the tail feature we just changed appears as\n # the head\n reCheckConstraints = self.getHeadConstraints(constraint.tail.name)\n # go through the list, add back all constraints that are not already in the stack\n for c in reCheckConstraints:\n # if the constraint is not already in the stack\n if not c in constraintList:\n # put it at the bottom of the stack\n constraintList.insert(0, c)\n constraintsAdded += 1\n print \"\\t\\tNumber of constraints added: \" + str(constraintsAdded)\n return True", "def _parallel(*segments):\n if not all(isinstance(s, Line) for s in segments):\n raise TypeError(\"Line._parallel requires all Line objects\")\n\n unique_segments = list(set(segments))\n\n if len(unique_segments) == 0:\n return False\n elif len(unique_segments) == 1:\n return True\n else:\n # take the first segment and translate it to the origin\n first_translated_seg = Line([Point3(0, 0, 0), (segments[0].end - segments[0].start)])\n\n # the given segments are parallel if they are all parallel to the first\n for s in segments[1:]:\n translated_seg = Line([Point3(0, 0, 0), (s.end - s.start)])\n if not first_translated_seg.is_collinear_with(translated_seg):\n return False\n\n return True", "def is_solved(self) -> bool:\n return set(self.boxes) == set(self.storage_locations)", "def domain_success_dstc(self, turn_data):\n for curr_doms in turn_data['others']['services']:\n if turn_data['goal_state'][curr_doms] != {}:\n for ke in list(turn_data['goal_state'][curr_doms].keys()):\n if turn_data['goal_state'][curr_doms][ke] == '?':\n return False\n return True\n # if domain not in self.goal:\n # return None\n # if domain in self.complete_domain:\n # return 0\n\n # if ref2goal:\n # goal = {}\n # goal[domain] = deepcopy(self.goal[domain])\n # else:\n # goal = self._init_dict()\n # if 'book' in self.goal[domain]:\n # goal[domain]['book'] = self.goal[domain]['book']\n # for da in self.usr_da_array:\n # d, i, s, v = da.split('-', 3)\n # if d != domain:\n # continue\n # if s in self.mapping[d]:\n # if i == 'inform':\n # goal[d][s] = v\n # elif i == 'request':\n # goal[d][s] = '?'\n\n # match_rate = self._match_rate_goal(goal, self.booked, [domain])\n # match_rate = np.mean(match_rate) if match_rate else None\n\n # inform = self._inform_F1_goal(goal, self.sys_da_array, [domain])\n # try:\n # inform_rec = inform[0] / (inform[0] + inform[2])\n # except ZeroDivisionError:\n # inform_rec = None\n\n # if (match_rate == 1 and inform_rec == 1) \\\n # or (match_rate == 1 and inform_rec is None) \\\n # or (match_rate is None and inform_rec == 1):\n # self.complete_domain.append(domain)\n # return 1\n # else:\n # return 0", "def consistent(self):\n if self.var1.get_value() is None or self.var2.get_value() is None:\n return True\n\n return self.var1.value != self.var2.value", "def is_all_visited(self):\n cond = [node.visited if node and node.belongs and node.valid else True for node in self.nodes.flatten()]\n return all(cond)", "def is_distal(self):\n return bool(set(self.locations) and set(self.locations) <= set(StandardTerminology.DISTAL_LOCATIONS)) \\\n or bool(self.depth and 16 < self.depth < 
82)", "def is_valid_single_attempt(self, atoms_init, atoms_final):\n from scipy.spatial import cKDTree as KDTree\n from random import shuffle\n atoms1 = atoms_init.copy()\n atoms2 = atoms_final.copy()\n\n vol1 = atoms1.get_volume()\n vol2 = atoms2.get_volume()\n if vol2 > vol1:\n ratio = (vol2/vol1)**(1.0/3.0)\n cell1 = atoms1.get_cell()\n atoms1.set_cell(cell1*ratio, scale_atoms=True)\n else:\n ratio = (vol1/vol2)**(1.0/3.0)\n cell2 = atoms2.get_cell()\n atoms2.set_cell(cell2*ratio, scale_atoms=True)\n\n # Try construct the relation\n used_indices = []\n tree = KDTree(atoms2.get_positions())\n indices = list(range(0, len(atoms1)))\n shuffle(indices)\n for atom in atoms1:\n if atom.symbol in self.exclude:\n continue\n dist, closest = tree.query(atom.position, k=12)\n srt_indx = np.argsort(dist)\n dist = [dist[indx] for indx in srt_indx]\n closest = [closest[indx] for indx in srt_indx]\n\n if all(c in used_indices for c in closest):\n # More than one atom is closest to this\n # structure\n self.rejected_reason = \"More than one atom mapped onto the \"\n self.rejected_reason += \"same atoms in the initial structure\"\n return False\n\n # First, unused with mathing symbol\n closest_indx = None\n closest_dist = None\n for i, indx in enumerate(closest):\n if atoms2[indx].symbol == atom.symbol and indx not in used_indices:\n closest_indx = indx\n closest_dist = dist[i]\n break\n\n if closest_indx is None:\n self.rejected_reason = \"No unused atoms with macthing symbol!\"\n return False\n \n used_indices.append(closest_indx)\n if closest_dist > self.max_displacement:\n # The displacement is larger than the tolereance\n self.rejected_reason = \"Max displacement too large\"\n return False\n \n if atom.symbol != atoms2[closest_indx].symbol:\n self.rejected_reason = \"Mapped symbol does not match!\"\n return False\n return True", "def valid(self):\n if (self._npix == []\n or self._gpix == []\n or self._epix == []\n or self._ppix == []) :\n return False\n return True", "def isGoal(self):\n for index in range(self.DIM):\n if not self.values('r',index).count(0) is 0:\n return False\n if not self.isValid():\n return False\n return True", "def is_achromatic(self) -> bool:\n\n value = self._space.is_achromatic(self.coords(nans=False))\n if value is None:\n xyz = self.convert('xyz-d65')\n return bool(xyz._space.is_achromatic(xyz[:-1]))\n return value", "def check_all_constraints(csp) :\n\n for constraint in csp.get_all_constraints():\n assigned1 = csp.get_assigned_value(constraint.var1)\n assigned2 = csp.get_assigned_value(constraint.var2)\n check = constraint.check(assigned1,assigned2)\n if check==False and assigned1!=None and assigned2!=None:\n return False \n return True", "def _update_same(self, update_set):\n for upd in update_set:\n cupd = None\n for rd, wrt, inst in self.syncinfo.rd_wrt_list:\n log.debug(\" UPD0-CHK: %s - RD: %s - WRT: %s [%s]\" \\\n % (upd ,rd, wrt, inst))\n if wrt == \"\":\n continue\n if upd == wrt:\n if self._is_updated(rd):\n log.debug(\" UPD0-FAIL: %s - RD: %s - WRT: %s [%s]\"\\\n % (upd ,rd, wrt, inst))\n return False\n elif AsmParser.is_register(wrt):\n cupd = self.arch.expand_reg_expr(upd) if not cupd else cupd\n cwrt = self.arch.expand_reg_expr(wrt)\n if self._overlap_cvars(cupd, cwrt) != None:\n if self._is_updated(rd):\n log.debug(\" UPD1: %s - RD: %s - WRT: %s [%s]\" \\\n % (upd ,rd, wrt, inst))\n return False\n return True", "def is_final_interval(self):\n return self.increasing_cover_relations() == []", "def _directors_validity_checker(directors, tangents, 
n_elements):\n _assert_shape(\n directors, (MaxDimension.value(), MaxDimension.value(), n_elements), \"directors\"\n )\n\n # Check if d1, d2, d3 are unit vectors\n d1 = directors[0, ...]\n d2 = directors[1, ...]\n d3 = directors[2, ...]\n assert_allclose(\n _batch_norm(d1),\n np.ones((n_elements)),\n atol=Tolerance.atol(),\n err_msg=(\" d1 vector of input director matrix is not unit vector \"),\n )\n assert_allclose(\n _batch_norm(d2),\n np.ones((n_elements)),\n atol=Tolerance.atol(),\n err_msg=(\" d2 vector of input director matrix is not unit vector \"),\n )\n assert_allclose(\n _batch_norm(d3),\n np.ones((n_elements)),\n atol=Tolerance.atol(),\n err_msg=(\" d3 vector of input director matrix is not unit vector \"),\n )\n\n # Check if d3xd1 = d2\n assert_allclose(\n _batch_cross(d3, d1),\n d2,\n atol=Tolerance.atol(),\n err_msg=(\" d3 x d1 != d2 of input director matrix\"),\n )\n\n # Check if computed tangents from position is the same with d3\n assert_allclose(\n tangents,\n d3,\n atol=Tolerance.atol(),\n err_msg=\" Tangent vector computed using node positions is different than d3 vector of input directors\",\n )", "def is_crossed(self):\n left_boundary_clusters = np.extract(self.cluster[0] > 0,\n self.cluster[0])\n right_boundary_clusters = np.extract(self.cluster[-1] > 0,\n self.cluster[-1])\n return np.in1d(left_boundary_clusters, right_boundary_clusters).any()", "def isscalar(cls, dataset, dim, per_geom=False):\n dim = dataset.get_dimension(dim)\n if (dim in cls.geom_dims(dataset)):\n return False\n elif per_geom:\n return all(isscalar(v) or len(list(unique_array(v))) == 1\n for v in dataset.data[dim.name])\n dim = dataset.get_dimension(dim)\n return len(dataset.data[dim.name].unique()) == 1", "def has_amino_acids(self):\n for frag in self.iter_amino_acids():\n return True\n return False", "def check_net_finality(self):\n for place in self.P:\n if place.M > 0 and not self.check_place_finality(place):\n return False\n return True", "def is_mountain_array(self, a):\r\n n = len(a)\r\n if n < 3:\r\n return False\r\n # Invalidate monotonic slopes\r\n elif (a[0] > a[1] or\r\n a[n - 2] < a[n - 1]):\r\n return False\r\n\r\n p = None\r\n for i in range(0, n - 1):\r\n\r\n # Search for local maxima\r\n if p is None:\r\n if a[i] > a[i + 1]:\r\n p = i\r\n if a[i] == a[i + 1]:\r\n return False\r\n\r\n # Confirm maxima as global maxima\r\n else:\r\n if a[i] <= a[i + 1]:\r\n return False\r\n\r\n return True", "def set_all_domains(self, domains_dict) :\n if not set(domains_dict.keys()) <= set(self.variables):\n invalid_vars = filter(lambda v: v not in self.variables, domains_dict.keys())\n raise KeyError(str(invalid_vars) + \" are not variables in this problem.\")\n self.domains = deepcopy(domains_dict)\n return self", "def is_acyclic(self, queue):\n if len(self.graph) == 0:\n return True\n\n elif not queue.is_empty():\n source = queue.dequeue()\n source_node = self.graph.get_node(source)\n for node in source_node.data.children:\n if (node.data.in_degree - 1) == 0:\n queue.enqueue(node.data.element)\n self.remove_vertex(source)\n result = self.is_acyclic(queue)\n else:\n result = False\n\n return result", "def _check_collisions(self, link_pose_mat, avoidance_radius):\n for link_pose in link_pose_mat:\n # only use x,y,z from link pose\n x_3x1 = np.array((link_pose[0, 0], link_pose[0, 1], link_pose[0, 2]))\n if self.check_collision(x_3x1, avoidance_radius):\n return True\n return False", "def empty(self) -> bool:\n return len(self.a) == 0 and len(self.b) == 0", "def is_fulfilled(self):\n if 
self.constraints is not None:\n for key in self.constraints:\n value = self.constraints[key]\n game_value = self.gameEngine.get_soft_state(key)\n\n if game_value is None:\n game_value = self.gameEngine.get_hard_state(key)\n\n if isinstance(value, list):\n if min(value) > game_value or game_value > max(value):\n return False\n else:\n if value != game_value:\n return False\n return True\n return False" ]
[ "0.7341803", "0.69774306", "0.6603775", "0.58208483", "0.5818385", "0.55777776", "0.55287546", "0.5475581", "0.5416812", "0.53949845", "0.5291726", "0.5171484", "0.51074433", "0.5098393", "0.5090242", "0.5040056", "0.49685684", "0.49068654", "0.48768777", "0.48698935", "0.48645192", "0.48564023", "0.48428378", "0.4837337", "0.48208138", "0.47945696", "0.47737432", "0.474903", "0.47152758", "0.46992224", "0.46847904", "0.46808627", "0.46151048", "0.45949778", "0.45388415", "0.45378822", "0.45373908", "0.45280987", "0.45245075", "0.45209646", "0.45170802", "0.45147026", "0.45118892", "0.45101407", "0.45000166", "0.4494708", "0.4488443", "0.4487049", "0.44836205", "0.4481704", "0.44813243", "0.4477356", "0.44743285", "0.44727513", "0.4462993", "0.44560292", "0.44521728", "0.4439765", "0.44295543", "0.44231138", "0.44221723", "0.44154364", "0.44129667", "0.43998474", "0.4398932", "0.4386243", "0.4385775", "0.4385775", "0.4378283", "0.43694463", "0.43668312", "0.43559122", "0.4347356", "0.43471298", "0.43463063", "0.434181", "0.43383873", "0.4337511", "0.433585", "0.4315894", "0.4315383", "0.4313363", "0.43120235", "0.431037", "0.43081775", "0.43047014", "0.43024737", "0.43021208", "0.43003613", "0.42853844", "0.42826158", "0.4282004", "0.42777982", "0.4273239", "0.4272457", "0.42700607", "0.42688787", "0.42593974", "0.425535", "0.4252777" ]
0.6537715
3
Return True if `assignment` is complete (i.e., assigns a value to each crossword variable); return False otherwise.
def assignment_complete(self, assignment): # print("Entered assignment_complete Function") for var in assignment: if assignment[var] is None: return False return self.consistent(assignment) # raise NotImplementedError
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def assignment_complete(self, assignment):\n # for each variable in the crossword\n for variable in self.crossword.variables:\n # if the variable is not assigned a value\n if variable not in assignment:\n # the crossword is not complete\n return False\n return True", "def assignment_complete(self, assignment):\n if len(assignment) == len(self.domains):\n return True\n\n else:\n return False", "def is_complete(self, variables):\n for var in variables:\n if not self.has_assignment_for(var):\n return False\n\n return True", "def consistent(self, assignment):\n # print(\"Entered consistent Function\")\n # print(\"assignment\")\n # print(assignment)\n\n overlaps = self.crossword.overlaps\n value_set = set()\n for variable in assignment: \n #checking overlaps with neighbors\n neighbors = self.crossword.neighbors(variable)\n for neighbor in neighbors:\n overlap = overlaps[(variable, neighbor)]\n if (neighbor in assignment):\n # print(\"var 1 overlap letter\")\n # print(assignment[variable][overlap[0]])\n # print(\"var 2 overlap letter\")\n # print(assignment[neighbor][overlap[1]])\n if (assignment[variable][overlap[0]] is not assignment[neighbor][overlap[1]]):\n return False\n \n # print(\"neighbors\")\n # print(neighbors)\n\n #checking that the assignment is the correct length for the variable\n if (variable.length != len(assignment[variable])):\n return False\n\n #the set to check for distinct variables later\n value_set.add(assignment[variable])\n\n #Checking that all variables are distinct\n #these should be the same length unless two or more variables share an value\n if( len(value_set) is not len(assignment)): \n return False\n \n return True\n\n # raise NotImplementedError", "def isAssignment(self):\n return _libsbml.Rule_isAssignment(self)", "def isAssigned(self):\n if self.getProton1Assignments() and self.getProton2Assignments():\n return 1\n else:\n return 0", "def is_assignment(*args):\n return _ida_hexrays.is_assignment(*args)", "def is_complete(self, assignment):\n for a in self.agents:\n if self.calc_agent_budget(a, assignment):\n return False\n for t in self.tasks:\n if self.calc_task_budget(t, assignment):\n return False\n return True", "def _is_assignment(node: cst.CSTNode, assignment_node: cst.CSTNode) -> bool:\n if node is assignment_node:\n return True\n if isinstance(assignment_node, (cst.Import, cst.ImportFrom)):\n aliases = assignment_node.names\n if isinstance(aliases, cst.ImportStar):\n return False\n for alias in aliases:\n if alias.name is node:\n return True\n asname = alias.asname\n if asname is not None:\n if asname.name is node:\n return True\n return False", "def has_assignment_for(self, var):\n return self.variable_to_value.get(var) != None", "def consistent(self, assignment):\n # for each of the current assignments\n for word in assignment:\n # if the word does not fit in the gaps\n if len(assignment[word]) != word.length:\n # reject attempt\n return False\n # if the word is already in the assignment\n if list(assignment.values()).count(assignment[word]) > 1:\n # reject attempt\n return False\n # for each of the overlaps\n for overlap in self.crossword.overlaps:\n # if the overlap isn't empty and is an overlap for the word\n # overlaps are a superset: if the overlap of (x, y) is in the set, so is (y, x), so we can just go by the first overlap element\n if self.crossword.overlaps[overlap] is not None and overlap[0] == word:\n # try to access the word assignment for the other overlap target\n try:\n test_word = assignment[overlap[1]]\n # if it does not exist in 
the assignment\n except KeyError:\n # continue to the next overlap\n continue\n # if the other overlap target has been assigned\n else:\n # extract the letter we want to match for the overlap\n test_letter = test_word[self.crossword.overlaps[overlap][1]]\n # if the letters do not match\n if assignment[word][self.crossword.overlaps[overlap][0]] != test_letter:\n # reject attempt\n return False\n return True", "def backtrack(self, assignment):\n # As stated above, if all variables in assignment is 1\n # then all values have been set and we return assignment \n if all(len(l) == 1 for l in assignment.values()):\n return assignment\n\n # Pick the next unnassigned variable that we are going to check \n key, values = self.select_unassigned_variable(assignment)\n # Loop through all the allowed values of this square in the sudoku board\n for value in values:\n # Do a deepcopy cuz otherwise R.I.P\n deep = copy.deepcopy(assignment)\n # Checks if this current value is consistent with the rest\n # of the sudoku board \n if self.check_consistency(deep, key, value):\n # IF it is consistent then we set this square to have this value \n deep[key] = [value]\n # Do inference check for hyper optimized code\n if self.inference(deep, self.get_all_arcs()):\n self.counter += 1\n result = self.backtrack(deep)\n if result is not False:\n return result\n else:\n self.fails += 1\n else:\n # Continue looping through the values of the currently selected \n # sudoku-square if the value was inconsistent with the board \n continue\n return False", "def backtrack(self, assignment):\n # if the assignment is complete\n if self.assignment_complete(assignment):\n # return the assignment, crossword is complete\n return assignment\n # pick a variable to try to assign\n var = self.select_unassigned_variable(assignment)\n # for each value in the variable's domain\n for value in self.order_domain_values(var, assignment):\n # attempt to assign this value and fit it into the crossword\n # make a copy of the current assignments\n trial = assignment.copy()\n # add the trial value to the test assignment\n trial[var] = value\n # if the test assignment is consistent\n if self.consistent(trial):\n # add the trial assignment to the current list of assignments\n assignment[var] = value\n # take the next backtrack step with this new assign,ent\n result = self.backtrack(assignment)\n # if the backtrack is a success\n if result is not None:\n # we have a match\n return result\n # a backtrack further down failed, so remove the trial assignment\n assignment.pop(var)\n # no assignment was possible, return None\n return None", "def consistent(self, assignment):\n for node1 in assignment:\n for node2 in assignment:\n\n if node1 != node2:\n #returns False if any assignmed words are the same\n if assignment[node1] == assignment[node2]:\n return False\n\n overlap= self.crossword.overlaps[node1,node2]\n if overlap != None:\n #checks if words assigned to node overlaps are the same letter\n if assignment[node1][overlap[0]] != assignment[node2][overlap[1]]:\n return False\n\n return True", "def goal_test(self, state):\r\n assignment = dict(state)\r\n return (len(assignment) == len(self.variables)\r\n and all(self.nconflicts(variables, assignment[variables], assignment) == 0\r\n for variables in self.variables))", "def holds(self,assignment):\n return self.condition(*tuple(assignment[v] for v in self.scope))", "def consistent(self,assignment):\n return all(con.holds(assignment)\n for con in self.constraints\n if all(v in assignment for v in con.scope))", 
"def assignment(self):\n shards = self.line.split('=')\n if len(shards) == 2:\n return True", "def is_assign(self):\n return self.var.initializer is not None", "def _ok(self, assignment_graph, source, value, target):\n target_values = assignment_graph[target]\n return len(target_values - set([value])) > 0", "def fold_assignment(self):\n return self._parms.get(\"fold_assignment\")", "def backtrack(csp):\n\n if len(csp.assignment) == len(csp.variables):\n return True\n\n variable = select_unassigned_variable(csp)\n value = order_domain_values(csp, variable)\n #print variable\n #print value\n flag = 0\n for x in value:\n csp.variables.begin_transaction()\n if is_consistent(csp, variable, x):\n #print \"past is_consistent\"\n for var in csp.variables:\n if var == variable:\n var.assign(x)\n var.is_assigned()\n solution = backtrack(csp)\n if solution != False:\n return True\n csp.variables.rollback()\n return False", "def _check_if_satisfiable(self):\n # Search for a satisfying assignment\n all_variables = self.all_variables()\n\n # Try to find some assignment of the constrained vars\n counter = count()\n next_count = next(counter)\n queue = [(0, 0, next_count, {})]\n\n while queue:\n num_attempts, _, _, assignments = hq.heappop(queue)\n num_attempts += 1\n # Full assignment?\n # keep out of loop for empty constraint edge case\n if len(assignments) == len(all_variables):\n return True\n for v in sorted(all_variables - set(assignments.keys())):\n if isinstance(v, DiscreteVariable):\n possible_assignments = self.get_possible_assignments(v)\n else:\n possible_assignments = [v.sample() \\\n for _ in range(10*(1+num_attempts))]\n for assignment in possible_assignments:\n new_assignments = assignments.copy()\n new_assignments[v] = assignment\n # Constraint violated\n if not self.check(new_assignments):\n continue\n # Finish early\n if len(new_assignments) == len(all_variables):\n return True\n next_count = next(counter)\n hq.heappush(queue, (num_attempts, -len(new_assignments),\n -next_count, new_assignments))\n\n if next_count > gc.max_satisfy_tries:\n import ipdb; ipdb.set_trace()\n break\n\n return False", "def check_assignment(assignments: dict, point: Point, value: str) -> bool:\n\n # check base condition: do the constraints hold for current point\n if not check_constraint_satisfied(assignments, point, value):\n print(' → base constraint failed:', point, '=', value)\n return False\n\n # check neighbouring conditions: do the constraints (still) hold for other points\n temp_assignment = copy.deepcopy(assignments)\n temp_assignment[point] = value\n\n # loop through points that can attack the current point, as kings\n print(' > checking neighbouring kings')\n for pt in filter(lambda p: p in assignments and assignments[p] == 'king', attack_points_king[point]):\n if not check_constraint_satisfied(temp_assignment, pt, assignments[pt]):\n print(' → neighbouring constraint failed for neighbour', pt, '=', assignments[pt])\n return False\n\n # loop through points that can attack the current point, as knights\n print(' > checking neighbouring knights')\n for pt in filter(lambda p: p in assignments and assignments[p] == 'knight', attack_points_knight[point]):\n if not check_constraint_satisfied(temp_assignment, pt, assignments[pt]):\n print(' → neighbouring constraint failed for neighbour', pt, '=', assignments[pt])\n return False\n\n # all constraints are satisfied!\n return True", "def __forward_check(self, assigned_var, assigned_value, unassigned_vars):\n for unassigned_neighbor in 
self.__unassigned_neighbors(assigned_var, unassigned_vars):\n consistent_values = self.__consistent_domain_values(assigned_var, assigned_value, unassigned_neighbor)\n if len(consistent_values) == 0:\n return False\n else:\n unassigned_neighbor.domain = consistent_values\n return True", "def _are_last_assignments_valid(assignments, output_vars, ignore_exception=True):\n assert isinstance(assignments, collections.OrderedDict)\n if len(assignments) == 0:\n return False\n last_assignments = []\n for assign_outvar in reversed(assignments):\n last_assignments.append([assign_outvar, assignments[assign_outvar]])\n if len(last_assignments) == len(output_vars):\n break\n last_assignments = list(reversed(last_assignments)) # proper order\n for i, (assign_outvar, expr) in enumerate(last_assignments):\n if not(assign_outvar == output_vars[i]) or \\\n not(isinstance(expr, SSAReturn)) or \\\n not(expr.args[0] not in output_vars):\n if not ignore_exception:\n last_assignments_vrepr = [(k.vrepr(), v.vrepr()) for (k, v) in last_assignments]\n raise ValueError(\"last assignments are not of the form \"\n \"output_var <- SSAReturn(non_output_var)\"\n f\"\\noutput vars = {output_vars}\"\n f\"\\nlast assignments {last_assignments_vrepr}\"\n f\"\\n{assignments}\")\n return False\n return True", "def eval_assignment(exp, env):\n set_variable_value(assignment_variable(exp), m_eval(assignment_value(exp), env), env)\n return quote(\"ok\")", "def _check_assigned(self):\n\n if self.values is None and self.lazy:\n raise ValueError(\"This instance has not been assigned any data.\")", "def __inferences(self, assigned_var, assigned_value, unassigned_vars, do_forward_checking):\n if do_forward_checking:\n return self.__forward_check(assigned_var, assigned_value, unassigned_vars)\n else:\n return True", "def are_all_jobs_assigned(self, numjobsassigned):\n if not self.uniform_mode:\n return numjobsassigned == len(self.jobs)\n return True", "def is_assigned(self):\n if \"isAssigned\" in self._prop_dict:\n return self._prop_dict[\"isAssigned\"]\n else:\n return None", "def iterate_over_assignment(assignment_funct, max_iterations=12, **args):\n args['knowing_minimum'] = 0\n args['knowing_maximum'] = sys.maxsize\n args['maxtime'] = 16 # in secs\n vars = None\n prob_status = pulp.LpStatusNotSolved\n iterations = 0\n while pulp.LpStatusOptimal != prob_status and pulp.LpStatusInfeasible != prob_status and iterations <= max_iterations:\n prob_status, vars = assignment_funct(**args)\n iterations+=1\n return prob_status, vars", "def complete(self):\n return not self.numfalse", "def inference(self, assignment, queue):\n # Do this as long as there is elements in the queue\n # e.g there is still more arcs to check \n while queue:\n # Pop the first element in the queue\n xi, xj = queue.pop(0)\n # Do the revise check \n if self.revise(assignment, xi, xj):\n # IF zero, CSP has no consistent soluton and AC-3 returns failure \n if len(assignment[xi]) == 0:\n return False\n # If NOT ZERO loop throuh the neighboring arcs of node\n # and append the neighbor and this node to the queue for further checking.\n # We do this so that we keep checking after we do changes and make sure \n # all is gucci gang\n for n in self.get_all_neighboring_arcs(xi):\n if n[0] != xj:\n queue.append((n[0], xi))\n return True", "def forward_checking(csp, var, value, assignment, removals):\r\n csp.support_pruning()\r\n check=0\r\n for B in csp.neighbors[var]:\r\n if B not in assignment:\r\n for b in csp.curr_domains[B][:]:\r\n check+=1\r\n if not csp.constraints(var, 
value, B, b):\r\n csp.prune(B, b, removals)\r\n # we have a failure\r\n # we check if domains list for variable B is not empty\r\n # and increase weight of B,var by 1\r\n if not csp.curr_domains[B]:\r\n csp.weight[(B,var)] += 1\r\n return False,check\r\n return True,check", "def isComplete(self):\n for n in range(9):\n for m in range(9):\n if self.puzzle[n][m] == 0:\n return False\n return True", "def __assign_policy_def(self):\n\n self.logger.info(\n f\"Creating policy assignment of definition {self.policy_id} to assignment {self.assignment_id}\"\n )\n policy_assignment_res = self.interactor.put_policy_assignment(\n self.policy_id, self.assignment_id\n )\n\n if policy_assignment_res.status_code != 201:\n self.output_res[\"result\"][\"status\"] = \"ERROR\"\n self.output_res[\"result\"][\n \"message\"\n ] = f\"Policy assignment {self.assignment_id} could not be created - {policy_assignment_res.status_code}: {policy_assignment_res.text}\"\n\n self.running_evaluations[self.eval_id] = self.output_res\n return False\n\n return True", "def is_complete(self):\n\n return self._pat_id is not None and self._mat_id is not None", "def isComplete(self):\n assert len(self._x) > 0\n assert len(self._y) > 0\n assert 2 == len(self._data_array.shape)\n assert self.wkt is not None\n assert self.wkt != ''\n\n return True", "def verify_assign(self, d_stmt, table):\n lvalue = DanaExpr.factory(d_stmt.find_first_child(\"p_lvalue\"), table)\n expr = DanaExpr.factory(d_stmt.find_first_child(\"p_expr\"), table)\n self.exprs = [lvalue, expr]\n\n expr.type.check_type(d_stmt.linespan, lvalue.type)\n expr.type.in_types(d_stmt.linespan, [DanaType(\"int\"), DanaType(\"byte\")])", "def is_complete(self):\n is_complete = True\n \n if (type(self.N) is not IntType) or self.N < 2:\n warnings.warn('N not set up properly.')\n is_complete = False\n \n if self.m is None or len(self.m) != self.N:\n warnings.warn('m not set up properly.')\n is_complete = False\n \n if self.R is None or len(self.R) != self.N:\n warnings.warn('R not set up properly.')\n is_complete = False\n \n if self.a is None or len(self.a) != self.N - 1:\n warnings.warn('a not set up properly.')\n is_complete = False\n \n if self.force is None or len(self.force) != self.N:\n warnings.warn('force not set up properly.')\n is_complete = False\n \n if self.Delta is None or len(self.Delta) != self.N - 1:\n warnings.warn('Delta not set up properly.')\n is_complete = False\n \n if self.n is None or len(self.n) != self.N - 1:\n warnings.warn('n not set up properly.')\n is_complete = False\n \n if self.beta < 0.0:\n warnings.warn('beta not set up properly.')\n is_complete = False\n \n if self.m0 < 0.0:\n warnings.warn('m0 not set up properly.')\n is_complete = False\n \n if self.mu < 0.0:\n warnings.warn('mu not set up properly.')\n is_complete = False\n \n return is_complete", "def is_full(self):\n return self.name and self.variables and self.assumptions and self.guarantees", "def is_assigned(self):\n if self.status == \"ASSIGNED\":\n return True\n else:\n return False", "def is_full(self):\n core_full = self.drone.complete() and self.subject.complete()\n if self.peds is None:\n return core_full\n else:\n return core_full and all([p.complete() for p in self.peds.values()])", "def test_save_assignment_file(self):\n\n results = GenomePropertiesResultsWithMatches(*self.test_genome_property_results, properties_tree=self.test_tree)\n\n engine = self.engine\n results.to_assignment_database(engine)\n\n assignment_caches = 
load_assignment_caches_from_database_with_matches(engine)\n new_results = GenomePropertiesResultsWithMatches(*assignment_caches, properties_tree=self.test_tree)\n\n self.assertEqual(results.sample_names, new_results.sample_names)\n self.assertEqual(results.property_results.equals(new_results.property_results), True)\n self.assertEqual(results.step_results.equals(new_results.step_results), True)\n self.assertEqual(results.step_matches.equals(new_results.step_matches), True)", "def solved(self):\n if not self.all_variables_assigned():\n return False\n for constraint in self.constraints:\n if not constraint.satisfied(*[self.var_dict[name] for name in constraint.var_names]):\n return False\n return True", "def complete(self):\n if self.__hasTABLE and self.__hasGRAPHS and self.__ndoubledollar == 4:\n return True\n else:\n return False", "def backtrack(self, assignment):\n # print(\"Entered backtrack Function\")\n # Check if assignment is complete\n if len(assignment) == len(self.domains):\n return assignment\n\n # Try a new variable\n var = self.select_unassigned_variable(assignment)\n word_list = self.order_domain_values(var, assignment)\n \n for word in word_list:\n new_assignment = assignment.copy()\n new_assignment[var] = word[0]\n if self.consistent(new_assignment):\n result = self.backtrack(new_assignment)\n if result is not None:\n return result\n \n return None\n\n # raise NotImplementedError", "def satisfying_assignment(formula):\n # convert the formula to a list of sets.\n formula = [set(i) for i in formula]\n\n # call the helper starting with the givne formula and an empty assignments\n # dictionary.\n result = sat_helper(formula, {})\n if result[0]:\n return result[1] # result[1] will be the dictionary of assignments.\n else:\n return None", "def check_assignment_consistency(self, assign_df=None, threshold=0.1):\n \n # If the user hasn't specified an assign_df, use one already calculated \n # for this NAPS_assigner instance\n if assign_df is None:\n set_assign_df = True\n assign_df = self.assign_df\n else:\n set_assign_df = False\n \n # First check if there are any sequential atoms\n carbons = pd.Series([\"C\",\"CA\",\"CB\"])\n carbons_m1 = carbons + \"m1\"\n seq_atoms = carbons[carbons.isin(assign_df.columns) & \n carbons_m1.isin(assign_df.columns)]\n seq_atoms_m1 = seq_atoms+\"m1\"\n #seq_atoms = list(seq_atoms)\n \n if seq_atoms.size==0:\n # You can't do a comparison\n assign_df[\"Max_mismatch_prev\"] = np.NaN\n assign_df[\"Max_mismatch_next\"] = np.NaN\n assign_df[\"Num_good_links_prev\"] = np.NaN\n assign_df[\"Num_good_links_next\"] = np.NaN\n return(assign_df)\n else:\n # First, get the i and i-1 shifts for the preceeding and \n # succeeding residues\n tmp = assign_df.copy()\n tmp = tmp.loc[tmp[\"Dummy_res\"]==False,]\n tmp.index = tmp[\"Res_N\"]\n tmp = tmp[list(seq_atoms)+list(seq_atoms_m1)]\n tmp_next = tmp.copy()\n tmp_next.index -= 1\n tmp_prev = tmp.copy()\n tmp_prev.index += 1\n tmp = tmp.join(tmp_next, rsuffix=\"_next\")\n tmp = tmp.join(tmp_prev, rsuffix=\"_prev\")\n # Calculate mismatch for each atom type\n for atom in seq_atoms:\n tmp[\"d\"+atom+\"_prev\"] = tmp[atom+\"m1\"] - tmp[atom+\"_prev\"]\n tmp[\"d\"+atom+\"_next\"] = tmp[atom] - tmp[atom+\"m1_next\"]\n # Calculate maximum mismatch\n tmp[\"Max_mismatch_prev\"] = tmp[\"d\"+seq_atoms+\"_prev\"].max(axis=1, \n skipna=True)\n tmp[\"Max_mismatch_next\"] = tmp[\"d\"+seq_atoms+\"_next\"].max(axis=1,\n skipna=True)\n \n # Calculate number of consistent matches\n tmp[\"Num_good_links_prev\"] = 
(tmp[\"d\"+seq_atoms+\"_prev\"]<threshold).sum(axis=1)\n tmp[\"Num_good_links_next\"] = (tmp[\"d\"+seq_atoms+\"_next\"]<threshold).sum(axis=1)\n \n # Join relevant columns back onto assign_df\n tmp[\"Res_N\"] = tmp.index\n assign_df = assign_df.join(tmp.loc[:,[\"Max_mismatch_prev\", \n \"Max_mismatch_next\", \n \"Num_good_links_prev\", \n \"Num_good_links_next\"]], \n on=\"Res_N\")\n if set_assign_df:\n self.assign_df = assign_df\n return(assign_df)", "def sat_apply_assignment(self, assignment):\n # YOUR CODE HERE\n o = set()\n print(s)\n print({x.simplify(assignment) for x in self.clauses if not isinstance(x.simplify(assignment), bool)})\n for x in s.clauses:\n if not isinstance(x.simplify(assignment), bool):\n o.add(x.simplify(assignment))\n print(\"ASSIGN SET\", o)\n\n return SAT(o)\n # return SAT({x.simplify(assignment) for x in self.clauses if not isinstance(x.simplify(assignment), bool)})", "def episode_done(self):\n if self.get_status() == AssignState.STATUS_DONE:\n return False\n else:\n return True", "def visit_assign(self: Parser, node: doc.Assign) -> None:\n if len(node.targets) != 1:\n self.report_error(node, \"Consequential assignments like 'a = b = c' are not supported.\")\n lhs = node.targets[0]\n\n if isinstance(node.value, doc.Subscript):\n check_slices = []\n if isinstance(node.value.slice, doc.Slice):\n check_slices = [node.value.slice]\n elif isinstance(node.value.slice, doc.Tuple):\n for p in node.value.slice.elts:\n if isinstance(p, doc.Slice):\n check_slices.append(p)\n for s in check_slices:\n if not s.step and s.upper and s.lower:\n s.step = doc.Constant(\n 1,\n None,\n 1,\n 1,\n s.upper.lineno,\n s.upper.end_col_offset + 1,\n s.upper.lineno,\n s.upper.end_col_offset + 2,\n )\n\n rhs = self.eval_expr(node.value)\n if isinstance(lhs, doc.Subscript):\n if isinstance(lhs.slice, doc.Tuple):\n indices = []\n for index in lhs.slice.elts:\n indices.append(self.eval_expr(index))\n else:\n indices = self.eval_expr(lhs.slice)\n T.buffer_store(self.eval_expr(lhs.value), rhs, indices)\n else:\n self.eval_assign(target=lhs, source=rhs, bind_value=bind_assign_value)", "def is_complete_multipartite(self):\n if self._.d != 2:\n return False\n if not self._has(\"p\"):\n self.pTable()\n return any(self._.p[0, i, i] == self._.p[j, i, i]\n for i, j in [(1, 2), (2, 1)])", "def select_unassigned_variable(self, assignment):\n # Simply just pick the next value that has more than one value\n # in the variable list\n for key, value in assignment.iteritems():\n if len(value) > 1:\n return key, value", "def execute_assignment_cases(self, test_plan, test_data):\n def check_results(expected, actual, param_arg_count):\n if param_arg_count == 0:\n # It was an unfiltered call, so default fixture assignments\n # might be polluting our answer - so we take into account\n # how many assignments there were before the test.\n self.assertEqual(\n len(expected) + test_data['initial_assignment_count'],\n len(actual))\n else:\n self.assertThat(actual, matchers.HasLength(len(expected)))\n\n for each_expected in expected:\n expected_assignment = {}\n for param in each_expected:\n if param == 'inherited_to_projects':\n expected_assignment[param] = each_expected[param]\n elif param == 'indirect':\n # We're expecting the result to contain an indirect\n # dict with the details how the role came to be placed\n # on this entity - so convert the key/value pairs of\n # that dict into real entity references.\n indirect_term = {}\n for indirect_param in each_expected[param]:\n key, value = 
self._convert_entity_shorthand(\n indirect_param, each_expected[param],\n test_data)\n indirect_term[key] = value\n expected_assignment[param] = indirect_term\n else:\n # Convert a simple shorthand entry into a full\n # entity reference\n key, value = self._convert_entity_shorthand(\n param, each_expected, test_data)\n expected_assignment[key] = value\n self.assertIn(expected_assignment, actual)\n\n def convert_group_ids_sourced_from_list(index_list, reference_data):\n value_list = []\n for group_index in index_list:\n value_list.append(\n reference_data['groups'][group_index]['id'])\n return value_list\n\n # Go through each test in the array, processing the input params, which\n # we build into an args dict, and then call list_role_assignments. Then\n # check the results against those specified in the test plan.\n for test in test_plan.get('tests', []):\n args = {}\n for param in test['params']:\n if param in ['effective', 'inherited', 'include_subtree']:\n # Just pass the value into the args\n args[param] = test['params'][param]\n elif param == 'source_from_group_ids':\n # Convert the list of indexes into a list of IDs\n args[param] = convert_group_ids_sourced_from_list(\n test['params']['source_from_group_ids'], test_data)\n else:\n # Turn 'entity : 0' into 'entity_id = ac6736ba873d'\n # where entity in user, group, project or domain\n key, value = self._convert_entity_shorthand(\n param, test['params'], test_data)\n args[key] = value\n results = self.assignment_api.list_role_assignments(**args)\n check_results(test['results'], results, len(args))", "def backtrack(csp):\n\n # Base case\n if (is_complete(csp)):\n return True\n\n # Get first unassigned variable\n var = select_unassigned_variable(csp)\n\n # Iterate through domain\n for value in order_domain_values(csp, var):\n\n # Inference\n if is_consistent(csp, var, value):\n\n # Set rollback point\n csp.variables.begin_transaction()\n var.assign(value)\n\n # Explore this assignment\n if (inference(csp, var)):\n # GGWP\n if backtrack(csp):\n return True\n # Nope\n csp.variables.rollback()\n return False", "def is_assign_to_name(statement):\n return isinstance(statement, ast.Assign) and \\\n len(statement.targets) == 1 and \\\n isinstance(statement.targets[0], ast.Name)", "def is_empty(self):\n if len(self._table) == 0:\n return True\n\n if len(self._table) > 1:\n return False\n\n return list(self._table.keys())[0] == Assignment.create_default(self._head_vars)", "def done(self):\n return self.goal == (0, 0)", "def isPossibleAssign(self, position, value):\n\n # Check horizontal\n for i, x in enumerate(self.board[position[0], :]):\n if i != position[1] and x == value:\n return False\n\n # Check vertical\n for i, x in enumerate(self.board[:, position[1]]):\n if i != position[0] and x == value:\n return False\n\n # Check square\n square = [\n self.board[\n (position[0] // 3) * 3 + i,\n (position[1] // 3) * 3 + j\n ]\n for i in range(3) for j in range(3)\n ]\n for i, x in enumerate(square):\n if i != (position[0] % 3 * 3 + position[1] % 3) and x == value:\n return False\n\n return True", "def complete(self):\n if bool(self.namespace) and bool(self.kind) and bool(self.id):\n return True\n else:\n return False", "def single_assignment_handler(target, value, assign_stmts, node, id_str):\n #print(\"single_assignment_handler\")\n\n temp_stmts = core_language.create_Assign(target, value)\n if hasattr(node, 'lineno'):\n temp_stmts.lineno = node.lineno\n temp_stmts.col_offset = node.col_offset\n\n assign_stmts.append(temp_stmts)\n return False", "def 
keep_cross_validation_fold_assignment(self):\n return self._parms.get(\"keep_cross_validation_fold_assignment\")", "def isSetVariable(self):\n return _libsbml.EventAssignment_isSetVariable(self)", "def check(self):\n\n return self.variable.check(self.cval, self.conditional)", "def test_50_assign_statement(self):\n\t\tinput = \"\"\"var x,y:integer;\n\t\tfunction f(): array[1 .. 3] of real;\n\t\tvar a: array[1 .. 3] of real;\n\t\tbegin a[2]:=1.1; return a; end\n\t\tprocedure main(); var x:array[1 .. 2]of real;\n\t\tbegin f()[1]:=x[1]:=1; with y:real;y:real; do begin end end\"\"\"\n\t\texpect = \"Redeclared Variable: y\"\n\t\tself.assertTrue(TestChecker.test(input,expect,450))", "def done(self, streetlearn):\n return not bool(self._coin_pano_id_set)", "def backtrack(self, assignment):\n #if a solution has been found, returns the solution, this is used for recursive purposes\n if self.assignment_complete(assignment) and self.consistent(assignment):\n return assignment\n #select the most optimal variable/node\n var = self.select_unassigned_variable(assignment)\n #assigns a word left in the domain of var and assigns it to var\n for word in self.order_domain_values(var, assignment):\n assignment[var]= word\\\n #if the assignment is consistent, recursively call backtrack\n if self.consistent(assignment):\n result= self.backtrack(assignment)\n if result != False:\n return assignment\n #if the assignment is not consistent at any point, remove the latest assignment\n assignment.pop(var)\n\n return None", "def is_fulfilled(self):\n if self.constraints is not None:\n for key in self.constraints:\n value = self.constraints[key]\n game_value = self.gameEngine.get_soft_state(key)\n\n if game_value is None:\n game_value = self.gameEngine.get_hard_state(key)\n\n if isinstance(value, list):\n if min(value) > game_value or game_value > max(value):\n return False\n else:\n if value != game_value:\n return False\n return True\n return False", "def visit_Assign(self, node):\n assign_stmts = []\n value = node.value\n reversed_targets = node.targets\n reversed_targets.reverse()\n assign_stmts.append(stypy_functions.create_blank_line())\n if len(reversed_targets) > 1:\n assign_stmts.append(\n stypy_functions.create_src_comment(\n \"Multiple assignment of {0} elements.\".format(len(reversed_targets))))\n else:\n if hasattr(node, 'lineno'):\n assign_stmts.append(stypy_functions.create_src_comment(\n \"Assigning a {1} to a {0} (line {2}):\".format(type(reversed_targets[0]).__name__,\n type(value).__name__, node.lineno)))\n else:\n assign_stmts.append(stypy_functions.create_src_comment(\n \"Assigning a {1} to a {0}:\".format(type(reversed_targets[0]).__name__,\n type(value).__name__)))\n for assign_num in xrange(len(reversed_targets)):\n target = reversed_targets[assign_num]\n # Function guard is true? execute handler\n for handler_func_guard_tuple in self.__assignment_handlers:\n if handler_func_guard_tuple[0](target, value):\n id_str, handler_func = handler_func_guard_tuple[1]\n self.performed_transformations |= handler_func(target, value, assign_stmts, node, id_str)\n assign_stmts = stypy_functions.flatten_lists(assign_stmts)\n value = target\n break\n\n if len(assign_stmts) > 0:\n return assign_stmts\n return node", "def is_complete(self):\n if self.input_alphabet is None:\n raise ValueError(\"No input alphabet is given. 
\"\n \"Try calling determine_alphabets().\")\n\n for state in self.iter_states():\n for transition in state.transitions:\n if len(transition.word_in) != 1:\n return False\n\n transition_classes_by_word_in = full_group_by(\n state.transitions,\n key=lambda t: t.word_in)\n\n for key, transition_class in transition_classes_by_word_in:\n if len(transition_class) > 1:\n return False\n\n # all input labels are lists, extract the only element\n outgoing_alphabet = [key[0] for key, transition_class in\n transition_classes_by_word_in]\n if not sorted(self.input_alphabet) == sorted(outgoing_alphabet):\n return False\n\n return True", "def evaluate(self, y):\n const_projection = np.zeros(len(self.index_array))\n for i in range(len(self.index_array)):\n const_projection[i] = y[self.index_array[i]]\n for a in self.assignments:\n equal_flag = True\n for i in range(len(const_projection)):\n if const_projection[i] != a[i]:\n equal_flag = False\n break\n if equal_flag:\n return True\n return False", "def is_completed(self, course):\n info = self.courses_progress_dict.get(course.get_namespace_name())\n if info and 'final_grade' in info:\n return True\n return False", "def is_valid(self):\n sum_prob_per_var = {}\n for rule in self.rules:\n var, prob = rule.variable, rule.probability\n if prob < 0:\n return False\n sum_prob_per_var[var] = sum_prob_per_var.get(var, 0) + prob\n return all(sum_prob == 1.0 for sum_prob in sum_prob_per_var.values())", "def select_unassigned_variable(self, assignment):\n var_list= []\n #add unassigned variabled to a list along with the number of words left in its domain\n for var in self.domains:\n if var not in assignment:\n var_list.append((var, len(self.domains[var])))\n #sort this list by the number of words left in its domain\n var_list.sort(key= lambda x:x[1])\n\n #list for variables that are tied for least words left in domain\n equal_vars= [list(var_list[0])]\n for i in range(len(var_list)):\n #adds variables with same number of words left in domain\n if var_list[0][1] == var_list[i][1] and var_list[i] != var_list[0]:\n equal_vars.append(list(var_list[i]))\n\n \n #change the encoded information for words left in domain to the number of neighbors the variable had (highest degree)\n for i in range(len(equal_vars)):\n equal_vars[i][1]= len(self.crossword.neighbors(equal_vars[i][0]))\n\n #sort the list by the highest degree\n equal_vars.sort(key= lambda x:x[1])\n \n #return var with highest degree\n return equal_vars[0][0]", "def visit_Assign(self, node):\n self.generic_visit(node)\n is_multiple = len(node.targets) > 1\n is_compound = any(map(is_sequence_node, node.targets))\n is_simple = not is_compound\n if is_simple and is_multiple:\n return self.visit_simple_assign(node)\n elif is_compound and (is_multiple or is_sequence_node(node.value)):\n return self.visit_compound_assign(node)\n return node", "def checkProduction(self, replica=False, stepselection=[], outextension='out'):\n replica = replica or self.replica\n if not replica: raise NAMDCheckError, \"Replica not assigned.\"\n if not isinstance(stepselection, list): stepselection=[stepselection]\n\n selection = stepselection or range(1, replica.ntrajfiles+1)\n for i in selection:\n out = self.getProductionOutputFile(i, replica, outextension)\n if not out:\n if self.warn: self.log.warn(\"Production output file not found for step %i\"%i)\n return False\n else:\n if not S.NAMD_FILE_COMPLETE in out:\n if self.warn: self.log.warn(\"MD production step %i not completed or errors arised\"%i)\n return False\n return True", "def 
any_holds(self, domains, const, env, other_vars, ind=0, assigned_vars=[]):\r\n # All the variables in the scope of the constraints has been assigned.\r\n if ind == len(other_vars): \r\n return const.holds(env)\r\n else:\r\n var = other_vars[ind]\r\n for val in domains[var]:\r\n env[var] = val\r\n if is_all_unique(env, [var] + assigned_vars) and self.any_holds(domains, const, env, other_vars, ind + 1, [var] + assigned_vars):\r\n return True\r\n return False", "def propagate(formula, var, val, assignment):\n # make a copy of assignment to work with, and set the given variable to the\n # given value in it.\n assignment = dict(assignment)\n assignment[var] = val\n # update the formula based on this assignment.\n # clauses containing (var, val) are satisfied already (so remove them from the formula).\n # clauses containing (var, not val) must be satisfied by another variable,\n # so remove (var, not val) from them but otherwise leave them intact.\n new_form = [clause - {(var, not val)} for clause in formula if (var, val) not in clause]\n\n # at this point, if any empty clauses exist, they cannot be satisfied. and\n # if no clauses remain, we have already satisfied the formula.\n if not new_form:\n # if the list is empty, we win\n return True, assignment, []\n if not all(new_form):\n # if any clause is empty, we lose\n return False, {}, []\n # otherwise, we're still going\n return None, assignment, new_form", "def _is_goal_reached(self, state):\n return self._goal.holds(state.literals)", "def select_unassigned_variable(self, assignment):\n # sort crossword variables that are not in assignment by the length of their domain lists\n available = sorted([x for x in self.crossword.variables if x not in assignment], key=lambda x: len(self.domains[x]))\n # sort the list of available variables that have the same size domain as the shortest by the number of neighbors they have\n available = sorted([x for x in available if len(self.domains[x]) == len(self.domains[available[0]])], key=lambda x: len(self.crossword.neighbors(x)))\n # return the last element of the array\n return available.pop()", "def _get_assignment_completion_status(self, assignments):\n\n status_summary = {}\n\n for a in assignments:\n project_id = a.project_id\n user_id = a.user_id\n lof_labels_for_assignment = self._get_users_labels_for_assignment(project_id,\n user_id,\n a.id)\n status_summary[a.id] = len(lof_labels_for_assignment)\n return status_summary", "def is_waiting_to_be_assigned(self):\n if self.status == \"WAITING_TO_BE_ASSIGNED\":\n return True\n else:\n return False", "def test_46_assign_statement(self):\n\t\tinput = \"\"\"var x,y:integer;\n\t\tprocedure main(); var x:array[1 .. 
3]of real; begin x[1]:=1;\n\t\twith y:integer;y:real; do begin end end\"\"\"\n\t\texpect = \"Redeclared Variable: y\"\n\t\tself.assertTrue(TestChecker.test(input,expect,446))", "def is_complete(self):\n acquired_points = self.dset.shape[0]\n total_nr_pts = np.shape(self.get_sweep_points())[0]\n if acquired_points < total_nr_pts:\n return False\n elif acquired_points >= total_nr_pts:\n if self.soft_avg() != 1 and self.soft_iteration == 0:\n return False\n else:\n return True", "def passed(self):\n return self.is_executed and self.is_executed_ok and self.is_equal_result", "def assign_value(Xj, Xk, csp, assignment):\r\n parent_assignment = assignment[Xj]\r\n for val in csp.curr_domains[Xk]:\r\n if csp.constraints(Xj, parent_assignment, Xk, val):\r\n return val\r\n\r\n # No consistent assignment available\r\n return None", "def is_complete(self) -> bool:\n return (\n (\n self.materialized_subset | self.failed_and_downstream_subset\n ).num_partitions_and_non_partitioned_assets\n == self.target_subset.num_partitions_and_non_partitioned_assets\n )", "def is_evaluated(evalassignment):\n if evalassignment.assignment.document.name == '' or evalassignment.\\\n assignment.assignmentype.deadline_submission > timezone.now():\n return -30\n else:\n if evalassignment.is_questions_graded:\n if evalassignment.grade_evaluation:\n return evalassignment.grade_evaluation\n else:\n return -10\n else:\n return -20", "def test_uc_to_assignment(self):\r\n expected = {'q1': (['A', 'B', 'C'], 1.0, 2),\r\n 'q2': (['A', 'H', 'I', 'J'], 2. / 3., 3),\r\n 'q3': (['Unassigned'], 1.0, 1),\r\n 'q4': (['Unassigned'], 1.0, 1),\r\n 'q5': (['Unassigned'], 1.0, 1)\r\n }\r\n params = {'id_to_taxonomy_fp': self.id_to_tax1_fp,\r\n 'reference_sequences_fp': self.refseqs1_fp}\r\n t = UclustConsensusTaxonAssigner(params)\r\n actual = t._uc_to_assignment(self.uc1_lines)\r\n self.assertEqual(actual, expected)\r\n\r\n # change label for unassignable\r\n expected = {'q1': (['A', 'B', 'C'], 1.0, 2),\r\n 'q2': (['A', 'H', 'I', 'J'], 2. / 3., 3),\r\n 'q3': (['x'], 1.0, 1),\r\n 'q4': (['x'], 1.0, 1),\r\n 'q5': (['x'], 1.0, 1)\r\n }\r\n params = {'id_to_taxonomy_fp': self.id_to_tax1_fp,\r\n 'reference_sequences_fp': self.refseqs1_fp,\r\n 'unassignable_label': 'x'}\r\n t = UclustConsensusTaxonAssigner(params)\r\n actual = t._uc_to_assignment(self.uc1_lines)\r\n self.assertEqual(actual, expected)", "def _check_for_completion(self, node):\n dis=0\n for i in range(node.state.size):\n dis+=(node.state[i]-self.goal.state[i])**2\n\n dis=np.sqrt(dis)\n if(dis<=self.step_size):\n return True\n else: return False", "def done(self):\n try:\n if self.doneCondition(self.currState): return True\n except:\n # Add to log\n print(\"WARNING: doneCondition looks to have error OR is not initialized\")\n\n return False", "def eval_assignment(assignment, motif_node_dict):\n if type(assignment.rvalue).__name__ == 'FuncCall':\n motif_node, tree_node = eval_function_call(assignment.rvalue, motif_node_dict)\n # consider \"var = XXX;\" and \"*var = XXX\" and \"&var = XXX\" situations\n if (type(assignment.lvalue).__name__ == 'ID' and assignment.lvalue.name in motif_node_dict) or (type(assignment.lvalue).__name__ == 'UnaryOp' and assignment.lvalue.expr.name in motif_node_dict):\n if not motif_node:\n print('\\33[101m' + '[error][eval_assignment]: ' + assignment.lvalue.name + ' is in the dictionary. 
MotifNode should not be None.\\033[0m')\n exit(1)\n else:\n motif_node_dict[assignment.lvalue.name].append(motif_node)\n return tree_node\n # In a case where a provenance node was declared but then assigned or reassigned. For example:\n # struct provenance *tprov;\n # ...\n # tprov = t->provenance;\n # tprov must then be in the motif_node_dict.\n elif type(assignment.lvalue).__name__ == 'ID' and assignment.lvalue.name in motif_node_dict:\n # we can only infer its type from the name of the variable\n motif_node = provenance.create_motif_node(assignment.lvalue.name)\n motif_node_dict[assignment.lvalue.name].append(motif_node)\n return None\n elif type(assignment.lvalue).__name__ == 'UnaryOp' and type(assignment.lvalue.expr).__name__ == 'ID' and assignment.lvalue.expr.name in motif_node_dict:\n # similar case as the previous one, except that we have: *tprov = ...\n # we can only infer its type from the name of the variable\n motif_node = provenance.create_motif_node(assignment.lvalue.expr.name)\n motif_node_dict[assignment.lvalue.expr.name].append(motif_node)\n return None\n else:\n #######################################################\n # We will consider other conditions if we ever see them\n # POSSIBLE CODE HERE.\n #######################################################\n return None", "def test_47_assign_statement(self):\n\t\tinput = \"\"\"var x,y:integer;\n\t\tfunction f(): array[1 .. 3] of real;\n\t\tvar a: array[1 .. 3] of real;\n\t\tbegin a[2]:=1.1; return a; end\n\t\tprocedure main(); var x:array[1 .. 3]of real;\n\t\tbegin f()[1]:=x[1]:=1; with y:real;y:real; do begin end end\"\"\"\n\t\texpect = \"Redeclared Variable: y\"\n\t\tself.assertTrue(TestChecker.test(input,expect,447))", "def isComplete(self):\n if self.encryptionGroupBox.isChecked():\n if self.encryptionKeyEdit.text() == \"\":\n complete = False\n else:\n if self.reencryptCheckBox.isChecked():\n complete = (self.encryptionKeyEdit.text() ==\n self.encryptionKeyAgainEdit.text())\n else:\n complete = True\n else:\n complete = True\n \n return complete", "def is_satisfiable(formula: Formula) -> bool:\n # Task 2.5c\n variables = list(sorted(formula.variables()))\n assignment_dict = all_models(list(variables))\n for val in truth_values(formula, assignment_dict):\n if val:\n return True\n return False", "def is_assigned(self, worker_id: str, cycle_id: str):\n return self._worker_cycles.first(worker_id=worker_id, cycle_id=cycle_id) != None", "def check_variable_copy_condition(\n nlp, phase_idx: int, use_from_phase_idx: int, name: str, decision_variable_attribute: str\n ):\n return (\n use_from_phase_idx is not None\n and use_from_phase_idx < phase_idx\n and name in getattr(nlp[use_from_phase_idx], decision_variable_attribute)\n )", "def add_an_assignment(cls):\n os.system('clear')\n while True:\n data = Ui.get_inputs(['Start date\\n\\tday(1-31): ', '\\tmonth(1-12): ', '\\tyear(2000+): ',\n 'End date\\n\\tday(1-31): ', '\\tmonth(1-12): ', '\\tyear(2000+): ',\n 'Assignment name\\n\\t'], \"Please provide the assignment details: \\n\")\n try:\n start_date_day = int(data[0])\n start_date_month = int(data[1])\n start_date_year = int(data[2])\n end_date_day = int(data[3])\n end_date_month = int(data[4])\n end_date_year = int(data[5])\n name_of_assign = str(data[6])\n except ValueError:\n Ui.print_message(\"\\nDate must be an integer!\\n\\n\")\n break\n\n if start_date_day > 31 or start_date_day < 1:\n Ui.print_message('\\nStart day value is incorrect')\n else:\n if start_date_month > 12 or start_date_month < 1:\n 
Ui.print_message('\\nStart month value is incorrect')\n else:\n if start_date_year > 9999 or start_date_year < 2000:\n Ui.print_message('\\nStart year value is incorrect')\n else:\n if end_date_day > 31 or end_date_day < 1:\n Ui.print_message('\\nEnd day value is incorrect')\n else:\n if end_date_month > 12 or end_date_month < 1:\n Ui.print_message('\\nEnd month value is incorrect')\n else:\n if end_date_year > 9999 or end_date_year < 1000:\n Ui.print_message('\\nEnd year value is incorrect')\n else:\n if len(name_of_assign) <= 1:\n Ui.print_message(\"\\nAssignment name have to be longer!\")\n else:\n list_of_names_of_assignments = []\n for i in Assignments.assignments_list:\n list_of_names_of_assignments.append(i.assignment_name)\n if name_of_assign in list_of_names_of_assignments:\n Ui.print_message(\"\\nAssignment name already exist, \"\n \"type another one!\")\n else:\n start_date = '{}-{}-{}'.format(start_date_year,\n start_date_month,\n start_date_day)\n end_date = '{}-{}-{}'.format(end_date_year,\n end_date_month,\n end_date_day)\n new_assignment = cls(start_date, end_date, name_of_assign)\n Assignments.assignments_list.append(new_assignment)\n Ui.print_message(\"\\nAssignment added!\\n\")\n Ui.get_inputs([''], \"Click enter to go back\")\n break # it stops the WHILE loop whenever passed information is incorrect, or assignment has been added" ]
[ "0.8660917", "0.7774996", "0.7236496", "0.715314", "0.6777561", "0.67663616", "0.6716149", "0.6668017", "0.6563448", "0.65057194", "0.64147335", "0.6396104", "0.6369181", "0.63129765", "0.63119334", "0.6073311", "0.6054601", "0.603511", "0.6031876", "0.5906102", "0.5893759", "0.5834775", "0.58189565", "0.5803393", "0.5753126", "0.56860167", "0.56567794", "0.5616562", "0.5597932", "0.55476034", "0.55349374", "0.5502036", "0.5442483", "0.54257125", "0.54251164", "0.5386905", "0.5356433", "0.53537357", "0.5352315", "0.53496486", "0.5288254", "0.5282782", "0.5272664", "0.5257124", "0.52427065", "0.5231361", "0.5228852", "0.5220992", "0.52146167", "0.5189411", "0.51813376", "0.5179374", "0.51552695", "0.51173854", "0.51086354", "0.5107649", "0.50892186", "0.50831074", "0.508216", "0.50756633", "0.5070073", "0.5065957", "0.50608295", "0.50582826", "0.5050397", "0.5042477", "0.5041276", "0.5039917", "0.50337684", "0.50243175", "0.50211626", "0.50185317", "0.5003243", "0.4997279", "0.4996392", "0.4987445", "0.49818665", "0.4977783", "0.4973448", "0.49733877", "0.49709395", "0.49702573", "0.49537027", "0.49536788", "0.49393794", "0.4936841", "0.4920291", "0.49195352", "0.49085668", "0.49051026", "0.4897632", "0.48942015", "0.4889904", "0.4876297", "0.4866493", "0.48623696", "0.48445946", "0.48423362", "0.48406896", "0.48276576" ]
0.81902444
1
Return True if `assignment` is consistent (i.e., words fit in crossword puzzle without conflicting characters); return False otherwise.
def consistent(self, assignment):
    overlaps = self.crossword.overlaps
    value_set = set()

    for variable in assignment:
        # check overlaps with neighbors that are also assigned
        neighbors = self.crossword.neighbors(variable)
        for neighbor in neighbors:
            overlap = overlaps[(variable, neighbor)]
            if neighbor in assignment:
                # the overlapping cells must hold the same letter
                if assignment[variable][overlap[0]] != assignment[neighbor][overlap[1]]:
                    return False

        # the assigned word must be exactly as long as the slot
        if variable.length != len(assignment[variable]):
            return False

        # collect assigned words to check distinctness below
        value_set.add(assignment[variable])

    # all assigned words must be distinct; duplicates shrink the set
    if len(value_set) != len(assignment):
        return False

    return True
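The check above reduces consistency to three rules: every word fits its slot, all assigned words are distinct, and overlapping cells agree. The standalone sketch below replays the same rules on plain dictionaries so they can be run outside the crossword class; the Var tuple, the overlap map, and the sample words are illustrative assumptions and are not part of this dataset row.

from collections import namedtuple

# Illustrative stand-in for a crossword variable (not from the dataset row).
Var = namedtuple("Var", ["name", "length"])

def is_consistent(assignment, overlaps):
    # rule 1: every word fits its slot exactly
    if any(var.length != len(word) for var, word in assignment.items()):
        return False
    # rule 2: all assigned words are distinct
    if len(set(assignment.values())) != len(assignment):
        return False
    # rule 3: overlapping cells carry the same letter
    for (v1, v2), (i, j) in overlaps.items():
        if v1 in assignment and v2 in assignment:
            if assignment[v1][i] != assignment[v2][j]:
                return False
    return True

across = Var("1-across", 4)
down = Var("1-down", 3)
overlaps = {(across, down): (0, 0)}  # the two slots share their first letter

print(is_consistent({across: "SAGE", down: "SUN"}, overlaps))  # True
print(is_consistent({across: "SAGE", down: "RUN"}, overlaps))  # False: 'S' vs 'R'

Keeping the three checks separate also makes it easy to report which rule a candidate assignment violated.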
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def consistent(self, assignment):\n # for each of the current assignments\n for word in assignment:\n # if the word does not fit in the gaps\n if len(assignment[word]) != word.length:\n # reject attempt\n return False\n # if the word is already in the assignment\n if list(assignment.values()).count(assignment[word]) > 1:\n # reject attempt\n return False\n # for each of the overlaps\n for overlap in self.crossword.overlaps:\n # if the overlap isn't empty and is an overlap for the word\n # overlaps are a superset: if the overlap of (x, y) is in the set, so is (y, x), so we can just go by the first overlap element\n if self.crossword.overlaps[overlap] is not None and overlap[0] == word:\n # try to access the word assignment for the other overlap target\n try:\n test_word = assignment[overlap[1]]\n # if it does not exist in the assignment\n except KeyError:\n # continue to the next overlap\n continue\n # if the other overlap target has been assigned\n else:\n # extract the letter we want to match for the overlap\n test_letter = test_word[self.crossword.overlaps[overlap][1]]\n # if the letters do not match\n if assignment[word][self.crossword.overlaps[overlap][0]] != test_letter:\n # reject attempt\n return False\n return True", "def consistent(self, assignment):\n for node1 in assignment:\n for node2 in assignment:\n\n if node1 != node2:\n #returns False if any assignmed words are the same\n if assignment[node1] == assignment[node2]:\n return False\n\n overlap= self.crossword.overlaps[node1,node2]\n if overlap != None:\n #checks if words assigned to node overlaps are the same letter\n if assignment[node1][overlap[0]] != assignment[node2][overlap[1]]:\n return False\n\n return True", "def assignment_complete(self, assignment):\n # for each variable in the crossword\n for variable in self.crossword.variables:\n # if the variable is not assigned a value\n if variable not in assignment:\n # the crossword is not complete\n return False\n return True", "def assignment_complete(self, assignment):\n if len(assignment) == len(self.domains):\n return True\n\n else:\n return False", "def assignment_complete(self, assignment):\n # print(\"Entered assignment_complete Function\")\n for var in assignment:\n if assignment[var] is None:\n return False\n return self.consistent(assignment)\n\n # raise NotImplementedError", "def consistent(self,assignment):\n return all(con.holds(assignment)\n for con in self.constraints\n if all(v in assignment for v in con.scope))", "def isAssignment(self):\n return _libsbml.Rule_isAssignment(self)", "def is_assignment(*args):\n return _ida_hexrays.is_assignment(*args)", "def _is_assignment(node: cst.CSTNode, assignment_node: cst.CSTNode) -> bool:\n if node is assignment_node:\n return True\n if isinstance(assignment_node, (cst.Import, cst.ImportFrom)):\n aliases = assignment_node.names\n if isinstance(aliases, cst.ImportStar):\n return False\n for alias in aliases:\n if alias.name is node:\n return True\n asname = alias.asname\n if asname is not None:\n if asname.name is node:\n return True\n return False", "def assignment(self):\n shards = self.line.split('=')\n if len(shards) == 2:\n return True", "def backtrack(self, assignment):\n # As stated above, if all variables in assignment is 1\n # then all values have been set and we return assignment \n if all(len(l) == 1 for l in assignment.values()):\n return assignment\n\n # Pick the next unnassigned variable that we are going to check \n key, values = self.select_unassigned_variable(assignment)\n # Loop through all the 
allowed values of this square in the sudoku board\n for value in values:\n # Do a deepcopy cuz otherwise R.I.P\n deep = copy.deepcopy(assignment)\n # Checks if this current value is consistent with the rest\n # of the sudoku board \n if self.check_consistency(deep, key, value):\n # IF it is consistent then we set this square to have this value \n deep[key] = [value]\n # Do inference check for hyper optimized code\n if self.inference(deep, self.get_all_arcs()):\n self.counter += 1\n result = self.backtrack(deep)\n if result is not False:\n return result\n else:\n self.fails += 1\n else:\n # Continue looping through the values of the currently selected \n # sudoku-square if the value was inconsistent with the board \n continue\n return False", "def isLegal(self):\n # checks for same values in rows\n for n in range(9):\n rows = set()\n for m in range(9):\n if self.puzzle[n][m] != 0:\n size = len(rows)\n rows.add(self.puzzle[n][m])\n if size == len(rows):\n return False\n\n #checks for same values in columns\n for m in range(9):\n cols = set()\n for n in range(9):\n if self.puzzle[n][m] != 0:\n size = len(cols)\n cols.add(self.puzzle[n][m])\n if size == len(cols):\n return False\n\n #checks for same values in sections\n sections = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]\n for r in sections:\n for c in sections:\n sects = set()\n for n in r:\n for m in c:\n if self.puzzle[n][m] != 0:\n size = len(sects)\n sects.add(self.puzzle[n][m])\n if size == len(sects):\n return False\n return True", "def goal_test(self, state):\r\n assignment = dict(state)\r\n return (len(assignment) == len(self.variables)\r\n and all(self.nconflicts(variables, assignment[variables], assignment) == 0\r\n for variables in self.variables))", "def check_assignment_consistency(self, assign_df=None, threshold=0.1):\n \n # If the user hasn't specified an assign_df, use one already calculated \n # for this NAPS_assigner instance\n if assign_df is None:\n set_assign_df = True\n assign_df = self.assign_df\n else:\n set_assign_df = False\n \n # First check if there are any sequential atoms\n carbons = pd.Series([\"C\",\"CA\",\"CB\"])\n carbons_m1 = carbons + \"m1\"\n seq_atoms = carbons[carbons.isin(assign_df.columns) & \n carbons_m1.isin(assign_df.columns)]\n seq_atoms_m1 = seq_atoms+\"m1\"\n #seq_atoms = list(seq_atoms)\n \n if seq_atoms.size==0:\n # You can't do a comparison\n assign_df[\"Max_mismatch_prev\"] = np.NaN\n assign_df[\"Max_mismatch_next\"] = np.NaN\n assign_df[\"Num_good_links_prev\"] = np.NaN\n assign_df[\"Num_good_links_next\"] = np.NaN\n return(assign_df)\n else:\n # First, get the i and i-1 shifts for the preceeding and \n # succeeding residues\n tmp = assign_df.copy()\n tmp = tmp.loc[tmp[\"Dummy_res\"]==False,]\n tmp.index = tmp[\"Res_N\"]\n tmp = tmp[list(seq_atoms)+list(seq_atoms_m1)]\n tmp_next = tmp.copy()\n tmp_next.index -= 1\n tmp_prev = tmp.copy()\n tmp_prev.index += 1\n tmp = tmp.join(tmp_next, rsuffix=\"_next\")\n tmp = tmp.join(tmp_prev, rsuffix=\"_prev\")\n # Calculate mismatch for each atom type\n for atom in seq_atoms:\n tmp[\"d\"+atom+\"_prev\"] = tmp[atom+\"m1\"] - tmp[atom+\"_prev\"]\n tmp[\"d\"+atom+\"_next\"] = tmp[atom] - tmp[atom+\"m1_next\"]\n # Calculate maximum mismatch\n tmp[\"Max_mismatch_prev\"] = tmp[\"d\"+seq_atoms+\"_prev\"].max(axis=1, \n skipna=True)\n tmp[\"Max_mismatch_next\"] = tmp[\"d\"+seq_atoms+\"_next\"].max(axis=1,\n skipna=True)\n \n # Calculate number of consistent matches\n tmp[\"Num_good_links_prev\"] = (tmp[\"d\"+seq_atoms+\"_prev\"]<threshold).sum(axis=1)\n 
tmp[\"Num_good_links_next\"] = (tmp[\"d\"+seq_atoms+\"_next\"]<threshold).sum(axis=1)\n \n # Join relevant columns back onto assign_df\n tmp[\"Res_N\"] = tmp.index\n assign_df = assign_df.join(tmp.loc[:,[\"Max_mismatch_prev\", \n \"Max_mismatch_next\", \n \"Num_good_links_prev\", \n \"Num_good_links_next\"]], \n on=\"Res_N\")\n if set_assign_df:\n self.assign_df = assign_df\n return(assign_df)", "def isAssigned(self):\n if self.getProton1Assignments() and self.getProton2Assignments():\n return 1\n else:\n return 0", "def win_condition(self):\n if self.letters_wrong < 5:\n if '__ ' in self.new_string:\n return False\n else:\n return True\n else:\n return True", "def FullCheck(field):\n temp_list = field[:]\n field_copy = field[:]\n if temp_list == Transform(field_copy, \"w\"):\n if temp_list == Transform(field_copy, \"a\"):\n if temp_list == Transform(field_copy, \"s\"):\n if temp_list == Transform(field_copy, \"d\"):\n return True\n return False", "def _consistentWithWA_(self, span, lan):\n\t\tif lan == 'src':\n\t\t\twordAlign = self.waMatrix\n\t\telse:\n\t\t\twordAlign = [[self.waMatrix[i][j] for i in xrange(len(self.waMatrix))] for j in xrange(len(self.waMatrix[0]))] \n\n\t\tpos1 = [j for i in xrange(span[0], span[1]) for j in xrange(len(wordAlign[i])) if wordAlign[i][j] == 1]\n\t\tif pos1 == []: return True\n\n\t\tfor i in xrange(span[0], span[1]):\n\t\t\tfor j in xrange(min(pos1), max(pos1) + 1):\n\t\t\t\tif sum([wordAlign[row][j] for row in xrange(len(wordAlign[:span[0]]))]) == 0 and \\\n\t\t\t\t\t\tsum([wordAlign[row][j] for row in xrange(span[1], len(wordAlign))]) == 0:\n\t\t\t\t\tcontinue\n\t\t\t\telse:\n\t\t\t\t\treturn False\n\t\t#print >> debug_log, 'consistent:', span\n\t\treturn True", "def check_correctness(self):\n\n with open(self.output_file, 'r') as output_file, open(self.gt_file, 'r') as gt_file:\n\n out_lines = output_file.readlines()\n gt_lines = gt_file.readlines()\n\n # Check for inequality\n if len(out_lines) != len(gt_lines):\n return 0\n\n # Check for inequality\n for i in range(len(out_lines)):\n out_split = out_lines[i].split()\n gt_split = gt_lines[i].split()\n\n if len(out_split) != len(gt_split):\n return 0\n\n for j in range(len(out_split)):\n # Treat slur and tie as equivalent\n if out_split[j] != gt_split[j] and\\\n ('slur' not in out_split[j] and 'tie' not in out_split[j]) and\\\n ('slur' not in gt_split[j] and 'tie' not in gt_split[j]):\n return 0\n\n return 1", "def viableWord(self, word, filledInSpaces):\r\n \r\n # Check if it fits blanks\r\n for (index, letter) in filledInSpaces:\r\n if letter != word[index]:\r\n return False\r\n\r\n # Check if it fits unused\r\n for letter in word:\r\n if letter in self.wrongLetters:\r\n return False\r\n\r\n return True", "def checkPermutation(string1, string2):\n string1_content = {}\n # Hash the first string\n for i in string1:\n if string1_content.get(i) is None:\n string1_content[i] = 1\n else:\n string1_content[i] += 1\n\n # For each character in the section string, search for it\n for i in string2:\n if string1_content.get(i) is None:\n return False\n string1_content[i] -= 1\n\n # Make sure every character in the first string had a matching character in the second string\n for key, value in string1_content.items():\n if value != 0:\n return False\n return True", "def inconsistent(p, guesses):\n for guess in guesses:\n res = check(guess[0], p)\n (rightly_positioned, permutated) = guess[1]\n if res != [rightly_positioned, permutated]:\n return True # inconsistent\n return False # i.e. 
consistent", "def check_combination(self, combination):\n\n # we first check if there are any pieces of the right value well placed.\n for j in range(0, 4):\n if combination[j] == self.answer[j]:\n self.try_return['well_placed'] += 1\n self.already_checked += [combination[j]]\n self.avoid += [j]\n\n for p in range(0, 4):\n for s in range(0, 4):\n if not p in self.avoid:\n if combination[s] == self.answer[p] and not combination[s] in self.already_checked:\n\n self.try_return['misplaced'] += 1\n self.duplicate += [combination[s]]\n if self.duplicate.count(combination[s]) > 1:\n self.try_return['misplaced'] -= 1", "def check_permutation_of(string1,string2):\n if len(string1) != len(string2): #O(1)\n return False\n return collections.Counter(string1) == collections.Counter(string2) #O(n+n) to make the dictionaries\n #O(n+n) to compare equality?\n #so O(4n) == O(n).", "def same_as(self, space, in_space):\n if self.marks == space.marks and self.genus == space.genus:\n return True\n space = space.complementary_component(in_space)\n if self.marks == space.marks and self.genus == space.genus:\n return True\n return False", "def backtrack(self, assignment):\n # if the assignment is complete\n if self.assignment_complete(assignment):\n # return the assignment, crossword is complete\n return assignment\n # pick a variable to try to assign\n var = self.select_unassigned_variable(assignment)\n # for each value in the variable's domain\n for value in self.order_domain_values(var, assignment):\n # attempt to assign this value and fit it into the crossword\n # make a copy of the current assignments\n trial = assignment.copy()\n # add the trial value to the test assignment\n trial[var] = value\n # if the test assignment is consistent\n if self.consistent(trial):\n # add the trial assignment to the current list of assignments\n assignment[var] = value\n # take the next backtrack step with this new assign,ent\n result = self.backtrack(assignment)\n # if the backtrack is a success\n if result is not None:\n # we have a match\n return result\n # a backtrack further down failed, so remove the trial assignment\n assignment.pop(var)\n # no assignment was possible, return None\n return None", "def test_get_consensus_assignment_overlapping_names(self):\r\n # here the 3rd level is different, but the 4th level is the same\r\n # across the three assignments. 
this can happen in practice if\r\n # three different genera are assigned, and under each there is\r\n # an unnamed species\r\n # (e.g., f__x;g__A;s__, f__x;g__B;s__, f__x;g__B;s__)\r\n # in this case, the assignment should be f__x.\r\n in1 = [['Ab', 'Bc', 'De', 'Jk'],\r\n ['Ab', 'Bc', 'Fg', 'Jk'],\r\n ['Ab', 'Bc', 'Hi', 'Jk']]\r\n\r\n params = {'id_to_taxonomy_fp': self.id_to_tax1_fp,\r\n 'reference_sequences_fp': self.refseqs1_fp}\r\n expected = (['Ab', 'Bc'], 1., 3)\r\n t = UclustConsensusTaxonAssigner(params)\r\n self.assertEqual(t._get_consensus_assignment(in1),\r\n expected)\r\n\r\n # here the third level is the same in 4/5 of the\r\n # assignments, but one of them (z, y, c) refers to a\r\n # different taxa since the higher levels are different.\r\n # the consensus value should be 3/5, not 4/5, to\r\n # reflect that.\r\n in2 = [['a', 'b', 'c'],\r\n ['a', 'd', 'e'],\r\n ['a', 'b', 'c'],\r\n ['a', 'b', 'c'],\r\n ['z', 'y', 'c']]\r\n expected = (['a', 'b', 'c'], 0.6, 5)\r\n t = UclustConsensusTaxonAssigner(params)\r\n self.assertEqual(t._get_consensus_assignment(in2),\r\n expected)", "def is_permutation(a, b):\n a, b = str(a), str(b)\n return(len(a) == len(b) and Counter(a) == Counter(b))", "def check_win(puzzle: str, solution: str) -> bool:\r\n # Check if every character besides the last is the same\r\n return puzzle[:-1] == solution[:-1]", "def _does_words_matches(original_word: str, encoded_word: str) -> bool:\n return(\n len(original_word) == len(encoded_word) and\n original_word[0] == encoded_word[0] and\n original_word[-1] == encoded_word[-1] and\n sorted(original_word[1:-1]) == sorted(encoded_word[1:-1])\n )", "def check_partition(self, sectioned_text, full_text):\n\n restitched_text = self.restitch_text(sectioned_text)\n\n length_check = (len(restitched_text) == len(full_text))\n\n return length_check", "def keep_cross_validation_fold_assignment(self):\n return self._parms.get(\"keep_cross_validation_fold_assignment\")", "def permutations_equal(str_a, str_b):\n if not isinstance(str_a, str) or not isinstance(str_b, str):\n raise AttributeError(\"inputs must be valid str\")\n if len(str_a) != len(str_b):\n return False\n\n return Counter(str_a) == Counter(str_b)", "def check_assignment(assignments: dict, point: Point, value: str) -> bool:\n\n # check base condition: do the constraints hold for current point\n if not check_constraint_satisfied(assignments, point, value):\n print(' → base constraint failed:', point, '=', value)\n return False\n\n # check neighbouring conditions: do the constraints (still) hold for other points\n temp_assignment = copy.deepcopy(assignments)\n temp_assignment[point] = value\n\n # loop through points that can attack the current point, as kings\n print(' > checking neighbouring kings')\n for pt in filter(lambda p: p in assignments and assignments[p] == 'king', attack_points_king[point]):\n if not check_constraint_satisfied(temp_assignment, pt, assignments[pt]):\n print(' → neighbouring constraint failed for neighbour', pt, '=', assignments[pt])\n return False\n\n # loop through points that can attack the current point, as knights\n print(' > checking neighbouring knights')\n for pt in filter(lambda p: p in assignments and assignments[p] == 'knight', attack_points_knight[point]):\n if not check_constraint_satisfied(temp_assignment, pt, assignments[pt]):\n print(' → neighbouring constraint failed for neighbour', pt, '=', assignments[pt])\n return False\n\n # all constraints are satisfied!\n return True", "def 
_spaceEfficientHasRepeatCharacters(check_string):\n for i in range(len(check_string)):\n for j in range(len(check_string)):\n if check_string[i] == check_string[j] and i != j:\n return True\n return False", "def is_assignment_modified(self, updates, original):\n if 'assigned_to' not in updates:\n return False\n updates_assigned_to = updates.get('assigned_to') or {}\n original_assigned_to = original.get('assigned_to') or {}\n return updates_assigned_to.get('desk') != original_assigned_to.get('desk') or \\\n updates_assigned_to.get('user') != original_assigned_to.get('user') or \\\n updates_assigned_to.get('contact') != original_assigned_to.get('contact')", "def is_solved(self):\n return self._from_word == self._to_word", "def is_complete(self, variables):\n for var in variables:\n if not self.has_assignment_for(var):\n return False\n\n return True", "def checkPermutation(s, t):\n\n # Count each unique letter in both strings and compare the two dicts.\n s_count = {}\n t_count = {}\n for character in s:\n s_count[character] = s_count.get(character, 0) + 1\n\n for character in t:\n t_count[character] = t_count.get(character, 0) + 1\n\n return s_count == t_count\n\n # Time Complexity: O(n)\n # Space Complexity: O(n)", "def consistent(self):\n if self.var1.get_value() is None or self.var2.get_value() is None:\n return True\n\n return self.var1.value != self.var2.value", "def uses_all(w, letters):\n\treturn set(letters).issubset(set(w))", "def string_permutation(self, a,b):\n for c in a:\n if c not in b:\n return False\n return True", "def is_cyclic_conjugate(self, w):\n l1 = len(self)\n l2 = len(w)\n if l1 != l2:\n return False\n w1 = self.identity_cyclic_reduction()\n w2 = w.identity_cyclic_reduction()\n letter1 = w1.letter_form\n letter2 = w2.letter_form\n str1 = ' '.join(map(str, letter1))\n str2 = ' '.join(map(str, letter2))\n if len(str1) != len(str2):\n return False\n\n return str1 in str2 + ' ' + str2", "def is_permutation3(A, B, C):\n return set(A) == set(B) == set(C)", "def test_backward_compatibility_for_multiple_answers(self):\r\n\r\n answers = [\"Second\", \"Third\", \"Fourth\"]\r\n problem = self.build_problem(answer=\"_or_\".join(answers), case_sensitive=True)\r\n\r\n for answer in answers:\r\n # Exact string should be correct\r\n self.assert_grade(problem, answer, \"correct\")\r\n # Other strings and the lowercase version of the string are incorrect\r\n self.assert_grade(problem, \"Other String\", \"incorrect\")\r\n\r\n problem = self.build_problem(answer=\"_or_\".join(answers), case_sensitive=False)\r\n for answer in answers:\r\n # Exact string should be correct\r\n self.assert_grade(problem, answer, \"correct\")\r\n self.assert_grade(problem, answer.lower(), \"correct\")\r\n self.assert_grade(problem, \"Other String\", \"incorrect\")", "def is_full(self):\n return set(self._parent.letters()) == set(self.winners())", "def fold_assignment(self):\n return self._parms.get(\"fold_assignment\")", "def __eq__(self, c):\n for i in range(SPACE_DIMENSION):\n if self[i]!=c[i]:\n return False\n return True", "def uses_only(w, letters):\n\treturn set(w).issubset(set(letters))", "def _validate_word(self, word):\n return type(word) == type('a') and set(self._letters) == set(list(word))", "def isInconsistent(self, problemname : str) -> bool:\n return problemname in self.inconsistentset", "def is_solved(bd):\n \"\"\" CONSTRAINT: Assumes board is valid\"\"\"\n count = 0\n for pos in bd:\n if pos == \" \":\n count += 1\n else:\n continue\n if count > 0:\n return False\n else:\n return True", "def 
are_same(string_window, pat_window):\n for i in range(256):\n if string_window[i] != pat_window[i]:\n return False\n return True", "def antecedants_matched(self, datacase):\n for item in self.cond_set:\n if datacase[item] != self.cond_set[item]:\n return False\n return True", "def isUnique(self, word):\n if len(word) <= 1:\n n = word\n else:\n n = word[0] + str(len(word) - 2) + word[-1] #Get the abbrviation.\n if n not in self.abbrdict or (self.abbrdict[n] == 1 and word in self.origdict): #If it is not in abbrdict or the abbrevation count is 1 and the word has appeared in dictionary, return true.\n return True\n else: #Otherwise, return false.\n return False", "def test_check_ambigous(self):\r\n\r\n flow0 = Flowgram(\"\")\r\n flow1 = Flowgram(\r\n \"0 1.2 2.1 3.4 0.02 0.01 1.02 0.08 0.5 1.0 4.1 0.0 0.0 1.23 0.0 3.1\")\r\n flow2 = Flowgram(\r\n \"0 1.2 2.1 3.4 0.02 0.01 1.02 0.08 0.5 1.0 4.1 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.23 0.0 3.1\")\r\n flow3 = Flowgram(\r\n \"0 1.2 2.1 3.4 0.02 0.0 0.0 0.01 1.02 0.08 0.5 1.0 4.1 0.0 0.0 0.0 0.0 1.23 0.0 3.1\")\r\n\r\n self.assertEqual(check_ambigous(flow0, 4), False)\r\n self.assertEqual(check_ambigous(flow1, 4), False)\r\n self.assertEqual(check_ambigous(flow2, 4), True)\r\n self.assertEqual(check_ambigous(flow2, 7), True)\r\n self.assertEqual(check_ambigous(flow2, 8), False)\r\n self.assertEqual(check_ambigous(flow3, 3), True)\r\n self.assertEqual(check_ambigous(flow3, 4), False)", "def answer_ok(a):\n (rightly_positioned, permutated) = a\n if (rightly_positioned + permutated > number_of_positions) \\\n or (rightly_positioned + permutated < len(colours) - number_of_positions):\n return False\n if rightly_positioned == 3 and permutated == 1:\n return False\n return True", "def backtrack(csp):\n\n if len(csp.assignment) == len(csp.variables):\n return True\n\n variable = select_unassigned_variable(csp)\n value = order_domain_values(csp, variable)\n #print variable\n #print value\n flag = 0\n for x in value:\n csp.variables.begin_transaction()\n if is_consistent(csp, variable, x):\n #print \"past is_consistent\"\n for var in csp.variables:\n if var == variable:\n var.assign(x)\n var.is_assigned()\n solution = backtrack(csp)\n if solution != False:\n return True\n csp.variables.rollback()\n return False", "def check_strings(aword, anotherword):\n if aword == anotherword:\n return True\n else:\n return False", "def _check_if_satisfiable(self):\n # Search for a satisfying assignment\n all_variables = self.all_variables()\n\n # Try to find some assignment of the constrained vars\n counter = count()\n next_count = next(counter)\n queue = [(0, 0, next_count, {})]\n\n while queue:\n num_attempts, _, _, assignments = hq.heappop(queue)\n num_attempts += 1\n # Full assignment?\n # keep out of loop for empty constraint edge case\n if len(assignments) == len(all_variables):\n return True\n for v in sorted(all_variables - set(assignments.keys())):\n if isinstance(v, DiscreteVariable):\n possible_assignments = self.get_possible_assignments(v)\n else:\n possible_assignments = [v.sample() \\\n for _ in range(10*(1+num_attempts))]\n for assignment in possible_assignments:\n new_assignments = assignments.copy()\n new_assignments[v] = assignment\n # Constraint violated\n if not self.check(new_assignments):\n continue\n # Finish early\n if len(new_assignments) == len(all_variables):\n return True\n next_count = next(counter)\n hq.heappush(queue, (num_attempts, -len(new_assignments),\n -next_count, new_assignments))\n\n if next_count > gc.max_satisfy_tries:\n import ipdb; 
ipdb.set_trace()\n break\n\n return False", "def arrayStringsAreEqual1(self, word1: List[str], word2: List[str]) -> bool:\n word1str = ''.join(word1)\n word2str = ''.join(word2)\n return word1str == word2str", "def isValidTest(self):\n if not self.hasError():\n return False\n distance = dameraulevenshtein(self.word, self.error) \n if(distance > 1):\n return False\n regex = '.*[^a-zA-Z].*'\n if re.match(regex, self.word) or re.match(regex, self.error):\n return False\n return True", "def test_equality(self):\n\n # change .phones\n pw1: PhonologicalWord = PhonologicalWord(\n phones=(\"P\", \"ER0\", \"M\", \"IH1\", \"T\"),\n stress_pattern=[\n Stress.NON_VOWEL,\n Stress.NO_STRESS,\n Stress.NON_VOWEL,\n Stress.PRIMARY,\n Stress.NON_VOWEL,\n ],\n )\n pw2: PhonologicalWord = PhonologicalWord(\n phones=(\"P\", \"ER0\", \"M\", \"IH1\", \"P\"),\n stress_pattern=[\n Stress.NON_VOWEL,\n Stress.NO_STRESS,\n Stress.NON_VOWEL,\n Stress.PRIMARY,\n Stress.NON_VOWEL,\n ],\n )\n self.assertNotEqual(pw1, pw2)\n\n # change .stress_pattern\n pw1: PhonologicalWord = PhonologicalWord(\n phones=(\"P\", \"ER0\", \"M\", \"IH1\", \"T\"),\n stress_pattern=[\n Stress.NON_VOWEL,\n Stress.NO_STRESS,\n Stress.NON_VOWEL,\n Stress.PRIMARY,\n Stress.NON_VOWEL,\n ],\n )\n pw2: PhonologicalWord = PhonologicalWord(\n phones=(\"P\", \"ER0\", \"M\", \"IH1\", \"T\"),\n stress_pattern=[\n Stress.NON_VOWEL,\n Stress.PRIMARY,\n Stress.NO_STRESS,\n Stress.NON_VOWEL,\n Stress.NO_STRESS,\n Stress.NON_VOWEL,\n ],\n )\n self.assertNotEqual(pw1, pw2)", "def __forward_check(self, assigned_var, assigned_value, unassigned_vars):\n for unassigned_neighbor in self.__unassigned_neighbors(assigned_var, unassigned_vars):\n consistent_values = self.__consistent_domain_values(assigned_var, assigned_value, unassigned_neighbor)\n if len(consistent_values) == 0:\n return False\n else:\n unassigned_neighbor.domain = consistent_values\n return True", "def isUnique(self, word):\n if len(word) < 3:\n abbrev = word\n else:\n abbrev = word[0] + str(len(word) - 2) + word[-1]\n if not abbrev in self.abbrev_dict:\n return True\n elif word in self.abbrev_dict[abbrev] and len(self.abbrev_dict[abbrev]) == 1:\n return True\n else:\n return False", "def test_is_strict(self):\n assert self.RNA(\"\").is_strict()\n assert self.PROT(\"A\").is_strict()\n assert self.RNA(\"UAGCACUgcaugcauGCAUGACuacguACAUG\").is_strict()\n assert not self.RNA(\"CAGUCGAUCA-cgaucagUCGAUGAC\").is_strict()", "def handle_same_length(self, a, b):\n found = False\n for i, j in zip(a, b):\n if i == j:\n continue\n elif found:\n return False # this case is the second found edit, thus return false\n else:\n found = True\n return True", "def _ok(self, assignment_graph, source, value, target):\n target_values = assignment_graph[target]\n return len(target_values - set([value])) > 0", "def _testit(words):\n w_list = list(words)\n pairs = defaultdict(lambda: [0, 0])\n if not _is_component(w_list):\n return False\n for word in w_list:\n pairs[word[0].lower()][0] += 1\n pairs[word[-1].lower()][1] += 1\n lst = sorted([pair[0] - pair[1] for pair in pairs.values()])\n return all(i == 0 for i in lst[1:-1]) and \\\n lst[-1] <= 1 and sum(lst[::len(lst) - 1]) == 0", "def checker(self) -> bool:\n checker = 0\n if len(self._puzzle) == 4:\n for i, num in enumerate(self._puzzle):\n if num == Puzzle.solution[checker]:\n if i == checker:\n checker +=1\n if checker == 4:\n return True\n else:\n return False", "def test_uc_to_assignment(self):\r\n expected = {'q1': (['A', 'B', 'C'], 1.0, 2),\r\n 'q2': (['A', 'H', 'I', 
'J'], 2. / 3., 3),\r\n 'q3': (['Unassigned'], 1.0, 1),\r\n 'q4': (['Unassigned'], 1.0, 1),\r\n 'q5': (['Unassigned'], 1.0, 1)\r\n }\r\n params = {'id_to_taxonomy_fp': self.id_to_tax1_fp,\r\n 'reference_sequences_fp': self.refseqs1_fp}\r\n t = UclustConsensusTaxonAssigner(params)\r\n actual = t._uc_to_assignment(self.uc1_lines)\r\n self.assertEqual(actual, expected)\r\n\r\n # change label for unassignable\r\n expected = {'q1': (['A', 'B', 'C'], 1.0, 2),\r\n 'q2': (['A', 'H', 'I', 'J'], 2. / 3., 3),\r\n 'q3': (['x'], 1.0, 1),\r\n 'q4': (['x'], 1.0, 1),\r\n 'q5': (['x'], 1.0, 1)\r\n }\r\n params = {'id_to_taxonomy_fp': self.id_to_tax1_fp,\r\n 'reference_sequences_fp': self.refseqs1_fp,\r\n 'unassignable_label': 'x'}\r\n t = UclustConsensusTaxonAssigner(params)\r\n actual = t._uc_to_assignment(self.uc1_lines)\r\n self.assertEqual(actual, expected)", "def test_is_consistent(self):\n self.assertTrue(self.phonebook.is_consistent())\n self.phonebook.add(\"Bob\", \"12345\")\n self.assertTrue(self.phonebook.is_consistent())\n self.phonebook.add(\"Mary\", \"012345\")\n self.assertTrue(self.phonebook.is_consistent())\n self.phonebook.add(\"Sue\", \"12345\") # identical to Bob\n self.assertTrue(self.phonebook.is_consistent())\n self.phonebook.add(\"Sue\", \"123\") # prefix of Bob\n self.assertTrue(self.phonebook.is_consistent())", "def _transition_possible_test_(self, word_in):\n if self._transition_possible_epsilon_(word_in):\n return False\n word_in_transposed = wordoftuples_to_tupleofwords(word_in)\n return all(self.compare_to_tape(track_number, word)\n for track_number, word in enumerate(word_in_transposed))", "def has_assignment_for(self, var):\n return self.variable_to_value.get(var) != None", "def has_palindrome_permutation(given_string):\n\n unpaired_characters = set()\n\n for char in given_string:\n if char in unpaired_characters:\n unpaired_characters.remove(char)\n else:\n unpaired_characters.add(char) \n\n return len(unpaired_characters) <= 1", "def is_permutation(A, B):\n return set(A) == set(B)", "def test_save_assignment_file(self):\n\n results = GenomePropertiesResultsWithMatches(*self.test_genome_property_results, properties_tree=self.test_tree)\n\n engine = self.engine\n results.to_assignment_database(engine)\n\n assignment_caches = load_assignment_caches_from_database_with_matches(engine)\n new_results = GenomePropertiesResultsWithMatches(*assignment_caches, properties_tree=self.test_tree)\n\n self.assertEqual(results.sample_names, new_results.sample_names)\n self.assertEqual(results.property_results.equals(new_results.property_results), True)\n self.assertEqual(results.step_results.equals(new_results.step_results), True)\n self.assertEqual(results.step_matches.equals(new_results.step_matches), True)", "def can_make_word(word, letters):\n grouped_chars = group_input(word)\n for char in letters:\n\n if is_empty(grouped_chars):\n return True\n\n if char in grouped_chars and grouped_chars[char] > 0:\n grouped_chars[char] -= 1\n\n return is_empty(grouped_chars)", "def permutation_is_valid(permutation):\n pass", "def palindrome_permutation(w):\n w = w.strip().replace(' ', '')\n chars = {}\n for c in w:\n try:\n chars[c] += 1\n except KeyError:\n chars[c] = 1\n\n if len(w) % 2 == 0:\n #Check if there is an even number\n #of every character in w.\n return all(x % 2 == 0 for x in chars.values()) \n else:\n #Check if there is an even number\n #of every character in w,\n #except for exactly one character.\n found_odd = False\n for c in chars:\n if chars[c] % 1 == 0:\n if not found_odd:\n 
found_odd = True\n else:\n return False\n \n if found_odd:\n return True\n else:\n return False", "def verify(self, word):\n if len(word) < 2:\n return (True, word)\n\n if word.lower() in self.replacement_words.keys():\n return (True, self.replacement_words[word.lower()])\n\n if word.lower() in self.word_list:\n return (True, word)\n\n if word.lower() in self.ignored_words:\n return (True, word)\n\n return (False, word)", "def is_equivalence(self) -> bool:", "def isWordSet(self):\n return len(self.getWord()) != 0", "def check(self, s: str, mem: dict):\n dp = [False for _ in range(len(s)+1)]\n dp[0] = True\n for i in range(1, len(s)+1):\n for j in range(i):\n if dp[j] and s[j:i] in mem:\n dp[i] = True\n return dp[-1]", "def _is_duplicate(a: str, b: str) -> bool:\n la = len(a)\n lb = len(b)\n diff = abs(la - lb)\n if diff > 50:\n return False\n denom = min(la, lb) + diff / 2\n ratio = levenshtein(a.casefold(), b.casefold()) / denom\n return ratio < 0.1", "def chars_are_equal(new_char, old_char):\n new_token = Token.Aborted if grayed else new_char.token\n\n # We ignore z-index, that does not matter if things get painted.\n return new_char.char == old_char.char and new_token == old_char.token", "def permutation_strings(input, input_two):\n if len(input) != len(input_two):\n return False\n else:\n return sorted(input) == sorted(input_two)", "def check_correctness(self, expected, got):\n expected_lines = expected.strip().splitlines()\n got_lines = got.strip().splitlines()\n if len(got_lines) != len(expected_lines):\n return False\n else:\n for exp, got in zip(expected_lines, got_lines):\n if self.params['strictwhitespace']:\n if exp.rstrip() != got.rstrip():\n return False\n else:\n if exp.strip().split() != got.strip().split():\n return False\n return True", "def isUnique(self, word):\n abbr = self.get_abbr(word)\n if abbr not in self.abbr:\n return True\n elif len(self.abbr[abbr]) == 1 and word == self.abbr[abbr][0]:\n return True\n else:\n return False", "def is_pair_allowed(a, b):\n if a == complementary(b):\n return True\n if a == 'G' and b == 'U' or a == 'U' and b == 'G':\n return True\n return False", "def check_match_over(self) -> bool:\n # check if programs filled\n self.program_is_full = [any(~np.isnan(program[:self.program_slots[i]]))\n for i, program in enumerate(self.program_matches)]\n if np.all(self.program_is_full):\n return True\n\n # check if candidates matched\n # I think this is inaccurate -- a candidate can tentatively match but not\n # fully match?\n self.applicant_is_matched = self.applicant_matches != -1\n if np.all(self.applicant_is_matched):\n return True\n\n # check for overlap between unfilled programs and unmatched applicants\n for i, program_matched in enumerate(self.program_is_full):\n if not program_matched:\n for j, applicant_matched in enumerate(self.applicant_is_matched):\n if not applicant_matched:\n if i in self.applicant_ranks[j] and j in self.program_ranks[i]:\n return True\n\n # otherwise, match isn't over\n return False", "def check_grid(self,word_id, words, high_conf):\r\n assign = None\r\n max_satisfied = 0\r\n possible_words = words[:]\r\n for word in words:\r\n if high_conf:\r\n assign = word \r\n num_of_satisfied = 0\r\n constraints = self.words[word_id].constraints\r\n for constraint in constraints:\r\n if word_id == constraint.word1:\r\n if len(self.words[constraint.word2].word) != 0:\r\n flag = constraint.check_constraint(word,self.words[constraint.word2].word)\r\n if not flag:\r\n possible_words.remove(word)\r\n break\r\n num_of_satisfied += 
1\r\n elif word_id == constraint.word2:\r\n if len(self.words[constraint.word1].word) != 0:\r\n flag = constraint.check_constraint(self.words[constraint.word1].word,word)\r\n if not flag:\r\n possible_words.remove(word)\r\n break\r\n num_of_satisfied += 1\r\n if word in possible_words:\r\n if num_of_satisfied > max_satisfied:\r\n assign = word\r\n max_satisfied = num_of_satisfied\r\n \r\n if len(possible_words) == 0:\r\n return False, None\r\n elif max_satisfied > 0 or high_conf: \r\n return True, assign\r\n else:\r\n return True, None", "def check_duplicate(triple: str, result: List[str]) -> bool:\n fields = triple.strip().split(', ')\n assert len(fields) == 13\n assert fields[9] == 'BERT'\n psuedo_triple = fields[:11]\n psuedo_triple[9] = 'RELEVANCE'\n return ', '.join(psuedo_triple) in result", "def is_associative(self, s):\n base = range(self.cardinality)\n return all(self.operations[s][self.operations[s][x][y]][z] ==\n self.operations[s][x][self.operations[s][y][z]] \n for x in base for y in base for z in base)", "def test_assign_seqs_exceeds_error_correction_unassigned(self):\r\n\r\n # Handles single fasta and single qual, disabled bc correction,\r\n # writes unassigned sequences, retains barcodes, starts enumeration\r\n # at 1000, generic 12 base pair barcode type.\r\n file_data = {}\r\n file_data['fasta_files'] = [self.valid_fasta_file_with_bc_errors]\r\n file_data['qual_files'] = [self.valid_qual_file_no_errors]\r\n file_data['demultiplexed_seqs_f'] = FakeOutFile()\r\n file_data['demultiplexed_qual_f'] = FakeOutFile()\r\n file_data['unassigned_seqs_f'] = FakeOutFile()\r\n file_data['unassigned_qual_f'] = FakeOutFile()\r\n\r\n ids_bcs_added_field = {('AACTCGTCGATG', ''): 's1',\r\n ('AGCAGCACTTGT', ''): 's2', ('ACCGCAGAGTCA', ''): 's3'}\r\n bc_lens = [12]\r\n all_bcs = ['AACTCGTCGATG', 'AGCAGCACTTGT', 'ACCGCAGAGTCA']\r\n keep_barcode = True\r\n barcode_type = 12\r\n max_bc_errors = 1.5\r\n start_index = 1000\r\n write_unassigned_reads = True\r\n disable_bc_correction = True\r\n added_demultiplex_field = None\r\n\r\n log_data, bc_freqs, seq_counts, corrected_bc_count =\\\r\n assign_seqs(file_data, ids_bcs_added_field, bc_lens, all_bcs,\r\n keep_barcode, barcode_type, max_bc_errors, start_index,\r\n write_unassigned_reads, disable_bc_correction,\r\n added_demultiplex_field)\r\n\r\n expected_demultiplexed_fasta_seq = '>s2_1002 IJKL0003 orig_bc=AGCAGCACTTGT new_bc=AGCAGCACTTGT bc_diffs=0\\nAGCAGCACTTGTGACCGATTACGATAACG\\n'\r\n expected_demultiplexed_qual_seq = '>s2_1002 IJKL0003 orig_bc=AGCAGCACTTGT new_bc=AGCAGCACTTGT bc_diffs=0\\n30 27 11 16 30 19 13 19 16 15 24 12 10 20 16 20 25 27 22 28 16 22 16 18 12 13 16 25 17\\n'\r\n self.assertEqual(file_data['demultiplexed_seqs_f'].data,\r\n expected_demultiplexed_fasta_seq)\r\n self.assertEqual(file_data['demultiplexed_qual_f'].data,\r\n expected_demultiplexed_qual_seq)\r\n expected_unassigned_fasta_seq = '>Unassigned_1000 ABCD0001 orig_bc=TACTCGTCGATG new_bc=None bc_diffs=0\\nTACTCGTCGATGCAGGACGAGACGAGGTT\\n>Unassigned_1001 EFGH0002 orig_bc=GCCGCAGAGTCA new_bc=None bc_diffs=0\\nGCCGCAGAGTCACCAGATTACGAGATTA\\n'\r\n expected_unassigned_qual_seq = '>Unassigned_1000 ABCD0001 orig_bc=TACTCGTCGATG new_bc=None bc_diffs=0\\n29 13 24 14 10 14 16 13 30 10 13 11 30 26 11 11 29 20 19 16 24 17 29 28 11 27 14 24 24\\n>Unassigned_1001 EFGH0002 orig_bc=GCCGCAGAGTCA new_bc=None bc_diffs=0\\n13 22 15 12 10 14 23 13 25 22 15 20 12 14 27 23 22 19 24 18 19 20 28 10 17 14 17 13\\n'\r\n self.assertEqual(file_data['unassigned_seqs_f'].data,\r\n 
expected_unassigned_fasta_seq)\r\n self.assertEqual(file_data['unassigned_qual_f'].data,\r\n expected_unassigned_qual_seq)\r\n\r\n expected_log_data = {'ACCGCAGAGTCA,s3': 0, 'AACTCGTCGATG,s1': 0,\r\n 'AGCAGCACTTGT,s2': 1}\r\n expected_bc_freqs = {'TACTCGTCGATG': 1, 'GCCGCAGAGTCA': 1,\r\n 'AGCAGCACTTGT': 1}\r\n expected_seq_counts = 3\r\n expected_corrected_bc_count = [0, 0]\r\n\r\n self.assertEqual(log_data, expected_log_data)\r\n self.assertEqual(bc_freqs, expected_bc_freqs)\r\n self.assertEqual(seq_counts, expected_seq_counts)\r\n self.assertEqual(corrected_bc_count, expected_corrected_bc_count)", "def is_correct(self, ans):\n \n seq = self.player_seq.copy()\n seq.append(ans)\n return seq[-1] == self.sequence[len(seq) - 1]", "def is_complete(self):\n if self.input_alphabet is None:\n raise ValueError(\"No input alphabet is given. \"\n \"Try calling determine_alphabets().\")\n\n for state in self.iter_states():\n for transition in state.transitions:\n if len(transition.word_in) != 1:\n return False\n\n transition_classes_by_word_in = full_group_by(\n state.transitions,\n key=lambda t: t.word_in)\n\n for key, transition_class in transition_classes_by_word_in:\n if len(transition_class) > 1:\n return False\n\n # all input labels are lists, extract the only element\n outgoing_alphabet = [key[0] for key, transition_class in\n transition_classes_by_word_in]\n if not sorted(self.input_alphabet) == sorted(outgoing_alphabet):\n return False\n\n return True", "def is_aligned_dna(sequence):\r\n #ensure that the given sequence is uppercase\r\n sequence = sequence.upper()\r\n \r\n #replace all A C G and T and compare length with 0\r\n if len(sequence.replace(\"A\", \"\").replace(\"C\", \"\").replace(\"G\",\"\").replace(\"T\",\"\").replace(\"-\",\"\")) == 0:\r\n return True\r\n else:\r\n return False", "def is_equality(s):\n return s == \"=\"", "def correct(self):\n return self._solution == self._alternatives.value" ]
[ "0.8402212", "0.8299707", "0.702421", "0.6465896", "0.64497894", "0.6389745", "0.6059358", "0.6028566", "0.58353883", "0.5827617", "0.5789568", "0.5764622", "0.57099473", "0.5694708", "0.5602207", "0.55700195", "0.5525498", "0.5517784", "0.5466885", "0.546393", "0.54609096", "0.54444313", "0.54313844", "0.54272515", "0.54264945", "0.5421803", "0.5414445", "0.5414093", "0.5387538", "0.5380882", "0.5346807", "0.5332533", "0.5312555", "0.52983433", "0.52772665", "0.5269252", "0.52478945", "0.52295715", "0.52107996", "0.5204828", "0.5203903", "0.5198879", "0.5198483", "0.5177787", "0.5176812", "0.51555187", "0.5154357", "0.5154074", "0.5150257", "0.5141982", "0.5134884", "0.5121956", "0.51193136", "0.51135063", "0.51088184", "0.5107475", "0.5098557", "0.5097224", "0.5092993", "0.5076178", "0.5072098", "0.5069354", "0.50683093", "0.5061257", "0.50600076", "0.50536495", "0.50520027", "0.5047303", "0.5028426", "0.50245655", "0.5023849", "0.50219", "0.50192463", "0.5006239", "0.5005145", "0.49977311", "0.49924868", "0.4983313", "0.4980777", "0.4969674", "0.49692765", "0.4967836", "0.49663615", "0.4963561", "0.49580646", "0.49573678", "0.49570987", "0.4956541", "0.4954008", "0.4951771", "0.4950518", "0.49449164", "0.49434495", "0.49387825", "0.4933931", "0.49325958", "0.4929721", "0.49284908", "0.4925007", "0.4924399" ]
0.83261746
1
Return a list of values in the domain of `var`, in order by the number of values they rule out for neighboring variables. The first value in the list, for example, should be the one that rules out the fewest values among the neighbors of `var`.
def order_domain_values(self, var, assignment): # print("Entered order_domain_values Function") ordered_variables = [] # print("Var") # print(var) # print("self.domains[var]") # print(self.domains[var]) # print("self.crossword.neighbor(var)") # print(self.crossword.neighbors(var)) neighbors_to_check = self.crossword.neighbors(var).difference(assignment.keys()) for word in self.domains[var]: n = 0 for neighbor in neighbors_to_check: overlap = self.crossword.overlaps[(var, neighbor)] for neighbor_word in self.domains[neighbor]: if ( word[overlap[0]] is not neighbor_word[overlap[1]] or word is neighbor_word): n += 1 ordered_variables.append( (word, n) ) ordered_variables.sort(key=self.orderFunc) # print("ordered_variables") # print(ordered_variables) # input() return ordered_variables # raise NotImplementedError
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def eliminate_from_neighbors(csp, var) :\n reduced = []\n val = csp.get_assigned_value(var)\n replacement = []\n for constraint in csp.constraints_between(var,None):\n var2 = constraint.var2\n domainCopy = csp.domains[var2][:]\n numLeft = len(domainCopy)\n if (val!=None):\n for i in xrange(len(domainCopy)):\n possibleVal2 = domainCopy[i]\n check = constraint.check(val,possibleVal2)\n if (check==False):\n didEliminate = csp.eliminate(var2,possibleVal2)\n if (didEliminate):\n numLeft-=1\n if var2 not in reduced:\n reduced.append(var2)\n if numLeft==0:\n return None\n return sorted(reduced)", "def neighbors(self, var):\n # This is just to make this method technically correct in all contexts we want to support (i.e., we need to\n # account for the possibility that there will be unary constraints).\n arcs = [key for key in self.__constraints.keys() if type(key) is not Variable]\n arcs_involving_var = [arc for arc in arcs if var in arc]\n flattened_arcs = [neighbor for arc in arcs_involving_var for neighbor in arc]\n return [v for v in flattened_arcs if v != var]", "def eliminate_from_neighbors(csp, var) :\n eliminated_vars=[]\n val1s=csp.get_domain(var)\n neighbors=csp.get_neighbors(var)\n for neighbor in neighbors:\n eliminated=False\n constraints=csp.constraints_between(var,neighbor)\n tem=csp.copy()\n neighbor_domain=tem.get_domain(neighbor)\n\n for val2 in neighbor_domain:\n satisfied=False\n for val1 in val1s:\n good=True\n for constraint in constraints:\n if not constraint.check(val1,val2):\n good=False\n break\n if good:\n satisfied=True\n break\n if not satisfied:\n csp.eliminate(neighbor,val2)\n eliminated=True\n if eliminated:\n eliminated_vars.append(neighbor)\n if len(csp.get_domain(neighbor))==0:\n return None\n return sorted(eliminated_vars)", "def variable_bounds(problem):\n return ([\n problem['state_bounds'][var] if problem['state_bounds'][var] is not None else (-np.inf, np.inf)\n for _ in range(problem['N'] - 1)\n for var in range(problem['num_states'])\n ] + [\n problem['input_bounds'][inp] if problem['input_bounds'][inp] is not None else (-np.inf, np.inf)\n for _ in range(problem['N'] + 1)\n for inp in range(problem['num_inputs'])\n ]) * problem['Nv'] + ([(0.01, np.inf)] if problem['T'] == 0 else []) \\\n if problem['state_bounds'] is not None else None", "def get_all_neighboring_arcs(self, var):\n return [ (i, var) for i in self.constraints[var] ]", "def _apply_degree_heuristic(self, vars, assignment, csp):\n\n result = []\n max_degree = -1\n\n for var in vars:\n neighbors = set()\n for constraint in csp.get_constraints(var):\n for neighbor in constraint.get_scope():\n # Collect all not assigned variables with common constraints\n if not assignment.has_assignment_for(neighbor):\n neighbors.add(neighbor)\n\n # Number of collected variables is a degree of the current variable\n degree = len(neighbors)\n if degree >= max_degree:\n if degree > max_degree:\n result = []\n max_degree = degree\n result.append(var)\n\n return result", "def order_domain_values(self, var):\n return least_constraining_value(self, var.name)", "def least_constraining_values(self, cell):\r\n vals = {}\r\n for val in cell.domain:\r\n vals[val] = 0\r\n for i, j in cell.neighbors:\r\n if val in self.grid[i][j].domain:\r\n vals[val] += 1\r\n x = sorted(vals.items(), key=lambda i: i[1])\r\n res = []\r\n for i in x:\r\n res.append(i[0])\r\n return res", "def order_domain_values(csp, variable):\n domain = variable.domain\n returned = []\n \"\"\"\n print variable\n for a in csp.constraints[variable]:\n print 
a\n \"\"\"\n for x in domain:\n returned.append(conflict_count(csp, variable,x))\n\n ret = sorted(returned, key=itemgetter(1))\n rett = []\n for x in ret:\n rett.append(x[0])\n \n return rett\n # TODO implement this\n pass", "def order_domain_values(self, var, assignment):\n # retrieve the domain for the variable\n domain = self.domains[var]\n # initialise a dictionary for sorting the values in the variable's domain\n sorting_dict = {} \n # for each of the values in the variable's domain \n for value in domain:\n # set the constraint counter to zero\n sorting_dict[value] = 0\n # for each of the neighbors of the variable\n for neighbor in self.crossword.neighbors(var):\n # retrieve the overlap indexes\n overlap = self.crossword.overlaps[(neighbor, var)]\n # for each of the overlap's possible values (the overlap's domain)\n for test in self.domains[neighbor]:\n # if the overlap letter is not the same\n if test[overlap[0]] != value[overlap[1]]:\n # this value constrains the neighbor's domain\n sorting_dict[value] += 1\n # sort the dictionary by the value of the sorting key\n sorted_vars = sorted(domain, key=lambda x: sorting_dict[x])\n return sorted_vars", "def get_visits(xvars):\n is_timespace = len(list(xvars)[0]) == 3\n if is_timespace:\n visits = [var[0] for var in xvars.iteritems() if var[1].X == 1]\n return sorted(visits, key=lambda visit: visit[2])\n\n n = max(xvars.keys())[0] + 1\n visits = []\n for i in range(n):\n for j in range(n):\n if not i == j and xvars[i, j].X == 1:\n visits.append((i, j))\n return sorted(visits, key=lambda visit: visit[0])", "def vars(self):\n return [Var(i,self.dims[i]) for i in range(self.nvar)] # TODO: use stored state info (=1 sometimes)", "def variable_range(examples, var):\n if var[1] == 'd':\n range = set()\n for datum in examples:\n range.add(datum[var[0]])\n return range\n else:\n range_min, range_max = 0, 0\n for datum in examples:\n data_val = float(datum[var[0]])\n range_min, range_max = min(range_min, data_val), max(range_max, data_val)\n return (range_min, range_max)", "def order_domain_values(self, var, assignment):\n #list to store pair data of words and their constraint score\n constraint_list= []\n #function to create list of all neighbors to node var\n neighbors= self.crossword.neighbors(var)\n\n for neighbor in neighbors:\n overlap= self.crossword.overlaps[var, neighbor]\n \n for word_var in self.domains[var]:\n constraint_score= 0\n\n for word_neighbor in self.domains[neighbor]:\n #adds constraint score for each word in the domain of neighbor nodes that are not consistent if word_var is chosen\n if word_var[overlap[0]] != word_neighbor[overlap[1]]:\n constraint_score += 1\n #add the pair data to list of all words\n constraint_list.append([word_var, constraint_score])\n #sorts the list in terms of constraint score\n constraint_list.sort(key= lambda x:x[1])\n #creates a list of all words in the same order as constraint_list\n return_list= map(lambda x:x.pop(0), constraint_list)\n return_list= list(return_list)\n return return_list", "def _compute_best_value(self):\n asgt = self._neighbors_values.copy()\n best_cost, best_val = None, []\n\n for v in self._variable.domain:\n asgt[self.variable.name] = v\n c = self._compute_cost(**asgt)\n if (\n best_cost is None\n or (best_cost > c and self._mode == \"min\")\n or (best_cost < c and self._mode == \"max\")\n ):\n best_cost = c\n best_val = [v]\n elif best_cost == c:\n best_val.append(v)\n\n return best_val, best_cost", "def select_unassigned_variable(csp):\n smallest = -1\n largest = 0\n 
multiple = False\n returned = None\n\n for unass in csp.variables:\n if not unass.is_assigned():\n if len(unass.domain) < smallest or smallest == -1:\n smallest = len(unass.domain)\n multiple = False\n returned = unass\n if len(unass.domain) == smallest:\n multiple = True\n\n if multiple == False:\n return returned\n else:\n for unass in csp.variables:\n if not unass.is_assigned():\n if len(unass.domain) == smallest:\n if len(csp.constraints[unass]) > largest:\n largest = len(csp.constraints[unass])\n returned = unass\n return returned\n\n\n\n\n\n # TODO implement this\n pass", "def __order_domain_values(self, var, assignment):\n values_to_inconsistencies = {}\n unassigned_vars = self.__unassigned_variables(assignment)\n unassigned_neighbors = self.__unassigned_neighbors(var, unassigned_vars)\n for value in var.domain:\n inconsistent_value_count = 0\n for unassigned_neighbor in unassigned_neighbors:\n consistent_domain_values = self.__consistent_domain_values(var, value, unassigned_neighbor)\n inconsistencies = unassigned_neighbor.domain.difference(consistent_domain_values)\n inconsistent_value_count += len(inconsistencies)\n values_to_inconsistencies[value] = inconsistent_value_count\n\n ordered_values = sorted(values_to_inconsistencies, key=values_to_inconsistencies.get)\n return ordered_values", "def getVar2FactorsMap(self):\r\n V = self.getAllNodes()\r\n return list(list(idx for idx,f in enumerate(self.factors) if i in f.var) for i in V)", "def select_most_constrained_var(self, iterables):\r\n return self.select_first([var for var in iterables if len(self.csp.domains[var]) == min(len(self.csp.domains[i]) for i in iterables)])", "def domain_reduction(csp, queue=None) :\n if queue==None:\n queue=csp.get_all_variables()\n dequeued=[]\n while len(queue)!=0:\n current_var=queue.pop(0)\n dequeued.append(current_var)\n eliminated=eliminate_from_neighbors(csp,current_var)\n if(eliminated==None):\n return None\n for var in eliminated:\n if not var in queue:\n queue.append(var)\n return dequeued", "def findall_var(formula, variable):\n res = []\n s = Solver()\n s.add(formula)\n while True:\n if s.check() == sat:\n m = s.model()\n res.append(m)\n value = m[variable]\n if value == None:\n return res\n s.add(variable != value)\n else:\n return res", "def __degree(self, var, unassigned_vars):\n return len(self.__unassigned_neighbors(var, unassigned_vars))", "def getVariableList(dataset):\n variables = [v for v in dataset.variables.keys() if v not in dataset.dimensions.keys()]\n for d in dataset.dimensions.keys():\n try:\n variables.pop(variables.index(dataset.variables[d].getncattr(\"bounds\")))\n except:\n pass\n return variables", "def find_varying(params, nvmax):\n print('Finding variable parameters')\n if nvmax < 0:\n raise ValueError(f'nvmax ({nvmax}) must be positive')\n\n var = []\n cnt = 0\n for p in params:\n if len(params[p]) == 2:\n if (params[p][1] - params[p][0]) < 0:\n raise ValueError(f'range is inverted for param: {p}')\n elif cnt >= nvmax:\n raise ValueError(f'too many param ranges were given. 
Expected {nvmax}')\n else:\n var.append(p)\n cnt += 1\n return var", "def cnf_variables(cnf):\n variabs = set()\n\n for clause in cnf:\n for var in clause:\n var = abs(var)\n\n if var not in variabs:\n variabs.add(var)\n\n return variabs", "def get_constraints_with(self, var):\n return [c for c in self.constraints if var.name in c.var_names]", "def variable_ranking(self):\n self.grow_trees()\n dist_classes = self.dist_classes\n oob = self.forest.oob_set_generator()\n oob_length, First, elt_vals, var_vals = len(oob), True, {}, {}\n succ_rate, dist_succ_rate, dist_order = 0, 0, 0\n for var in self.variables:\n var_range = list(variable_range(self.data, var))\n range_len = len(var_range)\n print var\n permution = None\n permuted_succ, perm_dist_succ = 0, 0\n for elts in oob:\n if First:\n actual = self.data[elts][self.prediction_index]\n elt_vals[elts] = actual\n predicted = self.forest.test_predict(self.data[elts], elts)\n if actual in dist_classes:\n dist_order += 1\n if actual == predicted:\n succ_rate += 1\n if actual in dist_classes:\n dist_succ_rate += 1\n if var[1] == 'd':\n permution = int(math.floor(uniform(0, 1)*range_len))\n permution = var_range[permution]\n else:\n permution = uniform(0, 1)*(var_range[1] - var_range[0])\n perm_tuple = self.data[elts][:var[0]] + [permution] + self.data[elts][var[0]+1:]\n permuted_prediction = self.forest.predict(perm_tuple)\n actual = elt_vals[elts]\n if actual == permuted_prediction:\n permuted_succ += 1\n if actual in dist_classes:\n perm_dist_succ += 1\n if First:\n succ_rate = float(succ_rate)/oob_length\n dist_succ_rate = float(dist_succ_rate)/dist_order\n First = False\n permuted_succ = float(permuted_succ)/oob_length\n perm_dist_succ = float(perm_dist_succ)/dist_order\n print \"Originally a \", succ_rate, \" success rate, with permution to \", permuted_succ\n print \"A difference of \", succ_rate - permuted_succ\n print \"WRT Distinguised classes, a success rate of:\", dist_succ_rate, 'with permution to ', perm_dist_succ\n print \"A difference of \", dist_succ_rate - perm_dist_succ\n var_vals[var] = succ_rate - permuted_succ\n var_vals[(var, 'd')] = dist_succ_rate - perm_dist_succ\n var_vals = sorted(var_vals.items(), key=lambda x: x[1], reverse=True)\n for x in var_vals:\n print x[0], x[1]", "def all_variables(formula):\n return collect_unique_nodes(formula, lambda x: isinstance(x, Variable))", "def _schedule_per_var(self):\n # sg = self.mg.region_graph.copy() # no mutation\n sg = self.mg.region_graph\n var_elim_schedule = []\n for v in self.elim_order:\n nodes_with_v = [node for node in sg.nodes_iter() if v in sg.node[node]['sc']]\n var_elim_schedule.append(nodes_with_v)\n return var_elim_schedule", "def get_neighbors(self, site):\n shape = self.sites.shape\n neighbors = list()\n for i, dim in enumerate(shape):\n nbr = list(site)\n if site[i] > 0:\n nbr[i] = nbr[i] - 1\n else:\n nbr[i] = dim - 1\n neighbors.append(tuple(nbr))\n\n nbr = list(site)\n if site[i] < dim - 1:\n nbr[i] = nbr[i] + 1\n else:\n nbr[i] = 0\n neighbors.append(tuple(nbr))\n return neighbors", "def get_var(self, data, cluster, link, var):\n\n pooled_var = 0.0\n for j in range(4, len(data[:,0])):\n if link.statistics[j-3][4] != 0.0:\n pooled_var += (abs(cluster.statistics[j-3][2]-link.statistics[j-3][2])/link.statistics[j-3][4])**2.\n else:\n pooled_var += 0.0\n pooled_var = np.sqrt(pooled_var)\n var.append(pooled_var)\n\n return var", "def retrieve_potential(evidence, variables, markov_network):\n\n potential = 0\n for c in range(markov_network[\"n_cliques\"]):\n if 
variables == markov_network[\"cliques\"][c][\"vars\"]:\n potential = markov_network[\"cliques\"][c][\"potential\"]\n\n all_possibilities = []\n for i in range(len(variables)):\n all_possibilities.append([k for k in range(markov_network[\"cardinalities\"][variables[i]])])\n\n all_possibilities = itertools.product(*all_possibilities)\n all_possibilities = [a for a in all_possibilities]\n\n pot = all_possibilities.index(evidence)\n pot = potential[pot]\n\n return float(pot)", "def var(x, ddof=0):\n n = len(x)\n with mp.extraprec(16):\n sumx = mp.fsum(x)\n meanx = sumx / n\n varx = mp.fsum((mp.mpf(t) - meanx)**2 for t in x)/(n - ddof)\n return varx", "def get_constraints_for_variable(self, var):\n return (constraint for constraint in self.constraints\n if var.name in [constraint.var1.name, constraint.var2.name])", "def solve():\n\n s, g, e = make_lattice(21)\n stack = deque([[e]])\n vals = {s: 1}\n max_n = 0\n\n while stack:\n max_n = max(max_n, len(stack))\n n, *p = stack.pop()\n for c in g.get_connected(n):\n if c > n:\n continue\n if c in vals:\n propagate(c, [n] + p, vals)\n else:\n stack.append([c, n] + p)\n return vals[e]", "def find_rvs_in_graph(vars: Union[Variable, Sequence[Variable]]) -> Set[Variable]:\n\n def expand(r):\n owner = r.owner\n if owner:\n inputs = list(reversed(owner.inputs))\n\n if isinstance(owner.op, HasInnerGraph):\n inputs += owner.op.inner_outputs\n\n return inputs\n\n return {\n node\n for node in walk(makeiter(vars), expand, False)\n if node.owner and isinstance(node.owner.op, (RandomVariable, MeasurableVariable))\n }", "def generate_var_ranges(self):\n\n var_ranges = {}\n for var in self.variables:\n min_to_max = list(range(int(var['min']), int(var['max']) + 1))\n if (var['zero_ok'] == False and 0 in min_to_max):\n min_to_max.remove(0)\n\n var_ranges[var['variable']] = min_to_max\n\n return var_ranges", "def var(\n self, values: pdarray, skipna: bool = True, ddof: int_scalars = 1\n ) -> Tuple[groupable, pdarray]:\n k, v = self.aggregate(values, \"var\", skipna, ddof)\n return k, cast(pdarray, v)", "def order_domain_values(var,assignment,csp):\n #right now it works only as just convert value and return\n #no special black magic yet\n return var.domain", "def _max_cardinality_search(self, mask):\n n = mask.shape[0]\n cliques = [[]] # maintains the list of cliques\n last_mark = -1 # number of marked neighbors for prev. 
node\n marks = [[] for i in range(n)] # a set tracking the marked neighbors of each node\n mark_size = np.zeros(n) # number of marked neighbors for each node\n remaining = list(range(n))\n for _ in reversed(range(n)):\n node = remaining[np.argmax(mark_size[remaining])]\n if mark_size[node] <= last_mark: # moving into a new clique\n cliques.append(marks[node] + [node])\n else: # add it to the last clique\n cliques[-1].append(node)\n nb_node = np.nonzero(mask[node,:])[0] # neighbors of node\n for nb in nb_node: # update the marks for neighbors\n marks[nb].append(node)\n mark_size[nb] += 1\n last_mark = mark_size[node]\n remaining.remove(node)\n sorted_cliques = [sorted(c) for c in cliques]\n return sorted_cliques", "def split_var(self, x):\n cum_dims = list(np.cumsum(self.dims))\n out = []\n for slice_from, slice_to, dist in zip([0] + cum_dims, cum_dims, self.dists):\n sliced = x[:, slice_from:slice_to]\n out.append(sliced)\n return out", "def _var_sol(self, var: Union[LpVariable, Var]) -> float:\n\n return value(var) if self.optimizer == 'pulp' else var.x", "def get_neighbors(n):\n if n < 3:\n return ValueError(\"Integer must be greater than 3.\")\n p = generate()\n q = []\n l = 0\n g = 0\n while g <= n:\n q = next(p)\n g = q[-1]\n if q[-1] == n:\n l = q[0][-2]\n q = next(p)\n g = q[-1]\n elif q[-1] > n:\n l = q[0][-3]\n return l, g", "def compute_hist_nns(var, n, mn, mx):\n bins = np.linspace(mn, mx, n + 1)\n hist, _ = np.histogram(var, bins=bins, density=True)\n hist[np.isnan(hist)] = 0\n return .5 * (bins[1]-bins[0]) + bins[:-1], hist", "def GetNumberOfVariables(self):\n\n # nvar = 0\n # for i in self.variables_order:\n # # DO NOT COUNT VARIABLES THAT GET CONDENSED OUT\n # if i!=0:\n # if mesh.element_type == \"tri\":\n # nvar += (i+1)*(i+2) // 2\n # elif mesh.element_type == \"tet\":\n # nvar += (i+1)*(i+2)*(i+3) // 6\n # elif mesh.element_type == \"quad\":\n # nvar += (i+1)**2\n # elif mesh.element_type == \"hex\":\n # nvar += (i+1)**3\n\n # nvar = sum(self.variables_order)\n if self.nvar == None:\n self.nvar = self.ndim\n return self.nvar", "def get_neighbor_idxes(x, n, limit):\n idxes = sorted(range(limit), key=lambda idx: (abs(x - idx), idx))[:n]\n idxes = sorted(idxes)\n return np.array(idxes)", "def init_sorted_variables(self):\n variables_by_neighbors = [] # A list of (var_name, |neighbors|)\n for variable in self.var_names:\n variables_by_neighbors.append(\n (self.variables[variable].get_name(), len(self.variables[variable].get_neighbors())))\n\n # In this part we sort the variables according to the heuristic:\n variables_by_neighbors = sorted(variables_by_neighbors, key=lambda tup: tup[1], reverse=True)\n # (J) Notice that there can be many variables with same neighbour, thus the order between them isn't determined.\n self.sorted_variables = [*map(lambda x: x[0], variables_by_neighbors)]", "def find_maxima(x):\n\n idx = []\n for i in range(len(x)):\n # `i` is a local maximum if the signal decreases before and after it\n if x[i-1] < x[i] and x[i+1] < x[i]:\n idx.append(i)\n return idx", "def all_different_assignment_propagator(var: str, val: int, domains: Domains, problem_vars: FrozenSet[str]) -> Domains:\r\n reduced_domains = {v: frozenset({val}) if v == var else\r\n domains[v] - {val} if v in problem_vars else\r\n domains[v] for v in domains}\r\n return reduced_domains", "def __minimum_remaining_values(self, unassigned_vars):\n min_var = None\n for var in unassigned_vars:\n if min_var is None:\n min_var = var\n elif len(var.domain) < len(min_var.domain):\n min_var = var\n 
return min_var", "def domain_reduction_singleton_domains(csp, queue=None) :\n if queue==None:\n queue=csp.get_all_variables()\n dequeued=[]\n while len(queue)!=0:\n current_var=queue.pop(0)\n dequeued.append(current_var)\n eliminated=eliminate_from_neighbors(csp,current_var)\n if(eliminated==None):\n return None\n pre_add_list=[]\n add_list=[]\n for var in eliminated:\n exist=False\n for varr in queue:\n if var == varr:\n exist=True\n break\n if not exist:\n pre_add_list.append(var)\n for var in pre_add_list:\n if len(csp.get_domain(var))==1:\n add_list.append(var)\n queue=queue+add_list\n return dequeued", "def topological_sort(self):\n\t\t#detect leaves\n\t\tnumChildren = dict((n.name,0) for n in self.variables.values())\n\t\tfor n in self.variables.itervalues():\n\t\t\tfor p in n.parents:\n\t\t\t numChildren[p]+=1\n\t\t#do a BFS from leaves to get the reverse topological sort\n\t\ttopo = []\n\t\tqueue = [n for (n,c) in numChildren.iteritems() if c==0]\n\t\tif len(queue)==0:\n\t\t\traise ValueError(\"Bayes net is not acyclic?\")\n\t\twhile len(queue)>0:\n\t\t\tn = self.variables[queue.pop(0)]\n\t\t\ttopo.append(n)\n\t\t\tfor p in n.parents:\n assert numChildren[p]>0\n numChildren[p] -= 1\n if numChildren[p]==0:\n queue.append(p)\n\t\t#now reverse it to get the top down ordering\n assert len(topo)==len(self.variables)\n\t\treturn reversed(topo)", "def _expanded(var):\r\n if var == '*':\r\n return []\r\n n = var.split(',')\r\n n2 = [i.split('-') for i in n if i.find('-') > -1]\r\n n3 = set([int(i) for i in n if i.find('-') == -1])\r\n for i in n2:\r\n for x in range(int(i[0]), int(i[1]) + 1):\r\n n3.add(x)\r\n return n3", "def lvar (inlist):\r\n n = len(inlist)\r\n mn = mean(inlist)\r\n deviations = [0]*len(inlist)\r\n for i in range(len(inlist)):\r\n deviations[i] = inlist[i] - mn\r\n return ss(deviations)/float(n-1)", "def no_non_adjacent_vertices(self):\n clauses = []\n for v in range(0,self.graph.num_vertices):\n non_neighbours = sorted(list(set(range(0,self.graph.num_vertices))\n - set([v])\n - set(self.graph.edges[v])))\n for nv in non_neighbours:\n for position in range(0,self.graph.num_vertices-1):\n clause = [ ClauseVariable(True,v,position),\n ClauseVariable(True,nv,position+1)]\n clauses.append(clause)\n return clauses", "def bin_discretize(self, variables=[], bins=3,\n min_const_samples_bin_size=1.0/3):\n self.edges=np.zeros((self.arity.size,bins+1))\n for i in variables:\n un_cnt=np.unique(self.data[:,i],return_counts=True)\n constvals=un_cnt[0][un_cnt[1]>self.data.shape[0]*min_const_samples_bin_size]\n mask=np.ones(self.data.shape[0],dtype=bool)\n if constvals.size>0:\n for j,cv in enumerate(constvals):\n mask*=(self.data[:,i]!=cv)\n self.data[self.data[:,i]==cv,i]=j\n\n size=np.sum(mask)/bins\n sorted_i=np.argsort(self.data[mask,i])\n edges=[self.data[mask,i][sorted_i[int(size*num)-1]] for num in range(1,bins)]\n self.edges[i]=[self.data[mask,i][sorted_i[0]]]+edges+[self.data[mask,i][sorted_i[-1]]]\n self.data[mask,i]=np.searchsorted(edges,self.data[mask,i])+constvals.size\n self.arity[i]=len(edges)+1+constvals.size", "def size_of_variable(self, variable):\n index_structures = variable.index_structures\n if not index_structures:\n return 1\n mapping = [self.mod_index[ind].mapping for ind in index_structures]\n blocking = [self.mod_index[ind].blocking for ind in index_structures]\n size = []\n for i in range(len(mapping)):\n if mapping[i] and blocking[i]:\n length = 0\n for blk in blocking[i]:\n if blk == 0:\n length += 1\n else:\n length += blk\n size.append(length)\n 
else:\n return None\n return size", "def select_unassigned_variable(self, assignment):\n var_list= []\n #add unassigned variabled to a list along with the number of words left in its domain\n for var in self.domains:\n if var not in assignment:\n var_list.append((var, len(self.domains[var])))\n #sort this list by the number of words left in its domain\n var_list.sort(key= lambda x:x[1])\n\n #list for variables that are tied for least words left in domain\n equal_vars= [list(var_list[0])]\n for i in range(len(var_list)):\n #adds variables with same number of words left in domain\n if var_list[0][1] == var_list[i][1] and var_list[i] != var_list[0]:\n equal_vars.append(list(var_list[i]))\n\n \n #change the encoded information for words left in domain to the number of neighbors the variable had (highest degree)\n for i in range(len(equal_vars)):\n equal_vars[i][1]= len(self.crossword.neighbors(equal_vars[i][0]))\n\n #sort the list by the highest degree\n equal_vars.sort(key= lambda x:x[1])\n \n #return var with highest degree\n return equal_vars[0][0]", "def var(x):\n length = len(x)\n\n if length == 0:\n return None\n result = 0.0\n m = TinyStatistician.mean(x)\n for i in x:\n result += (i - m) ** 2\n\n return result / length", "def find_smallest(num_vars):\n for x in range(10):\n if num_vars <= 2**x:\n return x", "def nvar(self):\n return self.h.shape[0]", "def solve(given: np.array) -> np.array:\n possible = np.full((9, 9, 9), True)\n mask = given > 0\n possible[mask, :] = False\n possible[mask, given[mask] - 1] = True\n\n # number of possibilities at each site, masking those already propagated\n # to avoid repetitive work. All masked == problem solved\n count = ma.array(possible.sum(axis=2), fill_value=1)\n\n # allocate upfront to as out parameter to np.equal\n # (ma.array because count is ma.array)\n where = ma.array(np.empty((9, 9), dtype=bool), fill_value=False)\n\n stack = [(possible, count)]\n while stack:\n node, count = stack.pop()\n unsolved = propagate(node, count, where)\n if unsolved == -1:\n continue\n if unsolved == 0:\n break\n # try all possibilities from cell with fewest > 1\n i, j = np.unravel_index(count.argmin(), count.shape)\n for k in np.flatnonzero(node[i, j, :]):\n node_copy, count_copy = node.copy(), count.copy()\n node_copy[i, j, :] = False\n node_copy[i, j, k] = True\n count_copy[i, j] = 1\n stack.append((node_copy, count_copy))\n\n i, j, k = node.nonzero()\n count[i, j] = k + 1\n return np.array(count)", "def highest_var(self, n=100):\n self.highest_x(n, np.amin(self.sd(), axis=0), 'variance')", "def _compute_best_value(self):\n reduced_cs = []\n concerned_vars = set()\n\n for c in self.utilities:\n asgt = filter_assignment_dict(self._neighbors_values, c.dimensions)\n reduced_cs.append(c.slice(asgt))\n concerned_vars.update(c.dimensions)\n var_val, rel_val = find_arg_optimal(\n self.variable,\n lambda x: functools.reduce(operator.add, [f(x) for f in reduced_cs]),\n self._mode,\n )\n # Add the cost for each variable value if any\n for var in concerned_vars:\n if var.name == self.name:\n rel_val += var.cost_for_val(self.current_value)\n else:\n rel_val += var.cost_for_val(self._neighbors_values[var.name])\n\n return var_val, rel_val", "def eliminate_var(n, g,clq_ind,tree):\r\n l = len(clq_ind) # number of nodes eliminated\r\n \r\n new_ind = scipy.array(g.neighbors(n))\r\n new_clique = g.neighbors(n)\r\n new_clique.append(n) \r\n g.add_edges_from( combinations(new_clique,2) )\r\n \r\n for i,clq in enumerate(clq_ind):\r\n if n in clq:\r\n tree.add_edge(l,i)\r\n 
clq_ind[i] = scipy.setdiff1d(clq,new_clique)\r\n \r\n clq_ind.append(new_ind)\r\n g.remove_node(n)\r\n tree.node[l]['clique'] = new_clique", "def _projections(self, nvar):\n min_var = self.proje_var.argsort()[:nvar]\n add_coeffs = 1 / self.proje_var[min_var]\n indp_est_proje = np.dot(add_coeffs, self.sep_proje_eval[min_var]) /\\\n np.sum(add_coeffs)\n\n # consider covariance\n coverr = []\n try:\n proje_cov_inv = np.linalg.inv(self.proje_cov[min_var][:, min_var])\n cov_weight = np.sum(proje_cov_inv, axis=0) / np.sum(proje_cov_inv)\n cov_est_proje = np.dot(cov_weight, self.sep_proje_eval[min_var])\n coverr.append(1/np.sum(proje_cov_inv))\n except:\n cov_est_proje = np.ones(self.sep_proje_eval.shape[1])\n cov_est_proje[:] = np.nan\n coverr.append(np.nan)\n return np.array([indp_est_proje, cov_est_proje])", "def count_vars(scope=''):\n v = get_vars(scope)\n return sum([np.prod(var.shape.as_list()) for var in v])", "def get_truncated_ranges(variable, unbounded_alpha=0.99, bounded_alpha=1.0):\n ranges = []\n if (type(variable) == GaussCopulaVariable) and (bounded_alpha == 1):\n bounded_alpha = unbounded_alpha\n\n for rv in variable.marginals():\n ranges += get_truncated_range(rv, unbounded_alpha, bounded_alpha)\n return np.array(ranges)", "def estimate_var(sample, threshold):\n sample_size = len(sample)\n index_at = get_var_level_index(sample_size, threshold)\n sample.sort()\n return sample[index_at]", "def get_set(dim, maximum):\n\n i = 0\n numbers = []\n while i**2 <= maximum:\n n = i**2\n counter = 0\n while n <= maximum and counter < dim:\n numbers += [i**2]\n n += i**2\n counter += 1\n i += 1\n return numbers", "def __dijkstra_max_prob_tree(unit_rules_graph, source_var):\n variables = unit_rules_graph.vertices\n dist = {var: -1.0 for var in variables}\n dist[source_var] = 1.0\n tree_nodes = {var: ParseTreeNode(var) for var in variables}\n pending_vars = set(variables)\n while pending_vars:\n max_dist, max_dist_var = max((d, v) for v, d in dist.items() if v in pending_vars)\n pending_vars.remove(max_dist_var)\n for neighbor, prob in unit_rules_graph.neighbors[max_dist_var]:\n if neighbor not in pending_vars:\n continue\n alt_dist = dist[max_dist_var] * prob\n if dist[neighbor] is None or alt_dist > dist[neighbor]:\n dist[neighbor] = alt_dist\n tree_nodes[max_dist_var].children.append(tree_nodes[neighbor])\n return ParseTree(tree_nodes[source_var])", "def solution(n, array):\n\n counters = [0] * n\n\n # Current greatest value calculated so far\n max_count = 0\n\n for i in range(len(array)):\n if array[i] == n + 1:\n # max_count = max(counters)\n counters = [max_count] * n\n else:\n counters[array[i] - 1] += 1\n\n # To avoid calculating max(), we update the max value at each step\n if counters[array[i] - 1] > max_count:\n max_count = counters[array[i] - 1]\n\n return counters", "def neighborhood(index, npoints, maxdist=1):\n return [index + i for i in range(-maxdist, maxdist + 1)\n if i != 0 and 0 <= index + i <= npoints - 1]", "def Z_most_abundant(self) -> list[Integral]:\n if np.any(np.isnan(self.ionic_fractions)):\n raise ParticleError(\n f\"Cannot find most abundant ion of {self.base_particle} \"\n f\"because the ionic fractions have not been defined.\"\n )\n\n return np.flatnonzero(\n self.ionic_fractions == self.ionic_fractions.max()\n ).tolist()", "def collect_primed_vars(t):\n g = Tree.from_recursive_ast(t)\n # (node, context)\n Q = [(t, False)]\n primed = set()\n while Q:\n u, c = Q.pop()\n if u.type == 'var' and c:\n primed.add(u.value)\n try:\n c = (u.operator == 'X') or c\n except 
AttributeError:\n pass\n Q.extend((v, c) for v in g.successors(u))\n return primed", "def connectedComponents(self):\n components = []\n X = set(self.X)\n while X:\n Xi = X.pop()\n if Xi.states <= 1: continue # don't include missing or assigned variables \n group = {Xi} # start a new group with this variable\n queue = [Xi] # do DFS on the graph from Xi to find its connected component:\n while queue:\n n = queue.pop()\n nbrs = self.markovBlanket(n) # get all connected variables\n nbrs.difference_update(group) # remove any we've already seen\n X.difference_update(nbrs) # remove new ones from unexplored variable list\n group.update(nbrs) # add them to this connected component\n queue.extend(nbrs) # and continue exploring from them in DFS order\n components.append(group)\n return components", "def pos_conserved(df, conservation):\n nb_rows, nb_cols = df.shape\n\n value_counts = df.apply(pd.Series.value_counts, axis=0).max(axis=0).ge(conservation * nb_rows)\n\n ge = [i for i, x in enumerate(value_counts) if x]\n return ge", "def solution(N, A):\n arr = [0]*(N) # To hold the max value. This idea is also used in L09_MaxDoubleSliceSum_m_golden_slice.py\n minimum = maximum = 0\n for i in A:\n if i > N:\n minimum = maximum\n else:\n arr[i-1] = max(arr[i-1], minimum)\n arr[i-1] += 1\n if arr[i-1] > maximum:\n maximum = arr[i-1]\n\n for i in range(len(arr)):\n arr[i] = max(arr[i], minimum)\n\n return arr", "def size(self, varname):\n if self.handle == None: return []\n try:\n var = self.handle.variables[varname]\n except KeyError:\n return []\n \n def dimlen(d):\n dim = self.handle.dimensions[d]\n if dim != None:\n t = type(dim).__name__\n if t == 'int':\n return dim\n return len(dim)\n return 0\n return map(lambda d: dimlen(d), var.dimensions)", "def rv(self, var):\n return [(self.prob(self.prop(var + ' = ' + val)), val)\n for val in self.rvs()[var]]", "def unfold_grid(var):\n if (len(var.shape)==2): # 2-D variable\n work = N.concatenate((N.zeros((var.shape[0],24),float),var),1)\n work[39:68,0:24] = work[39:68,var.shape[1]:]\n work[39:68,var.shape[1]:] = 0.0\n elif (len(var.shape)==3): # 3-D variable\n work = (N.concatenate((N.zeros((var.shape[0],var.shape[1],24),float),\n var),2))\n work[:,39:68,0:24] = work[:,39:68,var.shape[2]:]\n work[:,39:68,var.shape[2]:] = 0.0\n\n return work", "def nconflicts(self, var, val, assignment):\r\n\r\n # Subclasses may implement this more efficiently\r\n def conflict(var2):\r\n return var2 in assignment and not self.constraints(var, val, var2, assignment[var2])\r\n\r\n return count(conflict(v) for v in self.neighbors[var])", "def get_neighbours(self, value):\n\t\tnode = self.get_node(value)\n\t\tneighbours = [key.value for key in node.edges.keys()]\n\t\treturn neighbours", "def get_n_params(var_list):\n return int(np.sum([np.product(\n [x.value for x in var.get_shape()]) for var in var_list]))", "def markov_partition(markov_network):\n evidences = []\n for i in range(markov_network[\"n_variables\"]):\n evidences.append([k for k in range(markov_network[\"cardinalities\"][i])])\n\n evidences = itertools.product(*evidences)\n evidences = [e for e in evidences]\n\n result = 0\n for evidence in evidences:\n clique_potentials = []\n for c in range(markov_network[\"n_cliques\"]):\n ### Check if the clique is maximal\n ### If not, don't consider it\n ### For now, it only excludes singletons\n if len(markov_network[\"cliques\"][c][\"vars\"]) == 1:\n continue\n\n ### Consider only variables present at the considered clique\n reduced_evidence = 
itemgetter(*markov_network[\"cliques\"][c][\"vars\"])(evidence)\n\n ### When there's only one variable the itemgetter does not return a tuple\n if isinstance(reduced_evidence, int): reduced_evidence = (reduced_evidence, )\n\n ### Get the potential from all the cliques\n clique_potentials.append(retrieve_potential(reduced_evidence, markov_network[\"cliques\"][c][\"vars\"], markov_network))\n\n #print(clique_potentials)\n result += reduce(lambda x, y: x * y, clique_potentials)\n\n return result", "def var_to_grid(array_var: List[Any], size: Tuple[int, int]) -> List[List[Any]]:\n ix = 0\n grid = [[]] * size[1]\n for y in range(size[1] - 1, -1, -1):\n grid[y] = array_var[ix : ix + size[0]]\n ix += size[0]\n return grid", "def get_depths(self, variables):\n\n return [0.]", "def _apply_mrv_heuristic(self, csp, assignment):\n result = []\n mrv = PlusInfinity()\n copy_assignment = assignment.copy()\n\n for var in csp.get_variables():\n if not copy_assignment.has_assignment_for(var):\n # Get number of left values for this variable\n num = self._calculate_left_values(var, csp, copy_assignment)\n if num <= mrv:\n if num < mrv:\n result = []\n mrv = num\n result.append(var)\n\n return result", "def evaluate(self,var,g=None):\n if (g==None):g=self.g\n assert(len(var)==self.n)\n res=np.zeros(self.n+1)\n for i in range(self.n):\n res[i]=var[i]**2+2.*var[i]-self.N*(self.n-self.N)*g**2*self.gamma-g*np.sum([self.XXZ.Z(i,j)*(var[i]-var[j]) for j in range(self.n) if j!=i])\n res[self.n]=np.sum(var)+2.*self.N\n return res", "def all_different(variables) :\n constraints=[]\n for index,var in enumerate(variables):\n for sub_index in range(index+1,len(variables)):\n var1=var\n var2=variables[sub_index]\n new_constraint=Constraint(var1,var2,constraint_different)\n constraints.append(new_constraint)\n return constraints", "def nvar(self):\n return len(self.__vars)", "def valency(self):\n return len(self.neighbors())", "def leaves_of(phi):\n return [d for d in descendants(phi)\n if isinstance(d, amnet.Variable)]", "def nvar(self):\n return len(self.v)", "def hubs(self):\r\n cities = col.defaultdict(int)\r\n for code, _list in self.edges.items():\r\n for edge in _list:\r\n cities[code] += 1\r\n heap = [(-value, key) for key, value in cities.items()]\r\n largest = heapq.nsmallest(5, heap)\r\n largest = [(key, -value) for value, key in largest]\r\n return largest", "def posns_from_trace(trace):\n posns = []\n\n for i in range((len(trace.variables)-1)//2):\n var_x = trace.variables[2*i]\n var_y = trace.variables[2*i+1]\n\n car_i = int(var_x.name.split('_')[2])\n\n xy = (var_x.value.item(), var_y.value.item())\n\n if len(posns) <= car_i:\n posns.append(xy) # if it's first, append it\n else:\n posns[car_i] = xy # else overwrite\n return posns", "def _adjacentvariance(data,nPoints=40):\r\n N = data.shape[0]\r\n vardata = np.zeros_like(data)\r\n for u, i in enumerate(data):\r\n if u<nPoints:\r\n vardata[u] = np.var(data[:(u*2+1)])\r\n elif ((N-u)<nPoints):\r\n vardata[u] = np.var(data[-(N*2-u*2-1):])\r\n else:\r\n temp = data[:(u+1+nPoints)]\r\n temp = temp[-(1+2*nPoints):]\r\n vardata[u] = np.var(temp)\r\n return vardata", "def _neighborhood(self, state_index):\n if self.locality is None:\n # Global neighborhood\n return list(range(0, self.n_states))\n else:\n # Local neighborhood specified by 'locality'\n return list(range(max(0, state_index - self.locality), min(self.n_states, state_index + self.locality + 1)))", "def domain_reduction(csp, queue=None) :\n if (queue==None):\n queue = csp.get_all_variables()\n dequeued = 
[]\n while len(queue)!=0:\n removedVar = queue[0]\n dequeued.append(removedVar)\n queue = queue[1:]\n for constraint in csp.constraints_between(removedVar,None)[:]:\n var2 = constraint.var2\n val2 = csp.get_assigned_value(var2)\n var2Domain = csp.get_domain(var2)[:]\n removedDomain = csp.get_domain(removedVar)[:]\n if len(removedDomain)==0 or len(var2Domain)==0:\n return None\n for domainVal2 in var2Domain:\n anyNonViolators = False\n for domainVal in removedDomain:\n check = constraint.check(domainVal,domainVal2)\n if check==True:\n anyNonViolators = True\n continue\n if anyNonViolators==False:\n csp.eliminate(var2, domainVal2)\n if len(csp.get_domain(var2))==0:\n return None\n if var2 not in queue:\n queue.append(var2)\n return dequeued", "def splittable_variables(self) -> List[int]:\n #print(\"enter bartpy/bartpy/data.py CovariateMatrix splittable_variables\")\n \n for i in range(0, self._n_features):\n if self._splittable_variables[i] is None:\n self._splittable_variables[i] = is_not_constant(self.get_column(i))\n \n output = [i for (i, x) in enumerate(self._splittable_variables) if x is True] \n #print(\"-exit bartpy/bartpy/data.py CovariateMatrix splittable_variables\")\n return output" ]
[ "0.65513057", "0.6346596", "0.62824583", "0.6041415", "0.60353327", "0.6032189", "0.5999635", "0.58770275", "0.5836044", "0.57163435", "0.55799633", "0.5501954", "0.5491494", "0.5456138", "0.54167825", "0.53648853", "0.53506935", "0.5325021", "0.53222", "0.5308232", "0.5291675", "0.52163595", "0.5189531", "0.51892823", "0.51809263", "0.5115579", "0.51025516", "0.50829107", "0.5067318", "0.50511485", "0.50497156", "0.5033584", "0.5027232", "0.50151074", "0.50027776", "0.49990025", "0.49949905", "0.49945635", "0.49886277", "0.4981895", "0.4980163", "0.4972114", "0.49550498", "0.49501425", "0.49276534", "0.48889586", "0.48753893", "0.48705056", "0.48473394", "0.48408848", "0.48347837", "0.4828796", "0.48278832", "0.4820003", "0.47937503", "0.478368", "0.47817013", "0.47813958", "0.47680873", "0.47566307", "0.47384897", "0.47375697", "0.47318566", "0.47303042", "0.47275183", "0.47246468", "0.4716692", "0.47114053", "0.47064003", "0.4698726", "0.46984982", "0.46966332", "0.46955863", "0.46940625", "0.46872368", "0.46871087", "0.46822712", "0.4677409", "0.46741754", "0.46729866", "0.46701533", "0.46643043", "0.46588314", "0.46547586", "0.46507463", "0.46490404", "0.46438628", "0.4642198", "0.46410772", "0.46240172", "0.46235886", "0.46234217", "0.46231133", "0.4619331", "0.4609515", "0.46084115", "0.46017975", "0.45835748", "0.45833984", "0.45829856" ]
0.54332227
14
Return an unassigned variable not already part of `assignment`. Choose the variable with the minimum number of remaining values in its domain. If there is a tie, choose the variable with the highest degree. If there is a tie, any of the tied variables are acceptable return values.
def select_unassigned_variable(self, assignment): # print("Entered select_unassigned_variable Function") # print("Assignment") # print(assignment) variables = set() variables.update(self.domains.keys()) unassigned_variables = set() unassigned_variables.update(variables.difference(assignment.keys())) # print("All Variables") # print(variables) # print("Unassigned Variables") # print(unassigned_variables) # This chooses the variables with the smallest domain from this list (unassigned_variables) var_list = [] for variable in unassigned_variables: var_list.append( (variable, len(self.domains[variable]), len(self.crossword.neighbors(variable)) ) ) var_list.sort(key = self.sort_by_domain) var_list.sort(reverse=True, key = self.sort_by_neighbors) # print("var_list") # print(var_list) return var_list[0][0] # raise NotImplementedError
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def select_unassigned_variable(self, assignment):\n var_list= []\n #add unassigned variabled to a list along with the number of words left in its domain\n for var in self.domains:\n if var not in assignment:\n var_list.append((var, len(self.domains[var])))\n #sort this list by the number of words left in its domain\n var_list.sort(key= lambda x:x[1])\n\n #list for variables that are tied for least words left in domain\n equal_vars= [list(var_list[0])]\n for i in range(len(var_list)):\n #adds variables with same number of words left in domain\n if var_list[0][1] == var_list[i][1] and var_list[i] != var_list[0]:\n equal_vars.append(list(var_list[i]))\n\n \n #change the encoded information for words left in domain to the number of neighbors the variable had (highest degree)\n for i in range(len(equal_vars)):\n equal_vars[i][1]= len(self.crossword.neighbors(equal_vars[i][0]))\n\n #sort the list by the highest degree\n equal_vars.sort(key= lambda x:x[1])\n \n #return var with highest degree\n return equal_vars[0][0]", "def select_unassigned_variable(csp:list,assignment:set,method=0) -> variable:\n if(method not in range(3)):\n return \"method out of bounds\"\n \n if(method == 0):\n y = rdint(0,len(csp)-1) #rdint is inclusive, hence the -1\n var = csp[y]\n while(var in assignment):\n y = rdint(0,len(csp)-1) #rdint is inclusive, hence the -1\n var = csp[y]\n return var\n \n elif(method == 1):\n #1:minimum-remaining value\n least_domain = math.inf\n low_var = None\n for var in csp:\n if(var not in assignment):\n dm_size = var.domain_size()\n if(dm_size == 0):\n return False\n if(dm_size < least_domain):\n least_domain = dm_size\n low_var = var\n return low_var\n \n elif(method == 2):\n #2:minimum-remaining value together with degree\n #the degree of the node works as a tie breaker, otherwise it works\n #just like minimum remaining value\n least_domain = math.inf\n low_var = None\n for var in csp:\n if(var not in assignment):\n dm_size = var.domain_size()\n if(dm_size == 0):\n return False\n if(dm_size < least_domain):\n least_domain = dm_size\n low_var = var\n elif(dm_size == least_domain and var.constraint_size() > low_var.constraint_size()):\n least_domain = dm_size\n low_var = var\n return low_var", "def select_unassigned_variable(self, assignment):\n # sort crossword variables that are not in assignment by the length of their domain lists\n available = sorted([x for x in self.crossword.variables if x not in assignment], key=lambda x: len(self.domains[x]))\n # sort the list of available variables that have the same size domain as the shortest by the number of neighbors they have\n available = sorted([x for x in available if len(self.domains[x]) == len(self.domains[available[0]])], key=lambda x: len(self.crossword.neighbors(x)))\n # return the last element of the array\n return available.pop()", "def select_unassigned_variable(self, assignment):\n # Simply just pick the next value that has more than one value\n # in the variable list\n for key, value in assignment.iteritems():\n if len(value) > 1:\n return key, value", "def select_unassigned_variable(csp):\n smallest = -1\n largest = 0\n multiple = False\n returned = None\n\n for unass in csp.variables:\n if not unass.is_assigned():\n if len(unass.domain) < smallest or smallest == -1:\n smallest = len(unass.domain)\n multiple = False\n returned = unass\n if len(unass.domain) == smallest:\n multiple = True\n\n if multiple == False:\n return returned\n else:\n for unass in csp.variables:\n if not unass.is_assigned():\n if len(unass.domain) == 
smallest:\n if len(csp.constraints[unass]) > largest:\n largest = len(csp.constraints[unass])\n returned = unass\n return returned\n\n\n\n\n\n # TODO implement this\n pass", "def first_unassigned_variable(assignment, csp):\r\n # print(first([var for var in csp.variables if var not in assignment]))\r\n\r\n return first([var for var in csp.variables if var not in assignment])", "def _select_unassigned_variable(self, assignment, csp):\n for var in csp.get_variables():\n if not assignment.has_assignment_for(var):\n return var\n\n return None", "def __select_unassigned_variable(self, unassigned_vars):\n mrv = self.__minimum_remaining_values(unassigned_vars)\n copy_unassigned_vars = list(unassigned_vars)\n copy_unassigned_vars.remove(mrv)\n return mrv, copy_unassigned_vars", "def __minimum_remaining_values(self, unassigned_vars):\n min_var = None\n for var in unassigned_vars:\n if min_var is None:\n min_var = var\n elif len(var.domain) < len(min_var.domain):\n min_var = var\n return min_var", "def backtrack(self, assignment):\n #if a solution has been found, returns the solution, this is used for recursive purposes\n if self.assignment_complete(assignment) and self.consistent(assignment):\n return assignment\n #select the most optimal variable/node\n var = self.select_unassigned_variable(assignment)\n #assigns a word left in the domain of var and assigns it to var\n for word in self.order_domain_values(var, assignment):\n assignment[var]= word\\\n #if the assignment is consistent, recursively call backtrack\n if self.consistent(assignment):\n result= self.backtrack(assignment)\n if result != False:\n return assignment\n #if the assignment is not consistent at any point, remove the latest assignment\n assignment.pop(var)\n\n return None", "def backtrack(self, assignment):\n # if the assignment is complete\n if self.assignment_complete(assignment):\n # return the assignment, crossword is complete\n return assignment\n # pick a variable to try to assign\n var = self.select_unassigned_variable(assignment)\n # for each value in the variable's domain\n for value in self.order_domain_values(var, assignment):\n # attempt to assign this value and fit it into the crossword\n # make a copy of the current assignments\n trial = assignment.copy()\n # add the trial value to the test assignment\n trial[var] = value\n # if the test assignment is consistent\n if self.consistent(trial):\n # add the trial assignment to the current list of assignments\n assignment[var] = value\n # take the next backtrack step with this new assign,ent\n result = self.backtrack(assignment)\n # if the backtrack is a success\n if result is not None:\n # we have a match\n return result\n # a backtrack further down failed, so remove the trial assignment\n assignment.pop(var)\n # no assignment was possible, return None\n return None", "def pop_next_unassigned_var(self):\n return self.unassigned_vars.pop(0) if self.unassigned_vars else None", "def get_next_unassigned_var(self):\n while(True):\n randomVal = random.randint(0, self.n - 1)\n if self.state[randomVal] is -1:\n return randomVal", "def get_best(self):\n if len(self._table) == 0:\n self.log.warning(\"table is empty, cannot extract best value\")\n raise ValueError()\n\n max_prob = -np.inf\n max_assignment = None\n for assignment in self._table.keys():\n prob = self._table[assignment]\n if prob > max_prob:\n max_prob = prob\n max_assignment = assignment\n\n # TODO: check refactor > there is no case of max_assignment is None\n return max_assignment if max_assignment is not None else 
Assignment.create_default(self._head_vars)", "def backtrack(self, assignment):\n # print(\"Entered backtrack Function\")\n # Check if assignment is complete\n if len(assignment) == len(self.domains):\n return assignment\n\n # Try a new variable\n var = self.select_unassigned_variable(assignment)\n word_list = self.order_domain_values(var, assignment)\n \n for word in word_list:\n new_assignment = assignment.copy()\n new_assignment[var] = word[0]\n if self.consistent(new_assignment):\n result = self.backtrack(new_assignment)\n if result is not None:\n return result\n \n return None\n\n # raise NotImplementedError", "def backtrack(self, assignment):\n # As stated above, if all variables in assignment is 1\n # then all values have been set and we return assignment \n if all(len(l) == 1 for l in assignment.values()):\n return assignment\n\n # Pick the next unnassigned variable that we are going to check \n key, values = self.select_unassigned_variable(assignment)\n # Loop through all the allowed values of this square in the sudoku board\n for value in values:\n # Do a deepcopy cuz otherwise R.I.P\n deep = copy.deepcopy(assignment)\n # Checks if this current value is consistent with the rest\n # of the sudoku board \n if self.check_consistency(deep, key, value):\n # IF it is consistent then we set this square to have this value \n deep[key] = [value]\n # Do inference check for hyper optimized code\n if self.inference(deep, self.get_all_arcs()):\n self.counter += 1\n result = self.backtrack(deep)\n if result is not False:\n return result\n else:\n self.fails += 1\n else:\n # Continue looping through the values of the currently selected \n # sudoku-square if the value was inconsistent with the board \n continue\n return False", "def recurse(assignment, states, domains, neighbors, user_dict):\n\t\tif len(unassigned) == 0:\n\t\t\treturn assignment\n\n\t\tvarr[0] = randomchooseanddelete()\n\n\t\tfor val in stardomain(varr[0], curr_domains):\n\t\t\tassignment[varr[0]] = val\n\t\t\tforwardcheck(varr[0], val, assignment, user_dict)\n\t\t\tnextstep = recurse(assignment, states, domains, neighbors, user_dict)\n\t\t\tif nextstep != None:\n\t\t\t\treturn nextstep\n\t\treturn None", "def minimum_remaining_values(csp, ac_3=False):\n assigned = list()\n result = mrv_rec_backtracking(csp, assigned, csp.variables[:]) \n if result is False:\n print \"fuck\"\n return csp, assigned", "def randomchooseanddelete():\n\t\tvar = random.choice(unassigned)\n\t\tunassigned.remove(var)\n\t\treturn var", "def satisfying_assignment(formula):\n if len(formula) == 0:\n return {}\n solution = find_solution(formula)\n if solution != {}:\n return solution\n return None", "def _apply_degree_heuristic(self, vars, assignment, csp):\n\n result = []\n max_degree = -1\n\n for var in vars:\n neighbors = set()\n for constraint in csp.get_constraints(var):\n for neighbor in constraint.get_scope():\n # Collect all not assigned variables with common constraints\n if not assignment.has_assignment_for(neighbor):\n neighbors.add(neighbor)\n\n # Number of collected variables is a degree of the current variable\n degree = len(neighbors)\n if degree >= max_degree:\n if degree > max_degree:\n result = []\n max_degree = degree\n result.append(var)\n\n return result", "def minimum_remaining_values_with_degree(csp, ac_3=False):\n assigned = list()\n result = mrv_rec_backtracking(csp, assigned, csp.variables[:],True) \n if result is False:\n print \"fuck\"\n return csp, assigned", "def satisfying_assignment(formula):\n # convert the formula to a list 
of sets.\n formula = [set(i) for i in formula]\n\n # call the helper starting with the givne formula and an empty assignments\n # dictionary.\n result = sat_helper(formula, {})\n if result[0]:\n return result[1] # result[1] will be the dictionary of assignments.\n else:\n return None", "def iterate_over_assignment(assignment_funct, max_iterations=12, **args):\n args['knowing_minimum'] = 0\n args['knowing_maximum'] = sys.maxsize\n args['maxtime'] = 16 # in secs\n vars = None\n prob_status = pulp.LpStatusNotSolved\n iterations = 0\n while pulp.LpStatusOptimal != prob_status and pulp.LpStatusInfeasible != prob_status and iterations <= max_iterations:\n prob_status, vars = assignment_funct(**args)\n iterations+=1\n return prob_status, vars", "def select_most_constrained_var(self, iterables):\r\n return self.select_first([var for var in iterables if len(self.csp.domains[var]) == min(len(self.csp.domains[i]) for i in iterables)])", "def get_assigned_value(self, var) :\n return self.assigned_values.get(var, None)", "def min_conflicts(csp, max_steps=100000):\r\n # Generate a complete assignment for all variables (probably with conflicts)\r\n csp.current = current = {}\r\n for var in csp.variables:\r\n val = min_conflicts_value(csp, var, current)\r\n csp.assign(var, val, current)\r\n # Now repeatedly choose a random conflicted variable and change it\r\n for i in range(max_steps):\r\n conflicted = csp.conflicted_vars(current)\r\n if not conflicted:\r\n return current\r\n var = random.choice(conflicted)\r\n val = min_conflicts_value(csp, var, current)\r\n csp.assign(var, val, current)\r\n return None,csp.totchecks", "def set_unassigned_vars_order(self, unassigned_vars_ordered) :\n if (unassigned_vars_ordered is not None\n and not (set(unassigned_vars_ordered) <= set(self.variables))) :\n raise AttributeError(\"unassigned_vars_ordered contains items that \"\n +\"are not variables in this problem\")\n if any([var in self.assigned_values.keys() for var in unassigned_vars_ordered]):\n raise AttributeError(\"unassigned_vars_ordered contains variables \"\n +\"that are already assigned\")\n self.unassigned_vars = unassigned_vars_ordered[:]\n return self", "def _apply_mrv_heuristic(self, csp, assignment):\n result = []\n mrv = PlusInfinity()\n copy_assignment = assignment.copy()\n\n for var in csp.get_variables():\n if not copy_assignment.has_assignment_for(var):\n # Get number of left values for this variable\n num = self._calculate_left_values(var, csp, copy_assignment)\n if num <= mrv:\n if num < mrv:\n result = []\n mrv = num\n result.append(var)\n\n return result", "def select_from_strategy(dist: dict):\n\n val = random()\n for x in dist:\n val -= dist[x]\n if val <= 0:\n return x\n\n # Not properly normalised!\n return None", "def infer_assignment(self):\r\n self.support_pruning()\r\n return {v: self.curr_domains[v][0]\r\n for v in self.variables if 1 == len(self.curr_domains[v])}", "def min_conflicts_value(csp, var, current):\r\n return argmin_random_tie(csp.domains[var], key=lambda val: csp.nconflicts(var, val, current))", "def part_1(rules: Rules) -> int:\n\n happiness, _ = max(generate_arrangements(rules))\n print(f\"part 1: optimal arrangement brings {happiness} happiness\")\n return happiness", "def solve(grid):\n assignment = grid_values(grid)\n assignment = eliminate(assignment)\n return assignment", "def _compute_best_value(self):\n reduced_cs = []\n concerned_vars = set()\n\n for c in self.utilities:\n asgt = filter_assignment_dict(self._neighbors_values, c.dimensions)\n 
reduced_cs.append(c.slice(asgt))\n concerned_vars.update(c.dimensions)\n var_val, rel_val = find_arg_optimal(\n self.variable,\n lambda x: functools.reduce(operator.add, [f(x) for f in reduced_cs]),\n self._mode,\n )\n # Add the cost for each variable value if any\n for var in concerned_vars:\n if var.name == self.name:\n rel_val += var.cost_for_val(self.current_value)\n else:\n rel_val += var.cost_for_val(self._neighbors_values[var.name])\n\n return var_val, rel_val", "def simplex_choose_entering(tab):\n n = -1\n if rule==\"Random\":\n non_neg = [x for x in tab.get_non_basic() if tab[0,(x-1)]>0]\n if non_neg :\n n = non_neg[randint(0,len(non_neg)-1)]\n elif rule==\"Bland\":\n for x in tab.get_non_basic():\n if tab[0,(x-1)]>0:\n n=x\n break;\n elif rule==\"MaxCoeff\":\n n = np.argmax(tab[0,0:-1])+1\n if tab[0,n-1]<=0:\n n=-1\n elif rule==\"Custom\":\n t = tab[0,0:-1].copy()\n for i in range(len(t)):\n if np.dot(tab[:,i], tab[:,i])!=0:\n t[i] /= np.dot(tab[:,i], tab[:,i])\n else:\n t[i]=-1\n n = np.argmax(t)+1\n if tab[0,n-1]<=0:\n n=-1\n else:\n raise Exception(\"Pivot rule is not valid !\")\n if verboseMode and n!=-1:\n print(\"The entering variable is x_{0}\".format(n))\n return n", "def assignment_complete(self, assignment):\n # for each variable in the crossword\n for variable in self.crossword.variables:\n # if the variable is not assigned a value\n if variable not in assignment:\n # the crossword is not complete\n return False\n return True", "def choose_cell_to_assign(self):\r\n min_domain = 10\r\n max_degree = -1\r\n chosen_row = None\r\n chosen_col = None\r\n for row in range(9):\r\n for col in range(9):\r\n if self.puzzle[row][col] == 0:\r\n domain_size = len(self.grid[row][col].domain)\r\n if domain_size < min_domain:\r\n min_domain = domain_size\r\n chosen_row = row\r\n chosen_col = col\r\n elif domain_size == min_domain:\r\n degree = len(self.grid[row][col].neighbors)\r\n if degree > max_degree:\r\n max_degree = degree\r\n chosen_row = row\r\n chosen_col = col\r\n return self.grid[chosen_row][chosen_col]", "def satisfying_assignment(formula):\n return solver(convert_formula(formula))", "def fold_assignment(self):\n return self._parms.get(\"fold_assignment\")", "def _find_element_not_in_set(self, already_used: set) -> int:\n new_element = random.randint(a=self.min_value, b=self.max_value)\n while new_element in already_used:\n new_element = random.randint(a=self.min_value, b=self.max_value)\n return new_element", "def _check_if_satisfiable(self):\n # Search for a satisfying assignment\n all_variables = self.all_variables()\n\n # Try to find some assignment of the constrained vars\n counter = count()\n next_count = next(counter)\n queue = [(0, 0, next_count, {})]\n\n while queue:\n num_attempts, _, _, assignments = hq.heappop(queue)\n num_attempts += 1\n # Full assignment?\n # keep out of loop for empty constraint edge case\n if len(assignments) == len(all_variables):\n return True\n for v in sorted(all_variables - set(assignments.keys())):\n if isinstance(v, DiscreteVariable):\n possible_assignments = self.get_possible_assignments(v)\n else:\n possible_assignments = [v.sample() \\\n for _ in range(10*(1+num_attempts))]\n for assignment in possible_assignments:\n new_assignments = assignments.copy()\n new_assignments[v] = assignment\n # Constraint violated\n if not self.check(new_assignments):\n continue\n # Finish early\n if len(new_assignments) == len(all_variables):\n return True\n next_count = next(counter)\n hq.heappush(queue, (num_attempts, -len(new_assignments),\n 
-next_count, new_assignments))\n\n if next_count > gc.max_satisfy_tries:\n import ipdb; ipdb.set_trace()\n break\n\n return False", "def sample(self):\n assignments = {}\n for v in self.all_variables():\n for _ in range(gc.max_satisfy_tries):\n assignments[v] = v.sample()\n if self.check(assignments):\n break\n assert len(assignments) == len(self.all_variables())\n return assignments", "def removeInitialAssignment(self, *args):\n return _libsbml.Model_removeInitialAssignment(self, *args)", "def isAssigned(self):\n if self.getProton1Assignments() and self.getProton2Assignments():\n return 1\n else:\n return 0", "def find_free(min_=0):\n while is_occupied(min_):\n min_ += 1\n return min_", "def find_smallest(num_vars):\n for x in range(10):\n if num_vars <= 2**x:\n return x", "def solve_assignments(self):\n # build valuations vector \n c = -1 * self.valuations.flatten()\n\n # empty G and h, no inequality constraints\n G = np.zeros((1, self.n**2))\n h = np.zeros((1))\n\n # build A and b, enforces unique room-agent matching\n A = np.zeros((2 * self.n, self.n**2))\n b = np.ones((2 * self.n))\n for i in range(self.n):\n # exactly one room per agent\n A[i, i * self.n + np.arange(self.n)] = 1.0 \n # exactyly one agent per room\n A[self.n + i, (self.n) * np.arange(self.n) + i] = 1.0\n\n B = set(range(self.n**2))\n status, x = ilp(c=matrix(c, tc='d'), G=matrix(G, tc='d'), \n h=matrix(h, tc='d'), A=matrix(A, tc='d'), \n b=matrix(b, tc='d'), B=B)\n\n # get assignments\n x = np.argmax(np.array(x).reshape(self.n, self.n), axis=1)\n self.assignments = x \n\n return self.assignments", "def get_unique_variable(var_op_name):\n candidates = get_variables(scope=var_op_name)\n if not candidates:\n raise ValueError('Couldn\\'t find variable %s' % var_op_name)\n\n for candidate in candidates:\n if candidate.op.name == var_op_name:\n return candidate\n raise ValueError('Variable %s does not uniquely identify a variable' %\n var_op_name)", "def _compute_best_value(self):\n asgt = self._neighbors_values.copy()\n best_cost, best_val = None, []\n\n for v in self._variable.domain:\n asgt[self.variable.name] = v\n c = self._compute_cost(**asgt)\n if (\n best_cost is None\n or (best_cost > c and self._mode == \"min\")\n or (best_cost < c and self._mode == \"max\")\n ):\n best_cost = c\n best_val = [v]\n elif best_cost == c:\n best_val.append(v)\n\n return best_val, best_cost", "def min_value_prune_et(self, state, l_action, alpha, beta, d, cp):\r\n ended = self.terminal_check(state, d)\r\n\r\n if ended:\r\n # Calculate the utility value based on heuristic function\r\n return self.utility_et(state), l_action\r\n\r\n v = 999\r\n g_action = (5, 5)\r\n actions = []\r\n\r\n # Legal actions for the current player\r\n for action in self.actions(state):\r\n if action[0] == cp:\r\n actions.append(action)\r\n\r\n for action in actions:\r\n rd = self.result(state, action)\r\n vv = self.max_value_prune_et(rd, action, alpha, beta, d + 1, 'o' if cp == 'x' else 'x')\r\n\r\n if v > vv[0]:\r\n v = vv[0]\r\n g_action = action\r\n\r\n # Updating the record of visited state and the corresponding minimax value\r\n self.visited_node.append((copy.deepcopy(rd), vv[0]))\r\n\r\n # Update beta and prune the search\r\n if v <= alpha:\r\n return v, action\r\n beta = min(beta, v)\r\n\r\n return v, g_action", "def backtrack(csp):\n\n if len(csp.assignment) == len(csp.variables):\n return True\n\n variable = select_unassigned_variable(csp)\n value = order_domain_values(csp, variable)\n #print variable\n #print value\n flag = 0\n for x in value:\n 
csp.variables.begin_transaction()\n if is_consistent(csp, variable, x):\n #print \"past is_consistent\"\n for var in csp.variables:\n if var == variable:\n var.assign(x)\n var.is_assigned()\n solution = backtrack(csp)\n if solution != False:\n return True\n csp.variables.rollback()\n return False", "def pick(self, partitioning):\n r = random.random()\n a = 0.0\n try:\n for part, prob in self.prob[partitioning].items():\n a += prob\n if a >= r:\n return part\n raise JudgedError(\"Probabilities for partitioning '{}' do not sum to 1.0.\".format(partitioning))\n except:\n raise JudgedError(\"Probabilities for partitioning '{}' not set\".format(partitioning))", "def eval_assignment(assignment, motif_node_dict):\n if type(assignment.rvalue).__name__ == 'FuncCall':\n motif_node, tree_node = eval_function_call(assignment.rvalue, motif_node_dict)\n # consider \"var = XXX;\" and \"*var = XXX\" and \"&var = XXX\" situations\n if (type(assignment.lvalue).__name__ == 'ID' and assignment.lvalue.name in motif_node_dict) or (type(assignment.lvalue).__name__ == 'UnaryOp' and assignment.lvalue.expr.name in motif_node_dict):\n if not motif_node:\n print('\\33[101m' + '[error][eval_assignment]: ' + assignment.lvalue.name + ' is in the dictionary. MotifNode should not be None.\\033[0m')\n exit(1)\n else:\n motif_node_dict[assignment.lvalue.name].append(motif_node)\n return tree_node\n # In a case where a provenance node was declared but then assigned or reassigned. For example:\n # struct provenance *tprov;\n # ...\n # tprov = t->provenance;\n # tprov must then be in the motif_node_dict.\n elif type(assignment.lvalue).__name__ == 'ID' and assignment.lvalue.name in motif_node_dict:\n # we can only infer its type from the name of the variable\n motif_node = provenance.create_motif_node(assignment.lvalue.name)\n motif_node_dict[assignment.lvalue.name].append(motif_node)\n return None\n elif type(assignment.lvalue).__name__ == 'UnaryOp' and type(assignment.lvalue.expr).__name__ == 'ID' and assignment.lvalue.expr.name in motif_node_dict:\n # similar case as the previous one, except that we have: *tprov = ...\n # we can only infer its type from the name of the variable\n motif_node = provenance.create_motif_node(assignment.lvalue.expr.name)\n motif_node_dict[assignment.lvalue.expr.name].append(motif_node)\n return None\n else:\n #######################################################\n # We will consider other conditions if we ever see them\n # POSSIBLE CODE HERE.\n #######################################################\n return None", "def get_assignment(self, var):\n return self.variable_to_value.get(var)", "def pick_one(self):\n index = 0\n r = random.random()\n while r >= 0:\n r = r - self.normalised_fitness[index]\n index += 1\n index -= 1\n return self.population[index]", "def remove_assignment(self, var):\n\n del self.variable_to_value[var]", "def variable_ranking(self):\n self.grow_trees()\n dist_classes = self.dist_classes\n oob = self.forest.oob_set_generator()\n oob_length, First, elt_vals, var_vals = len(oob), True, {}, {}\n succ_rate, dist_succ_rate, dist_order = 0, 0, 0\n for var in self.variables:\n var_range = list(variable_range(self.data, var))\n range_len = len(var_range)\n print var\n permution = None\n permuted_succ, perm_dist_succ = 0, 0\n for elts in oob:\n if First:\n actual = self.data[elts][self.prediction_index]\n elt_vals[elts] = actual\n predicted = self.forest.test_predict(self.data[elts], elts)\n if actual in dist_classes:\n dist_order += 1\n if actual == predicted:\n succ_rate += 1\n 
if actual in dist_classes:\n dist_succ_rate += 1\n if var[1] == 'd':\n permution = int(math.floor(uniform(0, 1)*range_len))\n permution = var_range[permution]\n else:\n permution = uniform(0, 1)*(var_range[1] - var_range[0])\n perm_tuple = self.data[elts][:var[0]] + [permution] + self.data[elts][var[0]+1:]\n permuted_prediction = self.forest.predict(perm_tuple)\n actual = elt_vals[elts]\n if actual == permuted_prediction:\n permuted_succ += 1\n if actual in dist_classes:\n perm_dist_succ += 1\n if First:\n succ_rate = float(succ_rate)/oob_length\n dist_succ_rate = float(dist_succ_rate)/dist_order\n First = False\n permuted_succ = float(permuted_succ)/oob_length\n perm_dist_succ = float(perm_dist_succ)/dist_order\n print \"Originally a \", succ_rate, \" success rate, with permution to \", permuted_succ\n print \"A difference of \", succ_rate - permuted_succ\n print \"WRT Distinguised classes, a success rate of:\", dist_succ_rate, 'with permution to ', perm_dist_succ\n print \"A difference of \", dist_succ_rate - perm_dist_succ\n var_vals[var] = succ_rate - permuted_succ\n var_vals[(var, 'd')] = dist_succ_rate - perm_dist_succ\n var_vals = sorted(var_vals.items(), key=lambda x: x[1], reverse=True)\n for x in var_vals:\n print x[0], x[1]", "def holds(self,assignment):\n return self.condition(*tuple(assignment[v] for v in self.scope))", "def calc_fair_profit(self, assignment):\n fair_profit = {t:0 for t in self.tasks}\n for agent, tasks in assignment.items():\n for task in tasks:\n fair_profit[task] += self.profit(agent, task)\n return min(fair_profit.values())", "def next_nonlex(self):\n r = self.rank_nonlex()\n if r == ifac(self.size) - 1:\n return None\n return Permutation.unrank_nonlex(self.size, r+1)", "def assign_value(Xj, Xk, csp, assignment):\r\n parent_assignment = assignment[Xj]\r\n for val in csp.curr_domains[Xk]:\r\n if csp.constraints(Xj, parent_assignment, Xk, val):\r\n return val\r\n\r\n # No consistent assignment available\r\n return None", "def assignment_complete(self, assignment):\n # print(\"Entered assignment_complete Function\")\n for var in assignment:\n if assignment[var] is None:\n return False\n return self.consistent(assignment)\n\n # raise NotImplementedError", "def consistent(self, assignment):\n # print(\"Entered consistent Function\")\n # print(\"assignment\")\n # print(assignment)\n\n overlaps = self.crossword.overlaps\n value_set = set()\n for variable in assignment: \n #checking overlaps with neighbors\n neighbors = self.crossword.neighbors(variable)\n for neighbor in neighbors:\n overlap = overlaps[(variable, neighbor)]\n if (neighbor in assignment):\n # print(\"var 1 overlap letter\")\n # print(assignment[variable][overlap[0]])\n # print(\"var 2 overlap letter\")\n # print(assignment[neighbor][overlap[1]])\n if (assignment[variable][overlap[0]] is not assignment[neighbor][overlap[1]]):\n return False\n \n # print(\"neighbors\")\n # print(neighbors)\n\n #checking that the assignment is the correct length for the variable\n if (variable.length != len(assignment[variable])):\n return False\n\n #the set to check for distinct variables later\n value_set.add(assignment[variable])\n\n #Checking that all variables are distinct\n #these should be the same length unless two or more variables share an value\n if( len(value_set) is not len(assignment)): \n return False\n \n return True\n\n # raise NotImplementedError", "def solve_bruteforce(self):\n max_value = -1\n for z in range(0, self.k):\n max_value = -1\n max_index = -1\n for i, v in enumerate(self.numbers):\n if 
v > max_value:\n max_index = i\n max_value = v\n del self.numbers[max_index]\n\n return max_value", "def assignment(data, assigns, means, distfn):\n for i in xrange(data.shape[0]):\n bestidx, mindist = None, None\n for idx, mean in enumerate(means):\n dist = distfn(data[i,:], mean)\n if bestidx == None or dist < mindist:\n bestidx = idx\n mindist = dist\n assigns[i] = bestidx\n return assigns", "def get_var(my_vars: dict, name: str):\n desired_var = my_vars.get(name)\n if desired_var is not None:\n return desired_var\n else:\n var_names = 'x, y, alpha, beta, zeta, psi'\n print('No variable with this name, current model accepts only:' + var_names)\n return None", "def backtracking_search(self):\n # Make a so-called \"deep copy\" of the dictionary containing the\n # domains of the CSP variables. The deep copy is required to\n # ensure that any changes made to 'assignment' does not have any\n # side effects elsewhere.\n assignment = copy.deepcopy(self.domains)\n\n # Run AC-3 on all constraints in the CSP, to weed out all of the\n # values that are not arc-consistent to begin with\n self.inference(assignment, self.get_all_arcs())\n # Call backtrack with the partial assignment 'assignment'\n\n return self.backtrack(assignment)", "def max_value_prune_et(self, state, l_action, alpha, beta, d, cp):\r\n ended = self.terminal_check(state, d)\r\n\r\n if ended:\r\n # Calculate the utility value based on heuristic function\r\n return self.utility_et(state), l_action\r\n\r\n v = -999\r\n g_action = (6, 6)\r\n actions = []\r\n\r\n # Legal actions for the current player\r\n for action in self.actions(state):\r\n if action[0] == cp:\r\n actions.append(action)\r\n\r\n for action in actions:\r\n rs = self.result(state, action)\r\n vv = self.min_value_prune_et(rs, action, alpha, beta, d + 1, 'o' if cp == 'x' else 'x')\r\n\r\n if v < vv[0]:\r\n v = vv[0]\r\n g_action = action\r\n\r\n # Updating the record of visited state and the corresponding minimax value\r\n self.visited_node.append((copy.deepcopy(rs), vv[0]))\r\n\r\n # Update alpha and prune the search\r\n if v >= beta:\r\n return v, action\r\n alpha = max(alpha, v)\r\n\r\n return v, g_action", "def pick_rhs_element(self):\n if self.number_preterminal_productions > 0:\n return random.choice(self.nonterminals)\n if random.random() < self.terminalprob:\n return self.pick_terminal()\n else:\n return random.choice(self.nonterminals)", "def nconflicts(self, var, val, assignment):\r\n\r\n # Subclasses may implement this more efficiently\r\n def conflict(var2):\r\n return var2 in assignment and not self.constraints(var, val, var2, assignment[var2])\r\n\r\n return count(conflict(v) for v in self.neighbors[var])", "def __degree(self, var, unassigned_vars):\n return len(self.__unassigned_neighbors(var, unassigned_vars))", "def assign(self, available_workers):\n \n status = self.getStatus()\n\n assert len(available_workers) == 1\n worker = available_workers[0]\n assignment = {}\n\n w_id = str(worker.id)\n task_id = self.task_id\n\n #tracks \n worker_assignments_var = redis_get_worker_assignments_var(task_id, w_id)\n\n print \"WORKER ID:\", w_id\n print \"STATUS:\", status\n print \"ASSIGNMENTS FOR WORKER SO FAR:\", app.redis.smembers(worker_assignments_var)\n\n\n # sort questions by pomdp expected reward...\n # XXX this isn't quite what we want...\n # want to sort by value of getting another label\n # so we don't have all workers getting assigned to the same question\n unfinished_unsorted_qs = [(q,v) for (q,v) in status.iteritems() if v['best_action_str'] == 
'create-another-job']\n # NOTE REVERSE ORDER\n sorted_qs = sorted(unfinished_unsorted_qs, key=lambda x:x[1]['best_expected_reward'], reverse=True)\n print \"sorted_qs\", sorted_qs\n# print \"worker %s has done the following questions\" % w_id\n# for (q_id,er) in sorted_qs:\n# if app.redis.sismember(worker_assignments_var, q_id):\n# print \"+\", q_id\n# else:\n# print \"-\", q_id\n\n for idx in range(len(sorted_qs)):\n q_id,expected_reward = sorted_qs[idx]\n\n if not app.redis.sismember(worker_assignments_var, q_id):\n assignment[w_id] = q_id\n print \"assignment=\", assignment\n app.redis.sadd(worker_assignments_var, q_id)\n return assignment\n\n #if here no assignment was made to our worker!\n assert len(assignment) == 0\n print \"no assignment made yet\"\n\n #NOTE POMDP doesn't think there are any questions available to the worker \n #that need another label, but let's give them an assignment anyway\n #Pick question where submitting would have worst expected reward \n # (implying it may benefit from another label)\n finished_qs = [(q,v) for (q,v) in status.iteritems() if v['best_action_str'] != 'create-another-job']\n sorted_finished_qs = sorted(finished_qs, key=lambda x:x[1]['best_expected_reward']) # no reverse\n for idx in range(len(sorted_finished_qs)):\n q_id,expected_reward = sorted_finished_qs[idx]\n\n if not app.redis.sismember(worker_assignments_var, q_id):\n assignment[w_id] = q_id\n print \"gave worker a finished q assignment=\", assignment\n app.redis.sadd(worker_assignments_var, q_id)\n return assignment\n\n return assignment", "def getAssignmentRuleByVariable(self, *args):\n return _libsbml.Model_getAssignmentRuleByVariable(self, *args)", "def _var_sol(self, var: Union[LpVariable, Var]) -> float:\n\n return value(var) if self.optimizer == 'pulp' else var.x", "def fn(x):\n ans = x\n for xx in graph.get(x, []): \n if quiet[fn(xx)] < quiet[ans]: ans = fn(xx)\n return ans", "def search(values):\n global assignments\n\n # First, reduce the puzzle using the previous function\n values = reduce_puzzle(values)\n\n # Check if this solution is unsolvable\n if values is False:\n return False\n\n # Check if we found a solutio, all boxes have one digit\n if all(len(values[s]) == 1 for s in boxes):\n return values\n # Choose one of the unfilled squares with the fewest possibilities\n min = 10\n minKey = None\n for v in values:\n if 1 < len(values[v]) < min:\n min = len(values[v])\n minKey = v\n\n for digit in values[minKey]:\n new_values = dict(values)\n assignments_bck = assignments.copy()\n new_values = assign_value(new_values, minKey, digit)\n new_values = search(new_values)\n if new_values != False:\n return new_values\n assignments = assignments_bck.copy()\n return False", "def revise(self, assignment, i, j):\n revised = False\n # For all the values in i's variables\n for x in assignment[i]:\n # if there exist NO possible values in the constraints between i and j\n # then remove this value from i\n if not any([(x,y) for y in assignment[j] if (x,y) in self.constraints[i][j]]):\n assignment[i].remove(x)\n revised = True\n return revised", "def assignment(data, assigns, means, distfn):\n for i in xrange(data.shape[0]):\n bestidx, mindist = None, None\n for idx, mean in enumerate(means):\n I = data[i,:,:]\n try:\n dist = distfn(I, mean)\n except:\n traceback.print_exc()\n pdb.set_trace()\n if dist == np.nan:\n print \"Uhoh, nan dist.\"\n pdb.set_trace()\n if bestidx == None or dist < mindist:\n if dist == mindist:\n # To prevent cycles, always tie-break via smallest\n # index.\n bestidx 
= min(bestidx, idx)\n else:\n bestidx = idx\n mindist = dist\n assigns[i] = bestidx\n return assigns", "def part_one(puzzle: Puzzle) -> typing.Optional[typing.Union[str, int]]:\n puzzle[\"set\"] = set(puzzle.intlines)\n\n for number_one in puzzle[\"set\"]:\n if (2020 - number_one) in puzzle[\"set\"]:\n return (2020 - number_one) * number_one", "def get_most_valuable(self):\n return self.most_valuable", "def eliminate_variable(variable, factors):\r\n containing_var = []\r\n not_containing_var = []\r\n for fac in factors:\r\n if variable in fac.get_variables():\r\n containing_var.append(fac)\r\n else:\r\n not_containing_var.append(fac)\r\n\r\n if not containing_var:\r\n return factors\r\n else:\r\n T = factor_module.multiply_batch(variable, containing_var)\r\n new_factor = factor_module.sum_out(variable, T)\r\n not_containing_var.append(new_factor)\r\n return not_containing_var", "def __de_randomize_LP(self, LP_news_pool, tmp_slots_assignation_probabilities, de_rand_technique):\n result = [0] * self.layout_slots\n tmp_slot_promenances = self.real_slot_promenances.copy()\n feasible_news = [i for i in range(len(LP_news_pool))]\n slot_counter = 0\n allocated_slots = []\n while slot_counter < self.layout_slots:\n if (de_rand_technique == \"rand_1\") or (de_rand_technique == \"rand_3\"):\n # Start from the best slot\n target_slot = np.argmax(tmp_slot_promenances)\n else:\n # Start from slot j with probability proportional to j's slot promenance\n tmp_slot_promenance_norm = list(np.array(tmp_slot_promenances) / sum(tmp_slot_promenances))\n target_slot_promenance = np.random.choice(tmp_slot_promenances, p=tmp_slot_promenance_norm)\n target_slot = tmp_slot_promenances.index(target_slot_promenance)\n\n target_slot_assegnation_probabilities = tmp_slots_assignation_probabilities[int(target_slot)]\n if de_rand_technique == \"rand_3\":\n for p in range(len(tmp_slots_assignation_probabilities)):\n if (p not in allocated_slots) and (p != target_slot):\n target_slot_assegnation_probabilities = \\\n list(np.array(target_slot_assegnation_probabilities) *\n (1 - np.array(tmp_slots_assignation_probabilities[p])))\n allocated_slots.append(target_slot)\n\n # Normalize the vector of the variable assigning to the target slot\n target_slot_assegnation_probabilities_norm = list(np.array(target_slot_assegnation_probabilities) /\n sum(target_slot_assegnation_probabilities))\n # Choose the allocating news with probability proportional to the values of the variables\n selected_news = np.random.choice(feasible_news, p=np.abs(target_slot_assegnation_probabilities_norm))\n # Insert the winner news in the allocation and repeat after removing the variables.\n result[int(target_slot)] = LP_news_pool[selected_news]\n deletion_index = feasible_news.index(selected_news)\n feasible_news.__delitem__(deletion_index)\n for probs in tmp_slots_assignation_probabilities:\n probs.__delitem__(deletion_index)\n tmp_slot_promenances[int(target_slot)] = 0\n slot_counter += 1\n\n return result", "def _best_individual(self):\n return max(self._population, key=attrgetter(\"fitness\"))", "def value(self, atom_assignment):\n return atom_assignment ^ self.negated", "def solve_constraint_propagate_reduced_domains(problem) :\n q = [problem]\n extCount = 0\n while len(q)!=0:\n removed = q[0]\n q = q[1:]\n extCount+=1\n if has_empty_domains(removed) or check_all_constraints(removed)==False:\n continue\n if len(removed.unassigned_vars)==0:\n return (removed.assigned_values,extCount)\n \n var = removed.pop_next_unassigned_var()\n extensions = []\n for 
val in removed.get_domain(var):\n csp_new = removed.copy()\n csp_new.set_assigned_value(var,val)\n domain_reduction(csp_new,[var])\n extensions.append(csp_new)\n \n q = extensions + q\n return (None,extCount)", "def unused(permutation, nb_elements):\n return tuple(set(range(nb_elements)) - set(permutation))", "def test_auto_assign_one_overflow(self):\n shift1 = RegularWorkshift.objects.create(\n workshift_type=self.wtype1,\n pool=self.p1,\n hours=6,\n )\n unfinished = utils.auto_assign_shifts(self.semester)\n self.assertEqual([self.profile], unfinished)\n self.assertNotIn(self.profile, shift1.current_assignees.all())\n\n instances = WorkshiftInstance.objects.filter(weekly_workshift=shift1)\n self.assertGreater(instances.count(), 0)\n self.assertTrue(all(\n instance.workshifter is None\n for instance in instances\n ))\n\n pool_hours = self.profile.pool_hours.get(pool=self.p1)\n self.assertEqual(\n pool_hours.assigned_hours,\n 0,\n )", "def get(self, lhs, default=frozenset()):\n return self._rules_by_lhs.get(lhs, default)", "def check_assignment_consistency(self, assign_df=None, threshold=0.1):\n \n # If the user hasn't specified an assign_df, use one already calculated \n # for this NAPS_assigner instance\n if assign_df is None:\n set_assign_df = True\n assign_df = self.assign_df\n else:\n set_assign_df = False\n \n # First check if there are any sequential atoms\n carbons = pd.Series([\"C\",\"CA\",\"CB\"])\n carbons_m1 = carbons + \"m1\"\n seq_atoms = carbons[carbons.isin(assign_df.columns) & \n carbons_m1.isin(assign_df.columns)]\n seq_atoms_m1 = seq_atoms+\"m1\"\n #seq_atoms = list(seq_atoms)\n \n if seq_atoms.size==0:\n # You can't do a comparison\n assign_df[\"Max_mismatch_prev\"] = np.NaN\n assign_df[\"Max_mismatch_next\"] = np.NaN\n assign_df[\"Num_good_links_prev\"] = np.NaN\n assign_df[\"Num_good_links_next\"] = np.NaN\n return(assign_df)\n else:\n # First, get the i and i-1 shifts for the preceeding and \n # succeeding residues\n tmp = assign_df.copy()\n tmp = tmp.loc[tmp[\"Dummy_res\"]==False,]\n tmp.index = tmp[\"Res_N\"]\n tmp = tmp[list(seq_atoms)+list(seq_atoms_m1)]\n tmp_next = tmp.copy()\n tmp_next.index -= 1\n tmp_prev = tmp.copy()\n tmp_prev.index += 1\n tmp = tmp.join(tmp_next, rsuffix=\"_next\")\n tmp = tmp.join(tmp_prev, rsuffix=\"_prev\")\n # Calculate mismatch for each atom type\n for atom in seq_atoms:\n tmp[\"d\"+atom+\"_prev\"] = tmp[atom+\"m1\"] - tmp[atom+\"_prev\"]\n tmp[\"d\"+atom+\"_next\"] = tmp[atom] - tmp[atom+\"m1_next\"]\n # Calculate maximum mismatch\n tmp[\"Max_mismatch_prev\"] = tmp[\"d\"+seq_atoms+\"_prev\"].max(axis=1, \n skipna=True)\n tmp[\"Max_mismatch_next\"] = tmp[\"d\"+seq_atoms+\"_next\"].max(axis=1,\n skipna=True)\n \n # Calculate number of consistent matches\n tmp[\"Num_good_links_prev\"] = (tmp[\"d\"+seq_atoms+\"_prev\"]<threshold).sum(axis=1)\n tmp[\"Num_good_links_next\"] = (tmp[\"d\"+seq_atoms+\"_next\"]<threshold).sum(axis=1)\n \n # Join relevant columns back onto assign_df\n tmp[\"Res_N\"] = tmp.index\n assign_df = assign_df.join(tmp.loc[:,[\"Max_mismatch_prev\", \n \"Max_mismatch_next\", \n \"Num_good_links_prev\", \n \"Num_good_links_next\"]], \n on=\"Res_N\")\n if set_assign_df:\n self.assign_df = assign_df\n return(assign_df)", "def strategy(hand, num_die_sides):\n best_move = (0.0, ())\n all_holds = gen_all_holds(hand)\n for hold in all_holds:\n # hand can be less than 5\n num_free_dice = len(hand) - len(hold)\n expected = expected_value(hold, num_die_sides, num_free_dice)\n if expected > best_move[0]:\n best_move = (expected, 
hold)\n return best_move", "def sat_solve(self):\n # YOUR CODE HERE\n o = frozenset()\n if self.isfalse:\n return False\n elif self.istrue:\n return set()\n l = self.generate_candidate_assignments()\n print(\"assignments,\", l)\n for i in l:\n st = sat_apply_assignment(self, i)\n print(\"i:\", i, \"new set\", st)\n\n if st.istrue:\n return {i}\n elif not st.isfalse:\n sat_solve(st)\n\n return {i}", "def assign(self, starts):\n # Initialize the set of open and closed nodes, and the connection map\n open_set, closed_set = starts, set()\n \n # Initialize a map of assignments and associated profits\n profits = {s:0 for s in starts}\n \n while open_set:\n\n # Explore the most promising node\n current = max(open_set, key=lambda n: profits[n])\n \n # Move the current node from the open set to the closed set\n open_set.remove(current)\n closed_set.add(current)\n \n # Track if assignment is complete\n assignment_finished = True\n \n # Determine all possible next assignment steps\n for agent in self.agents:\n # Determine possible tasks the agent may be assigned to\n poss_tasks = self.assign_agent(agent, current)\n \n # If assignments are possible, the assignment is not complete\n if poss_tasks: assignment_finished = False\n \n for task in poss_tasks:\n # Determine next assignment step\n next_dict = dict(current)\n next_dict[agent] = next_dict[agent] | {task}\n next_assignment = frozendict(next_dict)\n \n # If we have already explored this assignment, continue\n if next_assignment in closed_set:\n continue\n # Else add the assignment to the open set\n else:\n open_set.add(next_assignment)\n profits[next_assignment] = self.calc_profit(next_assignment)\n \n # If assignment is finished, add it to finished assignments\n if assignment_finished:\n \n # Check if assignment is also complete\n if self.complete and not self.is_complete(current):\n continue\n \n self.finished_assignments[current] = profits[current]\n \n # Update current fair / max profit and print if applicable\n # Procedure for fair profit (max profit tiebreaker)\n if self.fair:\n cur_fair_profit = self.calc_fair_profit(current)\n if ((cur_fair_profit > self.fair_profit) or \n (cur_fair_profit == self.fair_profit and\n profits[current] > self.max_profit)):\n self.fair_profit = cur_fair_profit\n self.max_profit = profits[current]\n self.print_assignment(current, profits[current])\n elif (self.verbose and profits[current] >= self.max_profit\n and cur_fair_profit >= self.fair_profit):\n self.print_assignment(current, profits[current])\n # Procedure for maximum profit\n else:\n if profits[current] > self.max_profit:\n self.max_profit = profits[current]\n self.print_assignment(current, profits[current])\n elif self.verbose and profits[current] >= self.max_profit:\n self.print_assignment(current, profits[current])", "def _find_optimal_impl(self, field, depth, is_r, alpha, beta):\n # Try to evaluate the field right now\n if field.is_terminal():\n final_value = self.eval_field(field, depth, is_r)\n return Move(final_value, 0, 0)\n\n self.unrolled += 1\n\n # copy = field.copy()\n value = Move(2 if is_r else -2, 0, 0)\n for move in get_moves(field):\n m = move[0]\n new_value = self._find_optimal_impl(m, depth + 1, not is_r,\n alpha, beta)\n new_value.x = move[1].x\n new_value.y = move[1].y\n if is_r:\n value = cp.copy(min(value, new_value))\n if value <= alpha:\n return value # cut off\n beta = cp.copy(min(beta, value))\n else:\n value = cp.copy(max(value, new_value))\n if value >= beta:\n return value # cut off\n alpha = cp.copy(max(alpha, value))\n 
return value", "def optimal_min(board):\n if terminal(board):\n return [None, utility(board)]\n\n available_actions = list(actions(board))\n\n # Naive baseline comparison is positive infinity\n global_optimum = [None, math.inf]\n\n for action in available_actions:\n # Anticipates optimal adversarial moves.\n local_optimum = optimal_max(result(board, action))\n\n if global_optimum[1] >= local_optimum[1]:\n global_optimum = [action, local_optimum[1]]\n\n return global_optimum", "def select_best_n(solver, pop, n, minimising=None):\n assert n <= len(pop)\n if minimising is None:\n minimising = solver.alg_params.minimising\n\n key_f = operator.attrgetter('fitness')\n if minimising:\n f = copy.deepcopy(sorted(pop, key=key_f, reverse=False))\n else:\n f = copy.deepcopy(sorted(pop, key=key_f, reverse=True))\n return f[:n]", "def solve_constraint_propagate_reduced_domains(problem) :\n agenda=[problem]\n extension=0\n current_prob=agenda.pop(0)\n extension+=1\n\n #check failure\n if has_empty_domains(current_prob) or (not check_all_constraints(current_prob)):\n return (None, extension)\n\n #check success\n all_assigned=True\n variables = current_prob.get_all_variables()\n for var in variables:\n if current_prob.get_assigned_value(var)==None:\n all_assigned=False\n break\n if all_assigned:\n return (current_prob.assigned_values,extension)\n\n #iteration\n next_un_var=current_prob.pop_next_unassigned_var()\n next_domain=current_prob.get_domain(next_un_var)\n new_probs=[]\n for val in next_domain:\n temp=current_prob.copy()\n new=temp.set_assigned_value(next_un_var,val)\n\n queue=[next_un_var]\n domain_reduction(new,queue)\n\n new_probs.append(new)\n agenda=new_probs+agenda\n while (len(agenda)!=0):\n new_prob = agenda.pop(0)\n result=solve_constraint_propagate_reduced_domains(new_prob)\n extension+=result[1]\n if not result[0] is None:\n return (result[0],extension)\n return (None,extension)", "def assignment(data, assigns, mediods, distfn, distmat):\n for row in xrange(data.shape[0]):\n if row in mediods:\n # Data pt. is a mediod, should be assigned to itself\n assigns[row] = row\n continue\n mindist, bestidx = None, None\n for i, idx in enumerate(mediods):\n dist = distmat[row, idx]\n try:\n foo = dist < mindist\n bar = mindist == None\n baz = foo or bar\n except:\n pdb.set_trace()\n if mindist == None or dist < mindist:\n mindist = dist\n bestidx = idx\n assigns[row] = bestidx\n return assigns", "def getInitialAssignment(self, *args):\n return _libsbml.Model_getInitialAssignment(self, *args)", "def backtracking(csp, ac_3=False):\n assigned = []\n unassigned = csp.variables[:]\n for v in unassigned:\n if v.value is not None:\n unassigned.remove(v)\n \n result = recursive_backtracking(csp, assigned,unassigned)\n if result is False:\n print \"fuck\"\n return csp" ]
[ "0.80119693", "0.7859947", "0.76820076", "0.7355518", "0.71462846", "0.703895", "0.703833", "0.6703648", "0.6678851", "0.65556186", "0.6266142", "0.61606276", "0.6047509", "0.6028325", "0.5978296", "0.5843578", "0.5655437", "0.56006765", "0.55530095", "0.5532206", "0.55234164", "0.5511484", "0.5477733", "0.54232055", "0.5317622", "0.52617127", "0.5255691", "0.5047522", "0.5032681", "0.5003967", "0.49953648", "0.4993014", "0.49727038", "0.49652827", "0.49632525", "0.49420223", "0.48798946", "0.4872813", "0.48494795", "0.48370785", "0.48311684", "0.4792585", "0.478366", "0.47655907", "0.47415435", "0.47373837", "0.47368348", "0.47243544", "0.47124782", "0.47091624", "0.47047997", "0.47044063", "0.47043473", "0.47017017", "0.46865314", "0.46766746", "0.4674907", "0.46614626", "0.46343884", "0.4632232", "0.46256056", "0.4617807", "0.46111956", "0.46068668", "0.4605585", "0.45954517", "0.45937678", "0.45846772", "0.45774826", "0.45760098", "0.45600545", "0.45524612", "0.45390096", "0.45360127", "0.45177838", "0.45096076", "0.4501163", "0.44990572", "0.44806415", "0.4475887", "0.44748837", "0.44736564", "0.44731396", "0.44723696", "0.4469363", "0.44666198", "0.4466332", "0.44657", "0.44639996", "0.44614425", "0.44610947", "0.4458951", "0.44572386", "0.44480515", "0.44463244", "0.4442963", "0.44417462", "0.44414732", "0.4436209", "0.44304758" ]
0.77794665
2
Using Backtracking Search, take as input a partial assignment for the crossword and return a complete assignment if possible to do so. `assignment` is a mapping from variables (keys) to words (values). If no assignment is possible, return None.
def backtrack(self, assignment): # print("Entered backtrack Function") # Check if assignment is complete if len(assignment) == len(self.domains): return assignment # Try a new variable var = self.select_unassigned_variable(assignment) word_list = self.order_domain_values(var, assignment) for word in word_list: new_assignment = assignment.copy() new_assignment[var] = word[0] if self.consistent(new_assignment): result = self.backtrack(new_assignment) if result is not None: return result return None # raise NotImplementedError
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def backtrack(self, assignment):\n # if the assignment is complete\n if self.assignment_complete(assignment):\n # return the assignment, crossword is complete\n return assignment\n # pick a variable to try to assign\n var = self.select_unassigned_variable(assignment)\n # for each value in the variable's domain\n for value in self.order_domain_values(var, assignment):\n # attempt to assign this value and fit it into the crossword\n # make a copy of the current assignments\n trial = assignment.copy()\n # add the trial value to the test assignment\n trial[var] = value\n # if the test assignment is consistent\n if self.consistent(trial):\n # add the trial assignment to the current list of assignments\n assignment[var] = value\n # take the next backtrack step with this new assign,ent\n result = self.backtrack(assignment)\n # if the backtrack is a success\n if result is not None:\n # we have a match\n return result\n # a backtrack further down failed, so remove the trial assignment\n assignment.pop(var)\n # no assignment was possible, return None\n return None", "def backtrack(self, assignment):\n #if a solution has been found, returns the solution, this is used for recursive purposes\n if self.assignment_complete(assignment) and self.consistent(assignment):\n return assignment\n #select the most optimal variable/node\n var = self.select_unassigned_variable(assignment)\n #assigns a word left in the domain of var and assigns it to var\n for word in self.order_domain_values(var, assignment):\n assignment[var]= word\\\n #if the assignment is consistent, recursively call backtrack\n if self.consistent(assignment):\n result= self.backtrack(assignment)\n if result != False:\n return assignment\n #if the assignment is not consistent at any point, remove the latest assignment\n assignment.pop(var)\n\n return None", "def backtrack(self, assignment):\n # As stated above, if all variables in assignment is 1\n # then all values have been set and we return assignment \n if all(len(l) == 1 for l in assignment.values()):\n return assignment\n\n # Pick the next unnassigned variable that we are going to check \n key, values = self.select_unassigned_variable(assignment)\n # Loop through all the allowed values of this square in the sudoku board\n for value in values:\n # Do a deepcopy cuz otherwise R.I.P\n deep = copy.deepcopy(assignment)\n # Checks if this current value is consistent with the rest\n # of the sudoku board \n if self.check_consistency(deep, key, value):\n # IF it is consistent then we set this square to have this value \n deep[key] = [value]\n # Do inference check for hyper optimized code\n if self.inference(deep, self.get_all_arcs()):\n self.counter += 1\n result = self.backtrack(deep)\n if result is not False:\n return result\n else:\n self.fails += 1\n else:\n # Continue looping through the values of the currently selected \n # sudoku-square if the value was inconsistent with the board \n continue\n return False", "def select_unassigned_variable(self, assignment):\n var_list= []\n #add unassigned variabled to a list along with the number of words left in its domain\n for var in self.domains:\n if var not in assignment:\n var_list.append((var, len(self.domains[var])))\n #sort this list by the number of words left in its domain\n var_list.sort(key= lambda x:x[1])\n\n #list for variables that are tied for least words left in domain\n equal_vars= [list(var_list[0])]\n for i in range(len(var_list)):\n #adds variables with same number of words left in domain\n if var_list[0][1] == var_list[i][1] 
and var_list[i] != var_list[0]:\n equal_vars.append(list(var_list[i]))\n\n \n #change the encoded information for words left in domain to the number of neighbors the variable had (highest degree)\n for i in range(len(equal_vars)):\n equal_vars[i][1]= len(self.crossword.neighbors(equal_vars[i][0]))\n\n #sort the list by the highest degree\n equal_vars.sort(key= lambda x:x[1])\n \n #return var with highest degree\n return equal_vars[0][0]", "def backtracking_search(csp):\n if backtrack(csp):\n return csp.assignment\n else:\n return None", "def backtracking_search(csp):\n if backtrack(csp):\n return csp.assignment\n else:\n return None", "def consistent(self, assignment):\n # for each of the current assignments\n for word in assignment:\n # if the word does not fit in the gaps\n if len(assignment[word]) != word.length:\n # reject attempt\n return False\n # if the word is already in the assignment\n if list(assignment.values()).count(assignment[word]) > 1:\n # reject attempt\n return False\n # for each of the overlaps\n for overlap in self.crossword.overlaps:\n # if the overlap isn't empty and is an overlap for the word\n # overlaps are a superset: if the overlap of (x, y) is in the set, so is (y, x), so we can just go by the first overlap element\n if self.crossword.overlaps[overlap] is not None and overlap[0] == word:\n # try to access the word assignment for the other overlap target\n try:\n test_word = assignment[overlap[1]]\n # if it does not exist in the assignment\n except KeyError:\n # continue to the next overlap\n continue\n # if the other overlap target has been assigned\n else:\n # extract the letter we want to match for the overlap\n test_letter = test_word[self.crossword.overlaps[overlap][1]]\n # if the letters do not match\n if assignment[word][self.crossword.overlaps[overlap][0]] != test_letter:\n # reject attempt\n return False\n return True", "def select_unassigned_variable(self, assignment):\n # print(\"Entered select_unassigned_variable Function\")\n # print(\"Assignment\")\n # print(assignment)\n variables = set()\n variables.update(self.domains.keys())\n unassigned_variables = set()\n unassigned_variables.update(variables.difference(assignment.keys()))\n # print(\"All Variables\")\n # print(variables)\n # print(\"Unassigned Variables\")\n # print(unassigned_variables)\n\n # This chooses the variables with the smallest domain from this list (unassigned_variables)\n var_list = []\n for variable in unassigned_variables:\n var_list.append( (variable, len(self.domains[variable]), len(self.crossword.neighbors(variable)) ) )\n \n var_list.sort(key = self.sort_by_domain)\n var_list.sort(reverse=True, key = self.sort_by_neighbors)\n\n # print(\"var_list\")\n # print(var_list) \n \n return var_list[0][0]\n\n # raise NotImplementedError", "def satisfying_assignment(formula):\n if len(formula) == 0:\n return {}\n solution = find_solution(formula)\n if solution != {}:\n return solution\n return None", "def backtracking_search(self):\n # Make a so-called \"deep copy\" of the dictionary containing the\n # domains of the CSP variables. 
The deep copy is required to\n # ensure that any changes made to 'assignment' does not have any\n # side effects elsewhere.\n assignment = copy.deepcopy(self.domains)\n\n # Run AC-3 on all constraints in the CSP, to weed out all of the\n # values that are not arc-consistent to begin with\n self.inference(assignment, self.get_all_arcs())\n # Call backtrack with the partial assignment 'assignment'\n\n return self.backtrack(assignment)", "def satisfying_assignment(formula):\n # convert the formula to a list of sets.\n formula = [set(i) for i in formula]\n\n # call the helper starting with the givne formula and an empty assignments\n # dictionary.\n result = sat_helper(formula, {})\n if result[0]:\n return result[1] # result[1] will be the dictionary of assignments.\n else:\n return None", "def select_unassigned_variable(self, assignment):\n # sort crossword variables that are not in assignment by the length of their domain lists\n available = sorted([x for x in self.crossword.variables if x not in assignment], key=lambda x: len(self.domains[x]))\n # sort the list of available variables that have the same size domain as the shortest by the number of neighbors they have\n available = sorted([x for x in available if len(self.domains[x]) == len(self.domains[available[0]])], key=lambda x: len(self.crossword.neighbors(x)))\n # return the last element of the array\n return available.pop()", "def select_unassigned_variable(self, assignment):\n # Simply just pick the next value that has more than one value\n # in the variable list\n for key, value in assignment.iteritems():\n if len(value) > 1:\n return key, value", "def fold_assignment(self):\n return self._parms.get(\"fold_assignment\")", "def consistent(self, assignment):\n # print(\"Entered consistent Function\")\n # print(\"assignment\")\n # print(assignment)\n\n overlaps = self.crossword.overlaps\n value_set = set()\n for variable in assignment: \n #checking overlaps with neighbors\n neighbors = self.crossword.neighbors(variable)\n for neighbor in neighbors:\n overlap = overlaps[(variable, neighbor)]\n if (neighbor in assignment):\n # print(\"var 1 overlap letter\")\n # print(assignment[variable][overlap[0]])\n # print(\"var 2 overlap letter\")\n # print(assignment[neighbor][overlap[1]])\n if (assignment[variable][overlap[0]] is not assignment[neighbor][overlap[1]]):\n return False\n \n # print(\"neighbors\")\n # print(neighbors)\n\n #checking that the assignment is the correct length for the variable\n if (variable.length != len(assignment[variable])):\n return False\n\n #the set to check for distinct variables later\n value_set.add(assignment[variable])\n\n #Checking that all variables are distinct\n #these should be the same length unless two or more variables share an value\n if( len(value_set) is not len(assignment)): \n return False\n \n return True\n\n # raise NotImplementedError", "def holds(self,assignment):\n return self.condition(*tuple(assignment[v] for v in self.scope))", "def recurse(assignment, states, domains, neighbors, user_dict):\n\t\tif len(unassigned) == 0:\n\t\t\treturn assignment\n\n\t\tvarr[0] = randomchooseanddelete()\n\n\t\tfor val in stardomain(varr[0], curr_domains):\n\t\t\tassignment[varr[0]] = val\n\t\t\tforwardcheck(varr[0], val, assignment, user_dict)\n\t\t\tnextstep = recurse(assignment, states, domains, neighbors, user_dict)\n\t\t\tif nextstep != None:\n\t\t\t\treturn nextstep\n\t\treturn None", "def satisfying_assignment(formula):\n return solver(convert_formula(formula))", "def eval_assignment(assignment, 
motif_node_dict):\n if type(assignment.rvalue).__name__ == 'FuncCall':\n motif_node, tree_node = eval_function_call(assignment.rvalue, motif_node_dict)\n # consider \"var = XXX;\" and \"*var = XXX\" and \"&var = XXX\" situations\n if (type(assignment.lvalue).__name__ == 'ID' and assignment.lvalue.name in motif_node_dict) or (type(assignment.lvalue).__name__ == 'UnaryOp' and assignment.lvalue.expr.name in motif_node_dict):\n if not motif_node:\n print('\\33[101m' + '[error][eval_assignment]: ' + assignment.lvalue.name + ' is in the dictionary. MotifNode should not be None.\\033[0m')\n exit(1)\n else:\n motif_node_dict[assignment.lvalue.name].append(motif_node)\n return tree_node\n # In a case where a provenance node was declared but then assigned or reassigned. For example:\n # struct provenance *tprov;\n # ...\n # tprov = t->provenance;\n # tprov must then be in the motif_node_dict.\n elif type(assignment.lvalue).__name__ == 'ID' and assignment.lvalue.name in motif_node_dict:\n # we can only infer its type from the name of the variable\n motif_node = provenance.create_motif_node(assignment.lvalue.name)\n motif_node_dict[assignment.lvalue.name].append(motif_node)\n return None\n elif type(assignment.lvalue).__name__ == 'UnaryOp' and type(assignment.lvalue.expr).__name__ == 'ID' and assignment.lvalue.expr.name in motif_node_dict:\n # similar case as the previous one, except that we have: *tprov = ...\n # we can only infer its type from the name of the variable\n motif_node = provenance.create_motif_node(assignment.lvalue.expr.name)\n motif_node_dict[assignment.lvalue.expr.name].append(motif_node)\n return None\n else:\n #######################################################\n # We will consider other conditions if we ever see them\n # POSSIBLE CODE HERE.\n #######################################################\n return None", "def _select_unassigned_variable(self, assignment, csp):\n for var in csp.get_variables():\n if not assignment.has_assignment_for(var):\n return var\n\n return None", "def assignment_complete(self, assignment):\n # for each variable in the crossword\n for variable in self.crossword.variables:\n # if the variable is not assigned a value\n if variable not in assignment:\n # the crossword is not complete\n return False\n return True", "def solve_crossword(vocab, blanks):\n # this value can be freely adjusted\n attempts = len(blanks)**2\n # attempts to solve puzzle with random restart if a \"failure\" occurs\n # this is one way to deal getting stuck at a local maximum or plateau when hill climbing\n for i in range(attempts):\n # print(\"Attempt \" + str(i) + \": \")\n solution = solve_crossword_helper(vocab, blanks)\n if solution:\n return solution\n return None", "def first_unassigned_variable(assignment, csp):\r\n # print(first([var for var in csp.variables if var not in assignment]))\r\n\r\n return first([var for var in csp.variables if var not in assignment])", "def consistent(self, assignment):\n for node1 in assignment:\n for node2 in assignment:\n\n if node1 != node2:\n #returns False if any assignmed words are the same\n if assignment[node1] == assignment[node2]:\n return False\n\n overlap= self.crossword.overlaps[node1,node2]\n if overlap != None:\n #checks if words assigned to node overlaps are the same letter\n if assignment[node1][overlap[0]] != assignment[node2][overlap[1]]:\n return False\n\n return True", "def solve_part_one(self):\n self.initialize_values_and_rules()\n current_bot = None\n ret = None\n while True:\n for k in self.bots:\n if 
len(self.bots[k]) == 2:\n current_bot = k\n if current_bot is None:\n break\n\n low_type, dest_low, high_type, dest_high = self.rules[current_bot]\n chips = sorted(self.bots[current_bot])\n if chips[0] == 17 and chips[1] == 61:\n ret = current_bot\n\n del self.bots[current_bot]\n current_bot = None\n\n self.assign(low_type, dest_low, chips[0])\n self.assign(high_type, dest_high, chips[1])\n return ret", "def solver(formula):\n # dictionary initializing output solution\n assignments={}\n\n # check and simplify unit clauses\n for clause in formula:\n # if clause is a unit clause\n if len(clause)==1:\n # extract random literal from clause\n var,val=get_from_set(clause)\n # make assignment such that unit clause is true\n assignments[var] = val\n # update rest of the formula with such assignment\n formula = expand(formula,var,val)\n\n # RECURSION BASE CASE 1: found one of possible solutions\n # NOTE: since I eliminate clauses once satisfied, list is \n # empty when all clauses are satisfied. \n if not formula:\n return assignments\n\n # RECURSION BASE CASE 2: impossible due to contradiction\n # NOTE: if any of the clauses is false, then no solution\n if not all(formula):\n return None\n\n # CORE OF RECURSION: recursive simplification of CNF formula\n var, val = get_from_set(formula[0])\n for attempt in (val, not val): # e.g try True, if no success try False \n assignments[var] = attempt\n new_assignments = solver(expand(formula,var,attempt))\n if new_assignments is not None:\n assignments.update(new_assignments)\n return assignments\n\n # if we get to this line, neither attempt yields a solution\n return None", "def __set_has_homework_or_assignment(text=str, replacement_text=str, word_list=list):\n word_set = set()\n tokenized_text = nltk.word_tokenize(text)\n # loop through all the words to see if it contains homework or its synonyms\n for word in tokenized_text:\n word_lem = wordnet.morphy(word, wordnet.NOUN)\n if (word_lem is not None) and (word_lem in word_list):\n word_set.add(word)\n # convert to list and sort based on length\n word_set = list(word_set)\n word_set.sort(key=len, reverse=True)\n # replace those words, if any, with the replacement text\n for word in word_set:\n text = text.replace(word, replacement_text)\n return text", "def check(self,word):\n if self.pre:\n def sub_word(chars):\n if re.match('^'+chars+'.*',word):\n return word[len(chars):]\n else:\n return None\n else:\n def sub_word(chars):\n if re.match('^.*'+chars+'$',word):\n return word[:-len(chars)]\n else:\n return None\n\n if word == '':\n return self\n for chars in self.branches.keys():\n res = sub_word(chars)\n if res:\n return self.branches[chars].check(res)\n elif res == '':\n return self.branches[chars]\n return None", "def select_unassigned_variable(csp:list,assignment:set,method=0) -> variable:\n if(method not in range(3)):\n return \"method out of bounds\"\n \n if(method == 0):\n y = rdint(0,len(csp)-1) #rdint is inclusive, hence the -1\n var = csp[y]\n while(var in assignment):\n y = rdint(0,len(csp)-1) #rdint is inclusive, hence the -1\n var = csp[y]\n return var\n \n elif(method == 1):\n #1:minimum-remaining value\n least_domain = math.inf\n low_var = None\n for var in csp:\n if(var not in assignment):\n dm_size = var.domain_size()\n if(dm_size == 0):\n return False\n if(dm_size < least_domain):\n least_domain = dm_size\n low_var = var\n return low_var\n \n elif(method == 2):\n #2:minimum-remaining value together with degree\n #the degree of the node works as a tie breaker, otherwise it works\n #just 
like minimum remaining value\n least_domain = math.inf\n low_var = None\n for var in csp:\n if(var not in assignment):\n dm_size = var.domain_size()\n if(dm_size == 0):\n return False\n if(dm_size < least_domain):\n least_domain = dm_size\n low_var = var\n elif(dm_size == least_domain and var.constraint_size() > low_var.constraint_size()):\n least_domain = dm_size\n low_var = var\n return low_var", "def solve(puzzle_input):\r\n return {'a': part_a(puzzle_input), 'b': part_b(puzzle_input)}", "def _solve_position(self, position: _Position, visited: Set[_Position],\n partial_word: str, words: List[str]) -> List[str]:\n # set list of found words to empty\n # set this position to visited\n # add character to partial word\n found_words = []\n visited.add(position)\n char = self.puzzle[position.row][position.col]\n partial_word = '%s%s' % (partial_word, char)\n\n # check partial matches\n words = self._get_partial_words(partial_word=partial_word,\n words=words)\n if partial_word in words:\n # exact match\n found_words.append(partial_word)\n words.remove(partial_word)\n\n # add neighboring characters to the partial word\n if len(words) > 0:\n for row in self._get_row_col_neighbors(position.row):\n for col in self._get_row_col_neighbors(position.col):\n next_position = self._Position(col, row)\n if next_position not in visited:\n found_words.extend(self._solve_position(next_position, visited, partial_word, words))\n\n visited.remove(position)\n\n return found_words", "def solve(grid):\n assignment = grid_values(grid)\n assignment = eliminate(assignment)\n return assignment", "def test_traversal_with_partial_word_returns_rest_of_word(full_trie):\n assert list(full_trie.traversal('h')) == ['e', 'y']", "def candidates(word):\r\n return (known([word]) or known(edits1(word)) or known(edits2(word)) or [word])", "def matchWords(checkerWord, stringWord, variables, pos):\n for expression in checkerWord:\n # If `expression` is a variable reference, replace it with the value.\n if expression.variant == TestExpression.Variant.VarRef:\n pattern = re.escape(getVariable(expression.name, variables, pos))\n else:\n pattern = expression.text\n\n # Match the expression's regex pattern against the remainder of the word.\n # Note: re.match will succeed only if matched from the beginning.\n match = re.match(pattern, stringWord)\n if not match:\n return None\n\n # If `expression` was a variable definition, set the variable's value.\n if expression.variant == TestExpression.Variant.VarDef:\n variables = setVariable(expression.name, stringWord[:match.end()], variables, pos)\n\n # Move cursor by deleting the matched characters.\n stringWord = stringWord[match.end():]\n\n # Make sure the entire word matched, i.e. 
`stringWord` is empty.\n if stringWord:\n return None\n\n return variables", "def iterate_over_assignment(assignment_funct, max_iterations=12, **args):\n args['knowing_minimum'] = 0\n args['knowing_maximum'] = sys.maxsize\n args['maxtime'] = 16 # in secs\n vars = None\n prob_status = pulp.LpStatusNotSolved\n iterations = 0\n while pulp.LpStatusOptimal != prob_status and pulp.LpStatusInfeasible != prob_status and iterations <= max_iterations:\n prob_status, vars = assignment_funct(**args)\n iterations+=1\n return prob_status, vars", "def backtracking(csp, ac_3=False):\n assigned = []\n unassigned = csp.variables[:]\n for v in unassigned:\n if v.value is not None:\n unassigned.remove(v)\n \n result = recursive_backtracking(csp, assigned,unassigned)\n if result is False:\n print \"fuck\"\n return csp", "def build_from_partial(self, partial_assignments):\n assert isinstance(partial_assignments, dict)\n ordered_vals = []\n for v in self.variables:\n if v in partial_assignments:\n ordered_vals.append(partial_assignments[v])\n else:\n ordered_vals.append(v.arbitrary_value) # arbitrary value\n return State(tuple(ordered_vals), self)", "def backtracking(board):\n ##implementing this with reference to MRV \n ##also with forward checking \n if assignmentComplete(board) == True:\n solved_board = board \n return solved_board\n \n\n \n else:\n var, domains = select_MRV(board)\n domain = domains[var]\n \n \n ##now using propogation to check the values \n \n \n ## now implementing forward checking has no legal values \n \n ## we need to go through and check if appplying a particular variable leads to no possible variables for the correct columns, rows and squares\n new_domain = domain\n \n ##go through and select the correct value for the var \n for value in new_domain: \n \n if check_valid_insert(board, var, value) == True:\n board[var] = value \n result = backtracking(board)\n \n if result != \"Failure\":\n return result\n board[var] = 0\n \n return \"Failure\"", "def keep_cross_validation_fold_assignment(self):\n return self._parms.get(\"keep_cross_validation_fold_assignment\")", "def sat_apply_assignment(self, assignment):\n # YOUR CODE HERE\n o = set()\n print(s)\n print({x.simplify(assignment) for x in self.clauses if not isinstance(x.simplify(assignment), bool)})\n for x in s.clauses:\n if not isinstance(x.simplify(assignment), bool):\n o.add(x.simplify(assignment))\n print(\"ASSIGN SET\", o)\n\n return SAT(o)\n # return SAT({x.simplify(assignment) for x in self.clauses if not isinstance(x.simplify(assignment), bool)})", "def _find_best_fit(self, puzzle):\n\n word = puzzle['answer']\n\n # if first word\n print(len(self.filled_pos))\n if len(self.filled_pos) == 0:\n x = random.randint(0,4)\n y = random.randint(0,4)\n print(\"first_word: {} x:{} y:{}\".format(word, x, y))\n print(\"will_fit: {}\".format(will_fit[ACROSS](x, y, length(word, self.lang))))\n if will_fit[ACROSS](x, y, length(word, self.lang)):\n puzzle['orientation'] = \"across\"\n # puzzle['position'] = t + 1\n puzzle['startx'] = x + 1\n puzzle['starty'] = y + 1\n self._fill_word_in_matrix(word, ACROSS, (x,y))\n return puzzle\n\n # first find the location where it overlaps.. 
then move to the other ones to keep it interesting\n for key in self.filled_pos:\n #the orientation for this word should be perpendicular to the one we are trying to match\n pos = int(not self.filled_pos[key]['orientation'])\n # find the intersecting letters between the two words\n intersect = find_intersection(key, word, self.lang)\n print(\"trying to intersect filled_word={} with word={}\".format(key, word))\n if len(intersect) == 0:\n # no letters matched.. lets find the next\n continue\n else:\n a = [-10, -10]\n print(\"intersecting letters={}\".format(intersect))\n for letter in intersect:\n indexes1 = find_all_char_pos(key, letter, self.lang)\n for index in indexes1:\n # index = filled_pos[key]['word'].find(letter)\n print(\"location of the letter={} in word={} is {}\".format(letter, key, index))\n filled_word_pos = self.filled_pos[key]['position']\n a[pos] = filled_word_pos[pos] + index\n indexes2 = find_all_char_pos(word, letter, self.lang)\n for index2 in indexes2:\n # index2 = word.find(letter)\n print(\"location of the letter={} in word={} is {}\".format(letter, word, index2))\n a[self.filled_pos[key]['orientation']] = filled_word_pos[int(not pos)] - index2\n print(\"looking for match in location={}\".format(a))\n print(\"will_fit={}\".format(will_fit[pos](a[0], a[1], length(word, self.lang))))\n if will_fit[pos](a[0], a[1], length(word, self.lang)):\n if not self._check_overlap(word, pos, a[0], a[1]):\n self._fill_word_in_matrix(word, pos, (a[0], a[1]))\n calculate_free_rows(self.puzzle_matrix, self.height)\n puzzle['orientation'] = \"down\" if pos else \"across\"\n # puzzle['position'] = t + 1\n puzzle['startx'] = a[0] + 1\n puzzle['starty'] = a[1] + 1\n return puzzle\n # if we are still here then we havent found a place for this word\n # fill it in an empty space\n free_blocks_across = calculate_free_rows(self.puzzle_matrix, self.height)\n print(\"@@@@@@filling a random across free_blocks_across={}\".format(free_blocks_across))\n for key, val in sorted(free_blocks_across.items()):\n print(\"key={} val={}\".format(key, val))\n if key >= length(word, self.lang):\n pos = val.pop(random.randint(0, len(val)-1 ))\n if will_fit[ACROSS](pos[0], pos[1], length(word, self.lang)) and not self._check_overlap(word, ACROSS, pos[0], pos[1]):\n self._fill_word_in_matrix(word, ACROSS, (pos))\n puzzle['orientation'] = \"across\"\n puzzle['startx'] = pos[0] + 1\n puzzle['starty'] = pos[1] + 1\n return puzzle", "def backtracker(board, checker):\n # if element == ' ': # or if element == 0\n # return board # board is solved\n if finished_board(board):\n return board\n element, values = next_empty(checker) # e.g. 
'A1'\n for i in values: # for all valid assignments in dict\n if not num_present(element, i, board): # returns FALSE if i is valid\n board[element] = i # fill in the blank\n if clear_board(element, i, checker): # clear the potential values\n print(\"in inner if\")\n new_check = copy.deepcopy(checker) # copy dictionary\n new_check.pop(element) # clear from dict of unassigned values\n new_board = copy.deepcopy(board)\n backtracking(new_board, new_check) # begin recursion\n board[element] = 0 # go on with current board\n\n if finished_board(board):\n return board", "def assign_value(Xj, Xk, csp, assignment):\r\n parent_assignment = assignment[Xj]\r\n for val in csp.curr_domains[Xk]:\r\n if csp.constraints(Xj, parent_assignment, Xk, val):\r\n return val\r\n\r\n # No consistent assignment available\r\n return None", "def backtrack(csp):\n\n if len(csp.assignment) == len(csp.variables):\n return True\n\n variable = select_unassigned_variable(csp)\n value = order_domain_values(csp, variable)\n #print variable\n #print value\n flag = 0\n for x in value:\n csp.variables.begin_transaction()\n if is_consistent(csp, variable, x):\n #print \"past is_consistent\"\n for var in csp.variables:\n if var == variable:\n var.assign(x)\n var.is_assigned()\n solution = backtrack(csp)\n if solution != False:\n return True\n csp.variables.rollback()\n return False", "def forwardcheck(var, val, assignment, user_dict):\n\t\tif curr_domains:\n\t\t\tfor (meal, restaurant) in curr_deleted[var]:\n\t\t\t\tcurr_domains[meal].append(restaurant)\n\t\t\tcurr_deleted[var] = []\n\n\t\t\tfor meal in neighbors[var]:\n\t\t\t\tif meal not in assignment:\n\t\t\t\t\tfor restaurant in curr_domains[meal][:]:\n\t\t\t\t\t\tnum_cats = count_categories(assignment.values())\n\t\t\t\t\t\tif not constraints_match(num_cats, user_dict):\n\t\t\t\t\t\t#if not user_solution_checker(user_dict, meal, restaurant, assignment):\n\t\t\t\t\t\t\tcurr_domains[meal].remove(restaurant)\n\t\t\t\t\t\t\tcurr_deleted[var].append((meal, restaurant))", "def correction(word):\r\n return max(candidates(word), key=P)", "def find_lhs(line):\n ind = line.find('=')\n ind2 = line.find('(')\n if ind == -1:\n return None\n elif ind2 > -1:\n #there is both an equal sign and a (\n if ind < ind2:\n #the equal sign is first and there is an lhs\n #out = myfunc(b=5)#<-- the lhs here is \"out\"\n return line[0:ind]\n else:\n #the ( is first as in\n #myfunc(1, b=2)#<-- note that there is no assignment here\n return None\n else:\n #there is an equal sign, but no (\n return line[0:ind]", "def visit_assign(self: Parser, node: doc.Assign) -> None:\n if len(node.targets) != 1:\n self.report_error(node, \"Consequential assignments like 'a = b = c' are not supported.\")\n lhs = node.targets[0]\n\n if isinstance(node.value, doc.Subscript):\n check_slices = []\n if isinstance(node.value.slice, doc.Slice):\n check_slices = [node.value.slice]\n elif isinstance(node.value.slice, doc.Tuple):\n for p in node.value.slice.elts:\n if isinstance(p, doc.Slice):\n check_slices.append(p)\n for s in check_slices:\n if not s.step and s.upper and s.lower:\n s.step = doc.Constant(\n 1,\n None,\n 1,\n 1,\n s.upper.lineno,\n s.upper.end_col_offset + 1,\n s.upper.lineno,\n s.upper.end_col_offset + 2,\n )\n\n rhs = self.eval_expr(node.value)\n if isinstance(lhs, doc.Subscript):\n if isinstance(lhs.slice, doc.Tuple):\n indices = []\n for index in lhs.slice.elts:\n indices.append(self.eval_expr(index))\n else:\n indices = self.eval_expr(lhs.slice)\n T.buffer_store(self.eval_expr(lhs.value), rhs, indices)\n 
else:\n self.eval_assign(target=lhs, source=rhs, bind_value=bind_assign_value)", "def assign_to_words_(wordflann, words, idx2_vec, idx_name='idx', dense=True,\n nAssign=1, massign_alpha=1.2, massign_sigma=80):\n idx2_vec_values = pdh.ensure_values(idx2_vec)\n # Assign each vector to the nearest visual words\n _idx2_wx, _idx2_wdist = wordflann.nn_index(idx2_vec_values, nAssign)\n if nAssign > 1:\n # MultiAssignment Filtering from Improving Bag of Features\n # http://lear.inrialpes.fr/pubs/2010/JDS10a/jegou_improvingbof_preprint.pdf\n thresh = np.multiply(massign_alpha, _idx2_wdist.T[0:1].T)\n invalid = np.greater_equal(_idx2_wdist, thresh)\n # Weighting as in Lost in Quantization\n gauss_numer = -_idx2_wdist.astype(np.float64)\n gauss_denom = 2 * (massign_sigma ** 2)\n gauss_exp = np.divide(gauss_numer, gauss_denom)\n unnorm_maw = np.exp(gauss_exp)\n # Mask invalid multiassignment weights\n masked_unorm_maw = np.ma.masked_array(unnorm_maw, mask=invalid)\n # Normalize multiassignment weights from 0 to 1\n masked_norm = masked_unorm_maw.sum(axis=1)[:, np.newaxis]\n masked_maw = np.divide(masked_unorm_maw, masked_norm)\n masked_wxs = np.ma.masked_array(_idx2_wx, mask=invalid)\n # Remove masked weights and word indexes\n idx2_wxs = list(map(utool.filter_Nones, masked_wxs.tolist()))\n idx2_maws = list(map(utool.filter_Nones, masked_maw.tolist()))\n else:\n idx2_wxs = _idx2_wx.tolist()\n idx2_maws = [1.0] * len(idx2_wxs)\n\n # Invert mapping -- Group by word indexes\n jagged_idxs = ([idx] * len(wxs) for idx, wxs in enumerate(idx2_wxs))\n wx_keys, groupxs = clustertool.jagged_group(idx2_wxs)\n idxs_list = clustertool.apply_jagged_grouping(jagged_idxs, groupxs)\n maws_list = clustertool.apply_jagged_grouping(idx2_maws, groupxs)\n wx2_idxs = dict(zip(wx_keys, idxs_list))\n wx2_maws = dict(zip(wx_keys, maws_list))\n\n if WITH_PANDAS:\n idx_series = pdh.ensure_index(idx2_vec)\n wx_series = pdh.ensure_index(words)\n wx2_idxs = pdh.pandasify_dict1d(\n wx2_idxs, wx_series, idx_name, ('wx2_' + idx_name + 's'), dense=dense)\n idx2_wxs = pdh.IntSeries(idx2_wxs, index=idx_series, name='wx')\n\n return wx2_idxs, wx2_maws, idx2_wxs", "def search_hash(word_input,hashtable):\n\n if word_input in hashtable:\n return hashtable[word_input]\n else:\n return None", "def match_rule(name, lhs, rhs, wm):\n print(\" ------------ Matching Rule '\", name, \"' --------------\")\n print(\" lhs = \", lhs)\n print(\" rhs = \", rhs)\n print(\" wm = \", wm)\n print()\n def mr_helper(queue, new_wm):\n # Each state in queue is\n # (anteceds-left, subs)\n # print(\" ----- matching rule helper ------\")\n # print(\" queue = \", queue)\n # print(\" new_wm = \", new_wm)\n # print()\n if queue == []: # if the queue is empty, return new_wm\n return new_wm\n else: # else examine the first item in the queue (call it state1)\n state1 = queue[0]\n if state1[0] == []: # If state1 has no antecedents, state1 is a goal state (the rule is matched);\n # call \"execute\" on rhs using the substitution in state1\n derived = execute(state1[1], rhs, new_wm)\n # But don't stop here (this is exhaustive):\n # return mr_helper applied to the rest of the queue, appending\n # whatever new WM assertions \"execute\" returned.\n new_wm = update_wm(new_wm, derived)\n return mr_helper(queue[1:], new_wm)\n elif state1[0] != []: # Else if state1 has antecedents, apply \"match_antecedent\" to them along with wm and the substitutions in state1.\n matched = match_antecedent(state1[0], wm, state1[1])\n if matched == []: # If \"match_antecedent\" returns no new states, 
return mr_helper on rest of the queue without changing states.\n return mr_helper(queue[1:], new_wm)\n else:\n # Else return mr_helper on the updated queue,\n # i.e., the old one with the new states found\n # by \"match_antecedent\" replacing state1\n queue = matched + queue[1:]\n return mr_helper(queue, new_wm)\n return mr_helper(match_antecedent(lhs, wm ,[]), [])", "def crosswordPuzzle(crossword, words):\n words = words.split(';')\n n_rows = len(crossword)\n lines = []\n columns = []\n row = 0\n twisted = twistgrid(crossword)\n while row < n_rows:\n lines.append(list(re.finditer(r'(-){2,}', crossword[row])))\n columns.append(list(re.finditer(r'(-){2,}', twisted[row])))\n row += 1\n row_words = []\n col_words = []\n blank_lengths = []\n for irow, matches in enumerate(lines):\n row_words.extend([((irow, x.span()[0]),\n (irow, x.span()[1] - 1)) for x in matches])\n blank_lengths.extend([x.span()[1] - x.span()[0] for x in matches])\n\n for icol, matches in enumerate(columns):\n col_words.extend([((x.span()[0], icol),\n (x.span()[1] - 1, icol)) for x in matches])\n blank_lengths.extend([x.span()[1] - x.span()[0] for x in matches])\n\n intersections = {'row_words': [], 'col_words': []}\n n_intersections = 0\n\n for i, rword in enumerate(row_words):\n for j, cword in enumerate(col_words):\n if rword[0][0] >= cword[0][0] and rword[0][0] <= cword[1][0] and\\\n cword[0][1] >= rword[0][1] and cword[0][1] <= rword[1][1]:\n intersections['row_words'].append((i,\n cword[0][1] - rword[0][1]))\n intersections['col_words'].append((j,\n rword[0][0] - cword[0][0]))\n n_intersections += 1\n\n guesses = list(permutations(words))\n right_length = []\n for i, guess in enumerate(guesses):\n if all((len(guess[k]) == blank_lengths[k] for k in range(len(guess)))):\n right_length.append(guess)\n\n for k, guess in enumerate(right_length):\n row_intersections = []\n col_intersections = []\n i = 0\n while col_intersections == row_intersections and i < n_intersections:\n word = guess[intersections['row_words'][i][0]]\n letter_ind = intersections['row_words'][i][1]\n row_intersections.append(word[letter_ind])\n\n # now find letters of columns that are intersections\n word = guess[intersections['col_words'][i][0] + len(row_words)]\n # need offset because the first words in guess fall into rows\n letter_ind = intersections['col_words'][i][1]\n col_intersections.append(word[letter_ind])\n i += 1\n if col_intersections == row_intersections:\n print(f'Intersections match for guess {guess}')\n break # don't keep changing guess even after you found fit\n print(f'Found correct guess on search {k+1} out of {len(guesses)} choices')\n\n out = [list(row) for row in crossword]\n for i, word in enumerate(row_words):\n out[word[0][0]][word[0][1]:word[1][1] + 1] = list(guess[i])\n\n out = [''.join(row) for row in out]\n out = twistgrid(out)\n\n out = [list(row) for row in out]\n for i, word in enumerate(col_words):\n out[word[0][1]][word[0][0]:word[1][0] + 1] = list(guess[i + len(row_words)])\n out = [''.join(row) for row in out]\n out = twistgrid(out)\n out = [''.join(row) for row in out]\n print('\\n'.join(out))\n\n return out", "def input_assignment(in_dict):\n\n # define initialization & assignment strings\n init_str= \"\"\n assign_str= \"\"\n\n # loop through elements\n for key,value in in_dict.items():\n # Check if type is a boolean\n if isinstance(in_dict[key][0], str):\n # Initialization\n init_str= init_str + \"init({0})\".format(key) + \":= {TRUE, FALSE};\\n\"\n\n # Assignment\n assign_str= assign_str + \\\n 'next({0}):= 
case\\n'.format(key) + \\\n ' stab: {TRUE, FALSE};\\n' +\\\n ' TRUE: {0};\\n'.format(key) + \\\n 'esac;\\n'\n \n # if type is not a boolean\n else:\n\n # Initialization\n init_val= in_dict[key][0][1]\n # Check if initial value is a string and is not n enum type\n if (isinstance(init_val, str) and not (\"{\" in init_val)):\n init_val= '\"' + init_val + '\"'\n\n init_str= init_str + \"init({0})\".format(key) + \":= {0};\\n\".format(init_val)\n\n # Assignment\n assign_str= assign_str + \\\n 'next({0}):= case\\n'.format(key) + \\\n ' stab: {0};\\n'.format(in_dict[key][0][2]) +\\\n ' TRUE: {0};\\n'.format(key) + \\\n 'esac;\\n'\n \n # return\n out_str= init_str + assign_str\n \n return out_str", "def assignment_complete(self, assignment):\n # print(\"Entered assignment_complete Function\")\n for var in assignment:\n if assignment[var] is None:\n return False\n return self.consistent(assignment)\n\n # raise NotImplementedError", "def _check_all_valence_terms_assigned(\n handler,\n assigned_terms,\n topology,\n valence_terms,\n):\n if len(assigned_terms) == len(valence_terms):\n return\n\n # Convert the valence term to a valence dictionary to make sure\n # the order of atom indices doesn't matter for comparison.\n valence_terms_dict = assigned_terms.__class__()\n for atoms in valence_terms:\n atom_indices = (topology.atom_index(a) for a in atoms)\n valence_terms_dict[atom_indices] = atoms\n\n # Check that both valence dictionaries have the same keys (i.e. terms).\n assigned_terms_set = set(assigned_terms.keys())\n valence_terms_set = set(valence_terms_dict.keys())\n unassigned_terms = valence_terms_set.difference(assigned_terms_set)\n not_found_terms = assigned_terms_set.difference(valence_terms_set)\n\n # Raise an error if there are unassigned terms.\n err_msg = \"\"\n\n if len(unassigned_terms) > 0:\n unassigned_atom_tuples = []\n\n unassigned_str = \"\"\n for unassigned_tuple in unassigned_terms:\n unassigned_str += \"\\n- Topology indices \" + str(unassigned_tuple)\n unassigned_str += \": names and elements \"\n\n unassigned_atoms = []\n\n # Pull and add additional helpful info on missing terms\n for atom_idx in unassigned_tuple:\n atom = topology.atom(atom_idx)\n unassigned_atoms.append(atom)\n unassigned_str += f\"({atom.name} {atom.symbol}), \"\n unassigned_atom_tuples.append(tuple(unassigned_atoms))\n err_msg += (\n \"{parameter_handler} was not able to find parameters for the following valence terms:\\n\"\n \"{unassigned_str}\"\n ).format(\n parameter_handler=handler.__class__.__name__,\n unassigned_str=unassigned_str,\n )\n if len(not_found_terms) > 0:\n if err_msg != \"\":\n err_msg += \"\\n\"\n not_found_str = \"\\n- \".join([str(x) for x in not_found_terms])\n err_msg += (\n \"{parameter_handler} assigned terms that were not found in the topology:\\n\"\n \"- {not_found_str}\"\n ).format(\n parameter_handler=handler.__class__.__name__,\n not_found_str=not_found_str,\n )\n if err_msg:\n err_msg += \"\\n\"\n\n if isinstance(handler, BondHandler):\n exception_class = UnassignedBondError\n elif isinstance(handler, AngleHandler):\n exception_class = UnassignedAngleError\n elif isinstance(handler, (ProperTorsionHandler, ImproperTorsionHandler)):\n exception_class = UnassignedTorsionError\n else:\n raise RuntimeError(\n f\"Could not find an exception class for handler {handler}\",\n )\n\n exception = exception_class(err_msg)\n exception.unassigned_topology_atom_tuples = unassigned_atom_tuples\n exception.handler_class = handler.__class__\n raise exception", "def _extract_solution(self, 
manager: RoutingIndexManager, routing: RoutingModel, assignment: Assignment, indices_to_visit: List[int]) -> Dict[str, Any]:\n sln = {\"objective\": assignment.ObjectiveValue()}\n \n stop_indices = []\n index = routing.Start(0)\n while not routing.IsEnd(index):\n relative_index = manager.IndexToNode(index)\n stop_indices.append(indices_to_visit[relative_index])\n previous_index = index\n index = assignment.Value(routing.NextVar(index))\n relative_index = manager.IndexToNode(index)\n stop_indices.append(indices_to_visit[relative_index])\n sln[\"order\"] = stop_indices\n return sln", "def comp_choose_word(hand, word_list):\n perms_list = []\n for i in range(1, HAND_SIZE+1):\n perms_list.extend(get_perms(hand, i))\n perms_valid = []\n for j in range(0, len(perms_list)):\n word = perms_list[j]\n if is_valid_word(word, hand, word_list):\n perms_valid.append(word)\n\n \n\n\n\n\n\n valid_scores = []\n for k in range(0, len(perms_valid)):\n valid_scores.append(get_word_score(perms_valid[k], HAND_SIZE))\n valid_words = perms_valid[k]\n\n # Compare first two scores and put highest score and corresponding word in seprate variables\n # respectively.\n max_score = 0\n max_word = ''\n for j in range(0, len(valid_scores)):\n if max_score < valid_scores[j]:\n max_score = valid_scores[j]\n max_word = perms_valid[j]\n return max_word", "def search(values):\n global assignments\n\n # First, reduce the puzzle using the previous function\n values = reduce_puzzle(values)\n\n # Check if this solution is unsolvable\n if values is False:\n return False\n\n # Check if we found a solutio, all boxes have one digit\n if all(len(values[s]) == 1 for s in boxes):\n return values\n # Choose one of the unfilled squares with the fewest possibilities\n min = 10\n minKey = None\n for v in values:\n if 1 < len(values[v]) < min:\n min = len(values[v])\n minKey = v\n\n for digit in values[minKey]:\n new_values = dict(values)\n assignments_bck = assignments.copy()\n new_values = assign_value(new_values, minKey, digit)\n new_values = search(new_values)\n if new_values != False:\n return new_values\n assignments = assignments_bck.copy()\n return False", "def _uc_to_assignment(self, uc):\r\n # get map of query id to all assignments\r\n results = self._uc_to_assignments(uc)\r\n # for each query id, compute the consensus taxonomy assignment\r\n for query_id, all_assignments in results.items():\r\n results[query_id] = self._get_consensus_assignment(all_assignments)\r\n return results", "def sat_solve(self):\n # YOUR CODE HERE\n o = frozenset()\n if self.isfalse:\n return False\n elif self.istrue:\n return set()\n l = self.generate_candidate_assignments()\n print(\"assignments,\", l)\n for i in l:\n st = sat_apply_assignment(self, i)\n print(\"i:\", i, \"new set\", st)\n\n if st.istrue:\n return {i}\n elif not st.isfalse:\n sat_solve(st)\n\n return {i}", "def lookup(match):\n word = match.group(0)\n return symtab[unbase(word)] or word", "def check_matches(known, match): \n #array containing tuples of corrected spellings\n out_vals = []\n\n #clear screen\n os.system('clear')\n \n for k,v in known.iteritems():\n matched_vals = [ik for ik, iv in match.iteritems() if iv == v]\n cur_matches = process.extract(k, matched_vals, limit = MATCH_LIMIT)\n \n #append a none option and option for actual val\n cur_matches.insert(0,(\"NONE\",0))\n cur_matches.append((k,100))\n\n\n #if there is a match of 100, skip\n if cur_matches[1][1] == 86 or cur_matches[1][1] < 50:\n #special case where probably means there really isn't a match\n choice = 'NONE'\n 
print 'No match!'\n\n elif cur_matches[1][1] < 95:\n #print out options for each word\n print_options(k, cur_matches)\n\n #cycle through prompt for input if valid (numeric, within MATCH_LIMIT)\n choice = None\n while choice == None:\n try:\n choice = cur_matches[get_choice()][0]\n except:\n print '!Invalid match! Try again'\n print_options(k, cur_matches)\n \n print choice\n\n else:\n #exact match\n choice = cur_matches[1][0]\n print '%s - Match!'%choice\n\n print \n print\n #create a new tuple with correct and incorrect spelling\n out_vals.append((k, choice))\n output(out_vals)\n\n #we're done, return vals array\n return out_vals", "def match_words_to_search(chunks, searchresult, compare_func, join=True):\n wordlist = [hebstrip(w)[1] for w in word_bound.split(searchresult)]\n wordset = set(wordlist)\n genlist = [\n m\n for m in [\n match_one(rlist, wordset)\n for rlist in chunks.linked_heb\n if rlist.data\n ]\n if m\n ]\n ours = [i[0] for i in genlist]\n theirs = [i[1] for i in genlist]\n if join:\n return compare_func(\" \".join(ours), \" \".join(wordlist)), theirs\n else:\n return compare_func(ours, wordlist), theirs", "def get_max_score(self,word_id, assigned_words):\r\n def find_max(possible_word_dict,word_id2):\r\n max_score = 0\r\n new_word_to_assign = '*' * self.words[word_id2].length\r\n for possible_word in possible_word_dict.keys():\r\n score = 0\r\n for element in assigned_words.keys():\r\n if element != word_id2:\r\n if element in self.satisfiers[word_id2][possible_word].keys():\r\n if assigned_words[element] in self.satisfiers[word_id2][possible_word][element]:\r\n score += 1\r\n if score >= max_score:\r\n max_score = score\r\n new_word_to_assign = possible_word\r\n return (new_word_to_assign, max_score), max_score\r\n if word_id[1] == 'A':\r\n words = self.across\r\n elif word_id[1] == 'D':\r\n words = self.down\r\n total_score = 0\r\n new_assigned_words = {}\r\n for word_id2 in words.keys():\r\n if word_id2 != word_id:\r\n max_w, max_s = find_max(self.satisfiers[word_id2],word_id2)\r\n total_score += max_s\r\n new_assigned_words[word_id2] = max_w\r\n return new_assigned_words, total_score", "def select_unassigned_variable(csp):\n smallest = -1\n largest = 0\n multiple = False\n returned = None\n\n for unass in csp.variables:\n if not unass.is_assigned():\n if len(unass.domain) < smallest or smallest == -1:\n smallest = len(unass.domain)\n multiple = False\n returned = unass\n if len(unass.domain) == smallest:\n multiple = True\n\n if multiple == False:\n return returned\n else:\n for unass in csp.variables:\n if not unass.is_assigned():\n if len(unass.domain) == smallest:\n if len(csp.constraints[unass]) > largest:\n largest = len(csp.constraints[unass])\n returned = unass\n return returned\n\n\n\n\n\n # TODO implement this\n pass", "def get_matching(variables, strict=True, single=True, **criteria):\n matching = []\n for var in variables:\n for crit_name, crit_info in criteria.items():\n if getattr(var, crit_name) == crit_info:\n continue\n else:\n break\n else:\n matching.append(var)\n\n if not matching and strict:\n raise RuntimeError(\"No matching variables were found.\")\n if single:\n if len(matching) > 1:\n raise RuntimeError(\n f\"Expected to find 1 matching variable. 
Found '{matching}'.\"\n )\n if not matching:\n return ()\n return matching[0]\n return tuple(matching)", "def infer_assignment(self):\r\n self.support_pruning()\r\n return {v: self.curr_domains[v][0]\r\n for v in self.variables if 1 == len(self.curr_domains[v])}", "def sat_generate_candidate_assignments(self):\n # YOUR CODE HERE\n short = min(len(c) for c in self.clauses)\n for c in self.clauses:\n if len(c) == short:\n return set(c.literals)\n # return (set(x.literals) for x in self.clauses if len(x) == min(len(c) for c in self.clauses))", "def npa_constraints(\n assemblage: dict[tuple[int, int], cvxpy.Variable], k: int | str = 1, referee_dim: int = 1\n) -> list[cvxpy.constraints.constraint.Constraint]:\n a_out, a_in, b_out, b_in = _get_nonlocal_game_params(assemblage, referee_dim)\n\n words = _gen_words(k, a_out, a_in, b_out, b_in)\n dim = len(words)\n\n r_var = cvxpy.Variable((referee_dim * dim, referee_dim * dim), PSD=True, name=\"R\")\n # Normalization.\n norm = sum(r_var[i * dim, i * dim] for i in range(referee_dim))\n constraints = [norm == 1]\n\n seen = {}\n for i in range(dim):\n for j in range(i, dim):\n w_i, w_j = words[i], words[j]\n w_i = tuple(reversed(w_i))\n word = _reduce(w_i + w_j)\n\n sub_mat = r_var[i::dim, j::dim]\n # if i = 0 we would consider (ε, ε) as an empty word.\n if i != 0 and _is_zero(word):\n constraints.append(sub_mat == 0)\n\n elif _is_meas(word):\n s_a, s_b = word\n constraints.append(\n sub_mat\n == assemblage[s_a.question, s_b.question][\n s_a.answer * referee_dim : (s_a.answer + 1) * referee_dim,\n s_b.answer * referee_dim : (s_b.answer + 1) * referee_dim,\n ]\n )\n\n elif _is_meas_on_one_player(word):\n symbol = word[0]\n if symbol.player == \"Alice\":\n sum_all_bob_meas = sum(\n assemblage[symbol.question, 0][\n symbol.answer * referee_dim : (symbol.answer + 1) * referee_dim,\n b_ans * referee_dim : (b_ans + 1) * referee_dim,\n ]\n for b_ans in range(b_out)\n )\n\n constraints.append(sub_mat == sum_all_bob_meas)\n\n if symbol.player == \"Bob\":\n sum_all_alice_meas = sum(\n assemblage[0, symbol.question][\n a_ans * referee_dim : (a_ans + 1) * referee_dim,\n symbol.answer * referee_dim : (symbol.answer + 1) * referee_dim,\n ]\n for a_ans in range(a_out)\n )\n\n constraints.append(sub_mat == sum_all_alice_meas)\n\n elif word in seen:\n old_i, old_j = seen[word]\n old_sub_mat = r_var[old_i::dim, old_j::dim]\n constraints.append(sub_mat == old_sub_mat)\n\n else:\n seen[word] = (i, j)\n\n # now we impose constraints to the assemblage operator\n for x_alice_in in range(a_in):\n for y_bob_in in range(b_in):\n sum_all_meas_and_trace = 0\n for a_ans in range(a_out):\n for b_ans in range(b_out):\n sum_all_meas_and_trace += sum(\n assemblage[x_alice_in, y_bob_in][\n i + a_ans * referee_dim, i + b_ans * referee_dim\n ]\n for i in range(referee_dim)\n )\n\n # r x r sub - block is PSD since it's an unnormalized quantum state.\n constraints.append(\n assemblage[x_alice_in, y_bob_in][\n a_ans * referee_dim : (a_ans + 1) * referee_dim,\n b_ans * referee_dim : (b_ans + 1) * referee_dim,\n ]\n >> 0\n )\n\n constraints.append(sum_all_meas_and_trace == 1)\n\n # Bob marginal consistency\n for y_bob_in in range(b_in):\n for b_ans in range(b_out):\n sum_first_question = sum(\n assemblage[0, y_bob_in][\n a_ans * referee_dim : (a_ans + 1) * referee_dim,\n b_ans * referee_dim : (b_ans + 1) * referee_dim,\n ]\n for a_ans in range(a_out)\n )\n\n for x_alice_in in range(1, a_in):\n sum_cur_question = sum(\n assemblage[x_alice_in, y_bob_in][\n a_ans * referee_dim : (a_ans + 1) * 
referee_dim,\n b_ans * referee_dim : (b_ans + 1) * referee_dim,\n ]\n for a_ans in range(a_out)\n )\n\n constraints.append(sum_first_question == sum_cur_question)\n\n # Alice marginal consistency\n for x_alice_in in range(a_in):\n for a_ans in range(a_out):\n sum_first_question = sum(\n assemblage[x_alice_in, 0][\n a_ans * referee_dim : (a_ans + 1) * referee_dim,\n b_ans * referee_dim : (b_ans + 1) * referee_dim,\n ]\n for b_ans in range(b_out)\n )\n\n for y_bob_in in range(1, b_in):\n sum_cur_question = sum(\n assemblage[x_alice_in, y_bob_in][\n a_ans * referee_dim : (a_ans + 1) * referee_dim,\n b_ans * referee_dim : (b_ans + 1) * referee_dim,\n ]\n for b_ans in range(b_out)\n )\n\n constraints.append(sum_first_question == sum_cur_question)\n\n return constraints", "def check_assignment(assignments: dict, point: Point, value: str) -> bool:\n\n # check base condition: do the constraints hold for current point\n if not check_constraint_satisfied(assignments, point, value):\n print(' → base constraint failed:', point, '=', value)\n return False\n\n # check neighbouring conditions: do the constraints (still) hold for other points\n temp_assignment = copy.deepcopy(assignments)\n temp_assignment[point] = value\n\n # loop through points that can attack the current point, as kings\n print(' > checking neighbouring kings')\n for pt in filter(lambda p: p in assignments and assignments[p] == 'king', attack_points_king[point]):\n if not check_constraint_satisfied(temp_assignment, pt, assignments[pt]):\n print(' → neighbouring constraint failed for neighbour', pt, '=', assignments[pt])\n return False\n\n # loop through points that can attack the current point, as knights\n print(' > checking neighbouring knights')\n for pt in filter(lambda p: p in assignments and assignments[p] == 'knight', attack_points_knight[point]):\n if not check_constraint_satisfied(temp_assignment, pt, assignments[pt]):\n print(' → neighbouring constraint failed for neighbour', pt, '=', assignments[pt])\n return False\n\n # all constraints are satisfied!\n return True", "def check_crossword(self, letter, space, cross):\n if cross == ('', ''):\n return 0\n\n pre, post = cross\n crossword = pre + letter + post\n if self.is_word(crossword):\n score = self.score_factory.make()\n score.add(letter, space)\n rest = pre + post\n for let in rest:\n score.add(let)\n return score.get_points()\n return None", "def q1(puzzle):\n mysudoku = build_csp(puzzle)\n solution = mysudoku.backtracking_search()\n return solution, mysudoku", "def candidates(self, word):\n return (self.known([word]) or self.known(self.edits1(word)) or self.known(self.edits2(word)) or [word])", "def compute_possibles(letters, slots, dictionary_words, context):\n\n words = dictionary_words\n\n # if we have a known number of slots filter\n # our word list down to words w/ that manny letters\n if slots:\n words = ifilter(f.word_len(slots), words)\n\n # filter our word list down to words who's\n # letters are a subset of the given letters\n words = ifilter(f.letter_subset(letters), words)\n\n # we now have our final iterator of possible solutions\n return words", "def choose_word():\n pass", "def get_assignment_map_from_checkpoint(tvars, init_checkpoint, name_to_variable=None):\n assignment_map = {}\n initialized_variable_names = {}\n\n name_to_variable2 = collections.OrderedDict()\n for var in tvars:\n name = var.name\n m = re.match(\"^(.*):\\\\d+$\", name)\n if m is not None:\n name = m.group(1)\n name_to_variable2[name] = var\n\n if name_to_variable is not None:\n 
print(\"DOESNT WORK\")\n print(name_to_variable)\n print(\"DOES WORK\")\n print(name_to_variable2)\n else:\n name_to_variable = name_to_variable2\n\n init_vars = tf.train.list_variables(init_checkpoint)\n\n assignment_map = collections.OrderedDict()\n for x in init_vars:\n (name, var) = (x[0], x[1])\n if name not in name_to_variable:\n print(name)\n continue\n assignment_map[name] = name\n initialized_variable_names[name] = 1\n initialized_variable_names[name + \":0\"] = 1\n\n return (assignment_map, initialized_variable_names)\n\n\n # def _make_polyak_averaging(embeddings, features, label_logits, mode, polyak, make_label_logits, params):", "def update_word(word_to_guess, word_in_progress, suggested_letter):\n\t\n\tword_guessed = list(word_in_progress)\n\t\n\tfor i in range(len(word_to_guess)):\n\t\tif word_to_guess[i] == suggested_letter:\n\t\t\tword_guessed[i] = suggested_letter\n\t\n\tword_in_progress_2 = ''\n\tword_in_progress_2 = word_in_progress_2.join(word_guessed)\n\tword_in_progress = word_in_progress_2\n\n\treturn word_in_progress", "def return_last_function_assignment(topconstruct):\n items = query([\n is_layering([syntax.ASSIGNMENT]),\n at_indent_0_from_function_program,\n last_program_step\n ], TreeItem(topconstruct))\n for item in items:\n # special case of returning a PY_TUPLE\n if item.construct.args[0].construct == syntax.PY_TUPLE:\n # if the first element of the tuple is a no var (_) replace it by a ret\n retprop = item.construct.args[0].args[0][0]\n if retprop.construct == syntax.PY_NOVAR:\n retprop = syntax.Construct(syntax.VAR_NAME, \"_ret\")\n retprop.resolution = RESOLUTION_NAKED\n item.construct.args[0].args[0][0] = retprop\n item.append_construct(\n syntax.Construct(syntax.FUNCTION_RETURN, retprop))\n\n else:\n assigned_varname = item.construct.args[0].args[0]\n # does not work for paths\n if assigned_varname.construct == syntax.VAR_NAME:\n assigned_string = assigned_varname.args[0]\n prop = syntax.Construct(syntax.VAR_NAME, assigned_string)\n prop.resolution = assigned_varname.resolution\n item.append_construct(\n syntax.Construct(syntax.FUNCTION_RETURN, prop))", "def beam_search(X, u, w, b, relLabels):\n\n candidate_paths = [[] for _ in range(10)] # contains the candidate label sets\n candidate_vals =[[] for _ in range(10)] # contains the label values (-1/1) for each candidate set\n candidate_scores = [0. 
for _ in range(10)]\n min_score = -1000\n\n iter = 0\n start = 0\n while True:\n # print(\"Iter: \", iter)\n intermediate_paths = {}\n # intermediate_paths_val = []\n interim_scores = []\n hash_table = {}\n\n cnt_paths = 0\n for cp in range(5):\n labels_curr = candidate_paths[cp]\n labels_val_curr = candidate_vals[cp]\n scores_curr = candidate_scores[cp]\n Y = -np.ones((10, 1))\n for lv in range(len(labels_val_curr)):\n Y[labels_curr[lv]] = labels_val_curr[lv]\n\n for l in range(10):\n candidate_interim = labels_curr[:]\n candidate_vals_interim = labels_val_curr[:]\n # if l in labels_curr:\n # continue\n\n temp_relLabels = []\n for lc in range(len(labels_curr)):\n temp_relLabels.extend(relLabels[labels_curr[lc]])\n\n # temp_relLabels = np.array(list(set(temp_relLabels)))\n temp_relLabels = np.array(list(set(relLabels[l]).intersection(set(labels_curr))))\n model_pos = returnModelVal(X, Y, 1.0, u[l], u[l], b[l][0], np.array(temp_relLabels))\n candidate_interim.append(l)\n\n if model_pos < 0:\n # print('hello')\n candidate_vals_interim.append(-1)\n interim_scores.append(-model_pos)\n else:\n candidate_vals_interim.append(1)\n interim_scores.append(model_pos)\n\n hash_table[cnt_paths] = candidate_interim\n intermediate_paths[cnt_paths] = candidate_vals_interim\n cnt_paths += 1\n # For the first iteration, just iterate once - all labels in one iteration\n if start == 0:\n start = 1\n break\n\n temp_paths = intermediate_paths\n interim_zip = zip(intermediate_paths, interim_scores)\n sorted_scores = sorted(interim_zip, key=lambda x: x[1], reverse=True)[:5]\n intermediate_paths, scores = zip(*sorted_scores)\n\n temp_cand = []\n temp_val = []\n for i in range(len(intermediate_paths)):\n temp_cand.append(hash_table[intermediate_paths[i]])\n temp_val.append(temp_paths[intermediate_paths[i]])\n # candidate_scores[i] += scores[i]\n\n candidate_paths = temp_cand\n candidate_vals = temp_val\n print(candidate_paths)\n print(candidate_vals)\n # print(scores)\n # candidate_scores = scores\n\n # Exit condition from loop\n # if max(interim_scores) < min_score:\n # break\n #\n # min_score = min(interim_scores)\n\n iter += 1\n if iter > 5:\n break\n\n candidate_dict = {}\n for i in range(5):\n for c in range(len(candidate_paths[i])):\n if candidate_paths[i][c] not in candidate_dict:\n candidate_dict[candidate_paths[i][c]] = candidate_vals[i][c]\n elif candidate_dict[candidate_paths[i][c]] != 2:\n if candidate_dict[candidate_paths[i][c]] != candidate_vals[i][c]:\n candidate_dict[candidate_paths[i][c]] = 2.\n\n print(candidate_dict)\n exit()\n return candidate_dict", "def get_course_by_key_words(input):", "def __getitem__(self, name: str) -> Set[BaseAssignment]:\n ...", "def assign(self, available_workers):\n \n status = self.getStatus()\n\n assert len(available_workers) == 1\n worker = available_workers[0]\n assignment = {}\n\n w_id = str(worker.id)\n task_id = self.task_id\n\n #tracks \n worker_assignments_var = redis_get_worker_assignments_var(task_id, w_id)\n\n print \"WORKER ID:\", w_id\n print \"STATUS:\", status\n print \"ASSIGNMENTS FOR WORKER SO FAR:\", app.redis.smembers(worker_assignments_var)\n\n\n # sort questions by pomdp expected reward...\n # XXX this isn't quite what we want...\n # want to sort by value of getting another label\n # so we don't have all workers getting assigned to the same question\n unfinished_unsorted_qs = [(q,v) for (q,v) in status.iteritems() if v['best_action_str'] == 'create-another-job']\n # NOTE REVERSE ORDER\n sorted_qs = sorted(unfinished_unsorted_qs, key=lambda 
x:x[1]['best_expected_reward'], reverse=True)\n print \"sorted_qs\", sorted_qs\n# print \"worker %s has done the following questions\" % w_id\n# for (q_id,er) in sorted_qs:\n# if app.redis.sismember(worker_assignments_var, q_id):\n# print \"+\", q_id\n# else:\n# print \"-\", q_id\n\n for idx in range(len(sorted_qs)):\n q_id,expected_reward = sorted_qs[idx]\n\n if not app.redis.sismember(worker_assignments_var, q_id):\n assignment[w_id] = q_id\n print \"assignment=\", assignment\n app.redis.sadd(worker_assignments_var, q_id)\n return assignment\n\n #if here no assignment was made to our worker!\n assert len(assignment) == 0\n print \"no assignment made yet\"\n\n #NOTE POMDP doesn't think there are any questions available to the worker \n #that need another label, but let's give them an assignment anyway\n #Pick question where submitting would have worst expected reward \n # (implying it may benefit from another label)\n finished_qs = [(q,v) for (q,v) in status.iteritems() if v['best_action_str'] != 'create-another-job']\n sorted_finished_qs = sorted(finished_qs, key=lambda x:x[1]['best_expected_reward']) # no reverse\n for idx in range(len(sorted_finished_qs)):\n q_id,expected_reward = sorted_finished_qs[idx]\n\n if not app.redis.sismember(worker_assignments_var, q_id):\n assignment[w_id] = q_id\n print \"gave worker a finished q assignment=\", assignment\n app.redis.sadd(worker_assignments_var, q_id)\n return assignment\n\n return assignment", "def get_assignment_by_name(self, assignment_name, assignments=None):\n if assignments is None:\n assignments = self.get_assignments()\n for assignment in assignments:\n if assignment['name'] == assignment_name:\n return assignment['assignmentId'], assignment\n return None, None", "def propagate(formula, var, val, assignment):\n # make a copy of assignment to work with, and set the given variable to the\n # given value in it.\n assignment = dict(assignment)\n assignment[var] = val\n # update the formula based on this assignment.\n # clauses containing (var, val) are satisfied already (so remove them from the formula).\n # clauses containing (var, not val) must be satisfied by another variable,\n # so remove (var, not val) from them but otherwise leave them intact.\n new_form = [clause - {(var, not val)} for clause in formula if (var, val) not in clause]\n\n # at this point, if any empty clauses exist, they cannot be satisfied. and\n # if no clauses remain, we have already satisfied the formula.\n if not new_form:\n # if the list is empty, we win\n return True, assignment, []\n if not all(new_form):\n # if any clause is empty, we lose\n return False, {}, []\n # otherwise, we're still going\n return None, assignment, new_form", "def q2(puzzle):\n mysudoku = build_csp(puzzle)\n mysudoku.ac3_algorithm()\n solution = mysudoku.backtracking_search()\n return solution, mysudoku", "def test_get_consensus_assignment_overlapping_names(self):\r\n # here the 3rd level is different, but the 4th level is the same\r\n # across the three assignments. 
this can happen in practice if\r\n # three different genera are assigned, and under each there is\r\n # an unnamed species\r\n # (e.g., f__x;g__A;s__, f__x;g__B;s__, f__x;g__B;s__)\r\n # in this case, the assignment should be f__x.\r\n in1 = [['Ab', 'Bc', 'De', 'Jk'],\r\n ['Ab', 'Bc', 'Fg', 'Jk'],\r\n ['Ab', 'Bc', 'Hi', 'Jk']]\r\n\r\n params = {'id_to_taxonomy_fp': self.id_to_tax1_fp,\r\n 'reference_sequences_fp': self.refseqs1_fp}\r\n expected = (['Ab', 'Bc'], 1., 3)\r\n t = UclustConsensusTaxonAssigner(params)\r\n self.assertEqual(t._get_consensus_assignment(in1),\r\n expected)\r\n\r\n # here the third level is the same in 4/5 of the\r\n # assignments, but one of them (z, y, c) refers to a\r\n # different taxa since the higher levels are different.\r\n # the consensus value should be 3/5, not 4/5, to\r\n # reflect that.\r\n in2 = [['a', 'b', 'c'],\r\n ['a', 'd', 'e'],\r\n ['a', 'b', 'c'],\r\n ['a', 'b', 'c'],\r\n ['z', 'y', 'c']]\r\n expected = (['a', 'b', 'c'], 0.6, 5)\r\n t = UclustConsensusTaxonAssigner(params)\r\n self.assertEqual(t._get_consensus_assignment(in2),\r\n expected)", "def parseAssignments(assignments):\n\treturn dict([(lead, trail) for lead, trail in\n\t\t[litPair.split(\":\") for litPair in assignments.split()]])", "def candidates(self, word):\n return (self.known([word]) or \\\n self.known(self.edits1(word)) or \\\n self.known(self.edits2(word)) or \\\n set([word]))", "def solve(puzzle, words):\r\n with open(words) as inp:\r\n word_docu = json.load(inp)\r\n word_collect = {word.upper() for word in word_docu[\"words\"]}\r\n\r\n with open(puzzle) as puzzle:\r\n letter_str = ''\r\n word_lst = []\r\n letter_verti = ''\r\n\r\n for line in puzzle:\r\n puzzle_str = line.rstrip() + ' '\r\n word_lst.append(puzzle_str)\r\n letter_str = letter_str + puzzle_str\r\n\r\n for i in range(len(word_lst)):\r\n letter_verti = letter_verti + ' '\r\n for word in word_lst:\r\n letter_verti = letter_verti + word[i]\r\n letter_str = letter_str + letter_verti\r\n\r\n words_puzzle = []\r\n\r\n for word in word_collect:\r\n if word in letter_str:\r\n words_puzzle.append(word)\r\n\r\n return words_puzzle", "def transition_possible(self, transition):\n if self.is_multitape:\n word_in = transition.word_in\n else:\n word_in = tupleofwords_to_wordoftuples((transition.word_in,))\n if any(len(t) != len(self.cache) for t in word_in):\n raise TypeError('%s has bad input word (entries should be '\n 'tuples of size %s).' 
% (transition,\n len(self.cache)))\n return self._transition_possible_test_(word_in)", "def comp_choose_word(hand, word_list):\n maxscore = 0\n maxword = \"\" \n for n in range(calculate_handlen(hand)):\n perms = get_perms(hand, n)\n for word in perms:\n wordscore = get_word_score(word, HAND_SIZE)\n if wordscore > maxscore:\n if word not in word_list:\n continue\n else:\n maxscore = wordscore\n maxword = word\n return maxword\n # TO DO...", "def solution(s):", "def solve(formula):\n\n intab, firstLtrsIndex = getGenArgs(formula)\n \n itr = genOuttab(intab, firstLtrsIndex) \n for outtab in itr:\n trantab = maketrans(intab, outtab)\n attempt = formula.translate(trantab) \n if valid(attempt):\n return attempt\n \n return None", "def generate_solutions(possible_words, labels):\r\n return []", "def backtrack_search(self, values):\n values = self.forward_check(values) #forward checking to reduce again\n if values is False:\n return False # Invalid domain through forward checking\n if all(len(values[s]) == 1 for s in self.boxes):\n return values #All boxes have 1 number => solved\n # SELECT-UNASSIGNED-VARIABLE -> used MRV and highest degree\n box = self.MRV_and_degree(values)\n for value in values[box]: #already ordered from smallest to largest, ex: A1: 1257 -> ORDER-DOMAIN-VALUES\n #print(values[s])\n new_sudoku = values.copy()\n new_sudoku[box] = value\n guess = self.backtrack_search(new_sudoku)\n if guess:\n return guess", "def match(pattern: List[str], source: List[str]) -> List[str]:\n sind = 0 # current index we are looking at in the source list\n pind = 0 # current index we are looking at in the pattern list\n result: List[str] = [] # to store the substitutions that we will return if matched\n acc = ''\n\n # keep checking as long as we haven't hit the end of both pattern and source\n while sind != len(source) or pind != len(pattern): \n # Your job is to fill out the body fo this loop\n # 1) if we reached the end of the pattern but not source \n if pind == len(pattern):\n return None\n # 2) if the current thing in the pattern is a %\n elif pattern[pind] == '%':\n pind += 1 # moving from % to next word \n while sind != len(source):\n if pind != len(pattern) and pattern[pind] == source[sind]:\n break \n else: \n if acc == \"\": \n acc += source[sind] # if it is the first character do not add a space \n else: \n acc += \" \"\n acc += source[sind]\n sind += 1\n result.append(acc)\n acc = ''\n # 3) if we reached the end of the source but not the pattern\n elif sind == len(source):\n return None \n # 4) if the current thing in the pattern is an _\n elif pattern[pind] == '_':\n result.append(source[sind])\n sind += 1\n pind += 1\n #appending is for lists and adding is for strings\n # 5) if the current thing in the pattern is the same as the current thing \n # in the source\n elif pattern[pind] == source[sind]:\n sind += 1\n pind += 1\n # 6) else : this will happen if none of the other conditions are met\n # it indicates the current thing it pattern doesn't match the current\n # thing in source\n else: \n return None\n return result", "def test_uc_to_assignment(self):\r\n expected = {'q1': (['A', 'B', 'C'], 1.0, 2),\r\n 'q2': (['A', 'H', 'I', 'J'], 2. 
/ 3., 3),\r\n 'q3': (['Unassigned'], 1.0, 1),\r\n 'q4': (['Unassigned'], 1.0, 1),\r\n 'q5': (['Unassigned'], 1.0, 1)\r\n }\r\n params = {'id_to_taxonomy_fp': self.id_to_tax1_fp,\r\n 'reference_sequences_fp': self.refseqs1_fp}\r\n t = UclustConsensusTaxonAssigner(params)\r\n actual = t._uc_to_assignment(self.uc1_lines)\r\n self.assertEqual(actual, expected)\r\n\r\n # change label for unassignable\r\n expected = {'q1': (['A', 'B', 'C'], 1.0, 2),\r\n 'q2': (['A', 'H', 'I', 'J'], 2. / 3., 3),\r\n 'q3': (['x'], 1.0, 1),\r\n 'q4': (['x'], 1.0, 1),\r\n 'q5': (['x'], 1.0, 1)\r\n }\r\n params = {'id_to_taxonomy_fp': self.id_to_tax1_fp,\r\n 'reference_sequences_fp': self.refseqs1_fp,\r\n 'unassignable_label': 'x'}\r\n t = UclustConsensusTaxonAssigner(params)\r\n actual = t._uc_to_assignment(self.uc1_lines)\r\n self.assertEqual(actual, expected)", "def get_new_word(key, chains):\n values = chains[key]\n return choice(values)", "def learn_bpe(strings: List[str], pair_percent_threshold: float, char_percent_threshold:float, with_position:bool) -> dict:\n\n vocab = get_vocab(strings)\n percent = 1.0\n # 待改进,这里如果 pair_percent_threshold <=0,则会死循环\n while percent >= pair_percent_threshold:\n pair2freq = pair_freq_stats(vocab, with_position)\n best = max(pair2freq, key=pair2freq.get)\n percent = pair2freq.get(best) / len(strings)\n if percent >= pair_percent_threshold:\n vocab = merge_vocab(best, vocab, with_position)\n\n bpe_tokens = get_tokens(vocab, pair_percent_threshold, len(strings), char_percent_threshold, with_position)\n if with_position:\n bpe_tokens = sorted(bpe_tokens, key=lambda x: len(x[0]), reverse=True)\n else:\n bpe_tokens = sorted(bpe_tokens, key=len, reverse=True)\n bpe_token_dict = {'with_position':with_position,\n 'tokens': bpe_tokens}\n return bpe_token_dict" ]
[ "0.7360267", "0.70080864", "0.6352615", "0.62211263", "0.6217823", "0.6217823", "0.5960085", "0.59551066", "0.5912701", "0.58554476", "0.5837058", "0.5765993", "0.57650095", "0.5723373", "0.55428743", "0.5452668", "0.5425594", "0.5342919", "0.53365654", "0.5329358", "0.53284115", "0.52099735", "0.51919985", "0.5122795", "0.5088097", "0.4996302", "0.49825743", "0.49695617", "0.49614692", "0.49407268", "0.49390018", "0.49145094", "0.48974013", "0.48953184", "0.4881254", "0.48732322", "0.48656207", "0.48531696", "0.48271456", "0.47853175", "0.47841808", "0.47759005", "0.47698113", "0.47677532", "0.47675297", "0.47500178", "0.47418272", "0.4680887", "0.4675553", "0.466345", "0.46325147", "0.46208763", "0.46029255", "0.4598478", "0.45769155", "0.4574215", "0.45696867", "0.45628223", "0.4522005", "0.45177615", "0.4499179", "0.44979838", "0.44913888", "0.4479968", "0.44776124", "0.44743872", "0.44690552", "0.44615862", "0.4457585", "0.44561434", "0.44500694", "0.4447415", "0.44473675", "0.4433424", "0.4425957", "0.44247723", "0.44239268", "0.44208527", "0.44172454", "0.44117555", "0.4410181", "0.440835", "0.4408335", "0.44002563", "0.4398828", "0.4398018", "0.439788", "0.4396125", "0.43874905", "0.43850842", "0.4384522", "0.4383164", "0.4382136", "0.4377604", "0.43745807", "0.43737936", "0.43602753", "0.43563816", "0.4353161", "0.4350464" ]
0.70967716
1
Loads data for a specific game map level.
def load_data(self, map_name, grid_name, tp_name): self.map= TiledMap(path.join(self.map_folder, map_name)) self.map_img = self.map.make_map() self.map_img2 = self.map_img #self.noisy_map_img = noisy("gauss", pg.surfarray.array3d(self.map_img)) self.noisy_map_img = make_noisy(pg.surfarray.array3d(self.map_img)) self.map_rect = self.map_img.get_rect() with open(path.join(self.map_folder, tp_name), 'rt') as f: # destinations is a dict mapping each tilemap teleport coordinate to # the destination tilemap coordinate self.destinations = eval(f.read()) self.grid= OccupancyGrid(self, path.join(self.map_folder, grid_name)) #down here because it needs destinations self.graph = self.grid.make_graph() #sounds self.wall_channel=pg.mixer.Channel(0) self.wall_sound=pg.mixer.Sound(WALL_THUD_SOUND) self.teleport_channel=pg.mixer.Channel(1) self.teleport_sound=pg.mixer.Sound(TELEPORT_SOUND)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_map(self):\n # ask for a new level(str)\n answer = simpledialog.askstring(\"Input\", \"Please input a level\",\n parent=self._master)\n if answer is not None:\n if answer in self._level_dic.keys(): # ensure the level exist\n self.reset_world(answer) # load that level\n # if you load a new level, player's health will remain but score will be clear\n self._player.change_score(-self._player.get_score()) # a personal setting\n else:\n messagebox.showerror(\"Invalid filename\",\n \"'\" + answer + \"' is not a filename!\")", "def load_level(level):\n\n global spawn_boxes\n\n level = pytmx.load_pygame('maps/level_' + level + '.tmx')\n\n y_num = 0\n for x, y, gid in level.get_layer_by_name('Objects'):\n if level.get_tile_image_by_gid(gid) != None:\n matrix[y_num].append(1)\n else:\n matrix[y_num].append(0)\n \n if x == 19: y_num += 1\n\n spawn_boxes = [] # Areas in which enemies can spawn. Requires tiled type 'spawn_box'\n for obj in level.get_layer_by_name('Triggers'):\n if obj.type == 'spawn_box':\n rect = pygame.rect.Rect(obj.x, obj.y, obj.width, obj.height)\n if obj.name == 'north': \n rect = rect.move(0, -64)\n rect.height += 64\n if obj.name == 'east': \n rect = rect.move(64, 0)\n rect.width += 64\n if obj.name == 'south': \n rect = rect.move(0, 64)\n rect.height += 64\n if obj.name == 'west': \n rect = rect.move(-64, 0)\n rect.width += 64\n spawn_boxes.append(rect)\n\n return level", "def load_map(self, filename, player=None):\n\n # Close out any old map we have\n self.close_world()\n\n # Now load the new one\n # TODO: check for exceptions, etc.\n (self.world, self.worlddf) = StarboundData.open_world(filename)\n\n if self.world:\n base_filename = os.path.basename(filename)\n self.loaded_filename = filename\n self.set_title()\n if self.world.info.coords:\n self.data_table.set_world_coords(*self.world.info.coords[:2])\n else:\n self.data_table.clear_world_coords()\n self.data_table.set_world_size(*self.world.info.size)\n # We're duplicating some work from Player.get_worlds() here, but\n # consolidating everything would be tricky, and in the end I\n # figured it wouldn't be worth it.\n match = re.match(r'(.*)-([0-9a-f]{32})-(\\d+).(temp)?world', base_filename)\n if match:\n self.data_table.set_world_name(match.group(1))\n self.data_table.set_world_type('Non-Planet System Object')\n self.data_table.set_world_extra('')\n elif filename.endswith('.shipworld'):\n self.data_table.set_world_name('Starship')\n self.data_table.set_world_type('Your Starship')\n self.data_table.set_world_extra('')\n elif self.world.info.name:\n self.data_table.set_world_name(StarboundData.strip_colors(self.world.info.name))\n self.data_table.set_world_type(self.world.info.description)\n self.data_table.set_world_extra(', '.join(self.world.info.world_biomes))\n else:\n self.data_table.set_world_name(base_filename)\n self.data_table.set_world_type('Unknown')\n self.data_table.set_world_extra('')\n self.scene.load_map(self.world)\n\n # Jump to a Mech Beacon, if we have it\n if self.world.get_entity_uuid_coords('mechbeacon') != None:\n self.add_navigation_item('mechbeacon', 'Go to Mech Beacon')\n\n # Update our player-dependent navigation menu actions\n if player:\n\n # Current Player Location\n if player.cur_world_filename and player.cur_world_filename == base_filename:\n self.navigation_actions.append(\n self.navmenu.addAction(\n 'Go to Player Location ({:d}, {:d})'.format(*map(int, player.cur_world_loc)),\n lambda: self.action_to_coords(*player.cur_world_loc),\n ))\n\n # Player Bookmarks\n if base_filename in 
player.bookmarks:\n marks = player.bookmarks[base_filename]\n for mark in sorted(marks):\n self.add_navigation_item(mark.uuid, 'Go to Bookmark: {}'.format(mark.name))\n else:\n # TODO: Handle this better, too.\n raise Exception('World not found')\n\n # Update menu state, potentially\n self.enforce_menu_state()", "def load_data_map(self):\n with open(\"map/maps.txt\") as maps:\n for x_axis, line in enumerate(maps):\n self.x_axis = x_axis\n self.full_map.insert(x_axis, [])\n for y_axis, case in enumerate(line.strip()):\n self.y_axis = y_axis\n if case == \"D\":\n self.full_map[x_axis].insert(y_axis, \"M\")\n self.user.position = (x_axis, y_axis)\n elif case == \"A\":\n self.full_map[x_axis].insert(y_axis, \"A\")\n elif case == \"_\":\n self.full_map[x_axis].insert(y_axis, \"_\")\n elif case == \"#\":\n self.full_map[x_axis].insert(y_axis, \"#\")", "def use_level(self, level):\n\n if self.min_level <= level <= self.max_level:\n map_extent = self.tiles.use_level(level)\n if map_extent:\n self.level = level\n (self.map_width, self.map_height,\n self.ppd_x, self.ppd_y) = map_extent\n (self.map_llon, self.map_rlon,\n self.map_blat, self.map_tlat) = self.tiles.extent\n\n # do level change callback\n self.handleLevelChangeCallback(level)\n\n return True\n\n return False", "def __init__(self, level_id):\n self.enemy_sprites = {}\n self.bullet_sprites = {}\n\n level_dir = f\"data/levels/{level_id}\"\n\n with open(\"data/configuration.json\", 'r') as f:\n game_config_data = json.load(f)\n with open(f\"{level_dir}/enemies.json\", 'r') as f:\n self.level_enemy_data = json.load(f)[\"enemies\"]\n with open(f\"data/paths.json\", 'r') as f:\n path_data = json.load(f)[\"paths\"]\n with open(\"data/enemies/enemies.json\", 'r') as f:\n all_enemy_data = json.load(f)\n with open(\"data/bullets/bullets.json\", 'r') as f:\n all_bullet_data = json.load(f)[\"bullets\"]\n\n # build a list of all enemies referenced in this level\n self.referenced_enemies = []\n for enemy_id in [x[\"type\"] for x in self.level_enemy_data]:\n if enemy_id not in self.referenced_enemies:\n self.referenced_enemies.append(enemy_id)\n \n # load in paths\n self.paths = {}\n for path in path_data:\n self.paths[path[\"id\"]] = CachedPath(path[\"maxspeed\"], path[\"points\"], path[\"rotatewith\"])\n \n # load enemy ids based on sprites\n for enemy in self.referenced_enemies:\n self.enemy_sprites[enemy] = CachedEnemy(all_enemy_data[\"enemies\"][enemy])\n\n # load bullet data\n for bullet in all_bullet_data:\n self.bullet_sprites[bullet] = CachedBullet(all_bullet_data[bullet], game_config_data)", "def __init__(self, level):\n self.level = level\n self.my_map = {}\n self.my_level = []\n self.my_grid = []", "def load(self):\n file = os.path.join(\"./data\", self.name + \".map\")\n with open(file) as fp:\n lines = fp.readlines()\n self.row, self.col = map(int, lines[0].split())\n self.default = int(lines[1]) # デフォルト値\n for line in lines[2:]: # マップデータを読み込む\n line = line.rstrip() # 改行除去\n self.map.append([int(x) for x in list(line)])", "def get_info(self, level):\n\n # see if we can open the tile info file.\n info_file = os.path.join(self.tile_dir, '%02d' % level,\n self.TileInfoFilename)\n try:\n fd = open(info_file, 'rb')\n except IOError:\n return None\n\n # OK, looks like we actually do have this level!\n info = pickle.load(fd)\n fd.close()\n\n return info", "def loadTiles():\n with open('resources/map.txt', 'r') as f:\n rows = f.readlines()\n global numCols\n numCols = len(rows[0].split('\\t')) # Assumes all rows contain the same number of tabs\n 
global numRows\n numRows = len(rows)\n for y in range(numRows):\n cols = rows[y].split('\\t')\n for x in range(numCols):\n tileName = cols[x].replace('\\n', '')\n if tileName == \"StartingRoom\":\n global currentPosition\n currentPosition = [x, y]\n _world[(x, y)] = None if tileName == '' else getattr(__import__('tiles'), tileName) (x, y)", "def load_map(self, new_map):\n new_map.on_load()\n\n self._entities = new_map._entities\n self._entities.append(self._player)\n self._player.x = new_map.player_spawn[0]\n self._player.y = new_map.player_spawn[1]\n\n self._mapfeatures = new_map._mapfeatures\n self.width = len(self._mapfeatures[0])\n self.height = len(self._mapfeatures)", "async def _fetch_level_info(self) -> Level:\n \n memory = await self._read_memory()\n\n level_name = memory.get_level_name()\n level_creator = memory.get_level_creator()\n level_id = memory.get_level_id()\n\n level = Level(id=level_id, name=level_name,\n creator = level_creator)\n\n level.practice_mode = memory.is_practice_mode()\n\n level.attempts = memory.get_attempts()\n level.jumps = memory.get_jumps()\n level.difficulty = memory.get_level_difficulty()\n\n level.percent = math.floor(memory.get_percent())\n level.best_percent = math.floor(memory.get_normal_percent())\n\n level.practice_best = math.floor(memory.get_practice_percent())\n\n level.rating = memory.get_level_stars()\n level.featured = memory.is_level_featured()\n level.epic = memory.is_level_epic()\n\n return level", "def __init__(self, game, world_file):\n self.game = game\n self.world_file = world_file\n self.floor_batch = game.floor_batch\n self.wall_batch = game.wall_batch\n self.lightmap = LightMap()\n self.tiles = {}\n self.load_world()\n self.load_tileset()\n self.player_light = self.lightmap.add_light(0,0,15)", "def load_opacitymaps():\n\treturn load_builtin_data('opacitymaps')", "def load_minimap(self):\n minimap_types = ['cover', 'fog']\n self.game_data[\"minimap\"] = {\"fog\": None, \"cover\": None}\n file_name = self.game_data[\"file_name\"].split(\".json\")[0]\n for minimap_type in minimap_types:\n file_name = f\"{file_name}-{minimap_type}.png\"\n self.game_data[\"minimap\"][minimap_type] = pg.image.load(\n path.join(self.saved_minimap, file_name)).convert_alpha()\n logger.info(\"Load the minimap %s\", file_name)", "def load_data(self):\n @Logger.runtime\n def process_coords():\n \"\"\"\n The placement of locations on our minimap is crucial. Panda3D objects however have a coordinate range from\n -1 to 1 on all axis, meaning that if we read a coordinate of a location from some image processing software\n by hand, we have to transform those coordinates into coordinates Panda would understand. 
This function does\n just that.\n :return: Normalized coordinates of location coordinates.\n \"\"\"\n split_coords = row[\"map_coord\"].split(',')\n map_x, map_y = [int(i) for i in split_coords]\n map_x_normed = ((map_x*2) / self.MINIMAP_DIM) - 1\n map_y_normed = -(((map_y*2) / self.MINIMAP_DIM) - 1)\n return map_x_normed, map_y_normed\n\n @Logger.runtime\n def process_texture():\n texture_path = Path(\"resource/textures/{}\".format(row[\"texture\"]))\n texture = self.loader.loadTexture(texture_path)\n return texture\n\n # the cylinder is loaded here but it does not yet show up, until it's specifically asked to\n self.scene_3d_model = self.loader.loadModel(self.PATHS[\"3D_SCENE_MODEL\"])\n\n try:\n with open(self.PATHS[\"LOCATIONS_DB\"], \"r\") as l_file:\n data = csv.DictReader(l_file, delimiter=\"|\")\n for row in data:\n id = int(row[\"id\"])\n x, y = process_coords()\n neighbors = [int(neighbor_id) for neighbor_id in row[\"neighbors\"].split(',')]\n texture = process_texture()\n location = Location(id, x, y, neighbors, texture)\n location.reparentTo(self.render2d)\n self.locations.append(location)\n Logger.log_info('The locations_db has been loaded')\n except:\n Logger.error('{} file not found!'.format(self.PATHS[\"LOCATIONS_DB\"]))\n\n self.active_location = self.locations[0]", "def load_map(self, filename):\n with open(filename, 'rb') as file:\n self.current_obstacles = pickle.load(file)\n self.current_goal = pickle.load(file)\n try:\n setstate(pickle.load(file))\n except EOFError:\n print(\"No random state stored\")", "def load(level_file):\n level_text = open(level_file).read()\n level_name = os.path.basename(os.path.splitext(level_file)[0])\n cells_by_char = defaultdict(list)\n level_text = level_text.strip()\n\n max_z = level_text.count('\\n\\n') + 1\n max_w = level_text.split('\\n')[0].count(' ') + 1\n max_x = int((len(level_text.split('\\n')[0]) + 1) / max_w) - 1\n max_y = int((level_text.count('\\n') + 1) / max_z)\n size = (max_x, max_y, max_z, max_w)\n\n # Mimics the World.draw method, but splitting instead of joining.\n for z, z_contents in enumerate(reversed(level_text.split('\\n\\n'))):\n for y, y_contents in enumerate(reversed(z_contents.split('\\n'))):\n for w, w_contents in enumerate(reversed(y_contents.split(' '))):\n for x, x_contents in enumerate(w_contents):\n cell = (x, y, z, w)\n assert (i < j for i, j in zip(cell, size))\n if x_contents != '.':\n cells_by_char[x_contents].append(cell)\n\n target = cells_by_char['X'][0]\n del cells_by_char['X']\n\n objects_by_char = {char: Object(cells, char, char != 'o', None)\n for char, cells in cells_by_char.items()}\n\n player = objects_by_char['@']\n item = objects_by_char['#']\n objects = [o for char, o in objects_by_char.items()]\n return Level(player, item, target, World(objects, size), level_name)", "def level_data(self):\n self.level(self.data)", "def loadFile(self, path):\n print(\"loading \\'{}\\',\".format(path.split('/')[-1]), end = \" \")\n with open(path, \"r\") as file_content:\n list_of_lines = file_content.readlines() # get all lines of level representation\n\n # remove '\\n' if a line has '\\n' at the end\n for i in range(len(list_of_lines)):\n if (list_of_lines[i][-1] == \"\\n\"):\n list_of_lines[i] = list_of_lines[i][:-1] # remove '\\n'\n\n # calculate dimensions of the level tensor\n width = len(list_of_lines[0])\n height = len(list_of_lines)\n depth = len(self.tile_reprs)\n level_tensor = np.zeros((height, width, depth)) # this tensor represent level\n\n # traverse the entire level space to populate the 
tensor\n for row in range(height):\n for col in range(width):\n char = list_of_lines[row][col]\n # if this character does not exist in json file, it is all zero\n try:\n level_tensor[row, col, self.tile_reprs.index(char)] = 1\n except:\n pass\n\n\n self.loaded_files.append(path.split('/')[-1].split('.')[0])\n self.loaded_data.append(level_tensor) # store this loaded level\n print(\"success.\")", "def load_file(self, mapfile):\n tmx = pytmx.TiledMap(mapfile)\n self.width = tmx.width\n self.height = tmx.height\n self.load_images(tmx)\n self.load_floor(tmx)\n self.load_objects(tmx)\n self.load_pois(tmx)", "def _load_map(self):\n map = choice(self.environment_template)\n environment = Environment(map.name, map.desc, map.habitable, self.level)\n\n # Display map description\n description = environment.description.format(noise=environment.monster_ctrl.monsters[0].noise)\n description = \"\\n\".join(wrap(description, width=80, fix_sentence_endings=True, initial_indent=\" \",\n subsequent_indent=\" \", break_long_words=False))\n print(\"\\n\", description, \"\\n\")\n input(\"Press any key to continue...\")\n\n initiative_monster = \"Monster has\" if environment.monster_ctrl.monster_count == 1 else \"Monsters have\"\n first_attacker = \"Hero has\" if environment.initiative.value == 0 else initiative_monster\n\n while environment.monster_ctrl.monster_count > 0:\n display_battle(self.hero, environment, first_attacker)\n decision = get_user_input([1, 2, 3])\n if decision == 1:\n self._duels(environment)\n\n elif decision == 2:\n self._show_bag()\n\n else:\n if random() < self.hero.health * .1:\n print(\"[+] Successfully ran away!\")\n input(\"Press any key to continue...\")\n return\n else:\n print(\"[!] Bummer, you failed to run away. You loss two dice rolls on your next attack.\")\n input(\"Press any key to continue...\")\n self.hero.dice_count -= 2\n self._duels(environment)\n\n self.level += 1\n display_no_combat_start(self.hero, environment)\n\n decision = 0\n # Keep iterating until user decides to move on\n while decision != 1:\n if environment.has_loot:\n decision = get_user_input([1, 2, 3, -1])\n else:\n decision = get_user_input([1, 2, -1])\n\n if decision == -1:\n self._quit()\n elif decision == 2:\n self._show_bag()\n display_no_combat_start(self.hero, environment)\n elif decision == 3:\n print(\"[+] Looted\")\n for loot in environment.loot_room():\n self.hero.set_loot(loot)\n display_no_combat_start(self.hero, environment)\n else:\n return", "def load_map(file_name, load_graphics=True):\n map = json.loads(open(file_name, \"r\").read())\n\n if not os.path.isabs(map['color_bitmap_file']):\n map['color_bitmap_file'] = os.path.join(os.path.dirname(file_name), map['color_bitmap_file'])\n\n try:\n if load_graphics:\n map['color_bitmap'] = imread(map['color_bitmap_file'])\n except IOError:\n print \"Warning: Not found color file\"\n\n if not os.path.isabs(map['vector_graphics_file']):\n map['vector_graphics_file'] = os.path.join(os.path.dirname(file_name), map['vector_graphics_file'])\n\n if \"title\" not in map:\n map[\"title\"] = \"\"\n\n if \"board\" not in map:\n # Map is assumed to be always a simple box with 1-cell width wall around\n # and single start field.\n grid = [[1]*(map[\"M\"])]\n grid += [[1] + [0]*(map[\"M\"] - 2) + [1]]\n grid += [[1, 0] + [0]*(map[\"M\"] - 4) + [0, 1] for _ in xrange(map[\"N\"] - 4)]\n grid += [[1] + [0]*(map[\"M\"] - 2) + [1]]\n grid += [[1]*(map[\"M\"])]\n grid[map['start_field'][0]][map['start_field'][1]] = MAP_START_POSITION\n map['file_name'] = file_name\n 
map['board'] = grid\n else:\n raise RuntimeError(\"Not supported custom board parsing\")\n\n return map", "def load_frame(self):\n world_map = self.data[self.time_point][\"tiles\"]\n self.tiles = []\n for x in range(self.width):\n for y in range(self.height):\n index = x + self.width * y\n tile = world_map[index]\n xpos = x * tile_size\n ypos = y * tile_size\n if tile[\"type\"] == \"Wall\":\n sprite = pyglet.sprite.Sprite(images[\"Wall\"], x=xpos, y=ypos)\n elif tile[\"type\"] == \"SnakeHead\":\n sprite = pyglet.sprite.Sprite(images[\"SnakeHead\"], x=xpos, y=ypos)\n elif tile[\"type\"] == \"SnakeBody\":\n sprite = pyglet.sprite.Sprite(images[\"SnakeBody\"], x=xpos, y=ypos)\n elif tile[\"type\"] == \"Doodah\":\n sprite = pyglet.sprite.Sprite(images[\"Doodah\"], x=xpos, y=ypos)\n elif tile[\"type\"] == \"Blank\":\n sprite = pyglet.sprite.Sprite(images[\"Blank\"], x=xpos, y=ypos)\n self.tiles.append(sprite)", "def pickMap(self, selectedmap):\r\n for lvl in self.maplevels:\r\n lvl.style.border_color = (255,255,255)\r\n self.selectedmap = selectedmap;\r\n self.maplevels[selectedmap].style.border_color = (0,0,0)\r\n self.repaint()\r\n self.slevel.mapimage.value = pygame.image.load(\"../media/map\" + str(self.selectedmap) +\".png\")\r\n self.slevel.repaint()", "def get_map_data(data_dir, adm_level):\n # Determine filename\n file_prefix = \"adm1\" if adm_level == \"adm0\" else \"adm2\"\n\n # Read file\n filename = os.path.join(data_dir, file_prefix + \"_quantiles.csv\")\n df = pd.read_csv(filename)\n\n # Keep median\n df = df.loc[df[\"quantile\"] == 0.5]\n\n return df", "def load_map(path):\n file = open(path + '.txt', 'r')\n data = file.read().split('\\n')\n game_map = []\n file.close()\n for row in data:\n game_map.append(list(row))\n return game_map", "def loadMapping(self, mapfile=\"./mapping.json\"):\n\t\ttry:\n\t\t\tfd = open(mapfile, \"r\")\n\t\t\tmappings = json.load(fd)\n\t\t\tif \"Sharing\" in mappings.keys():\n\t\t\t\tself.share_levels = mappings[\"Sharing\"]\n\t\t\tif \"Type\" in mappings.keys():\n\t\t\t\tself.type_map = mappings[\"Type\"]\n\t\t\tif \"Extra-Tag\" in mappings.keys():\n\t\t\t\tself.extra_tag = mappings[\"Extra-Tag\"]\n\t\t\tif \"Privacy\" in mappings.keys():\n\t\t\t\tself.privacy_levels = mappings[\"Privacy\"]\n\t\t\tfd.close()\n\t\texcept Exception as e:\n\t\t\tprint(\"IMPOSSIBLE TO LOAD MAPPINGS from %s\" % mapfile)\n\t\t\tprint(e)\n\t\t\tsys.exit(0)\n\t\treturn", "def mapdata():\n return getmapdata(db, MyTable)", "def load_game(self):\n self.game = db.get_game(self.game_id)", "def loading():\n PTS, COIN, LIVES = 0, 1, 2\n # Loading up and declaring all level elements\n global levelNum, marioPos\n globalSound(\"stop\") # Stopping all music\n # Preparing game variables for next level\n levelNum += 1\n oldState = marioPos[5]\n if oldState == -1:\n oldState = 0\n marioPos = [40, 496, 0, 0, \"Right\", oldState]\n marioStats = [True, 0, False, False, False, False, False, 0]\n backPos = 0\n marioFrame = [0,0, 0]\n # Loading in all level objects\n brickList = loadFile(str(\"data/level_\" + str(levelNum) + \"/bricks.txt\"))\n interactBricks = loadFile(str(\"data/level_\" + str(levelNum) + \"/interactBricks.txt\")) # 1-4: Rect, VY, State, Coins\n questionBricks = loadFile(str(\"data/level_\" + str(levelNum) + \"/questionBricks.txt\")) # 1-4: Rect, VY, State, Type\n coins = loadFile(str(\"data/level_\" + str(levelNum) + \"/coins.txt\"))\n goombas = loadFile(str(\"data/level_\" + str(levelNum) + \"/goombas.txt\"))\n spinys = loadFile(str(\"data/level_\" + str(levelNum) + 
\"/spinys.txt\"))\n gunRects = loadFile(str(\"data/level_\" + str(levelNum) + \"/guns.txt\"))\n flagInfo = loadFile(str(\"data/level_\" + str(levelNum) + \"/flag.txt\"))\n # Loading screen variables and rendered text\n uniSprite = 0\n currentWorld = marioFontBig.render(\"World 1-%s\" %levelNum, False, (255,255,255))\n lives = marioFontBig.render(\"X %s\" %marioScore[LIVES], False, (255,255,255))\n startTime = time.get_ticks()\n # Menu screen that should only stay for 2.5 seconds\n while time.get_ticks() - startTime < 2500:\n for evnt in event.get(): \n if evnt.type == QUIT:\n return [\"exit\", None, None, None, None, None, None, None, None, None, None, None, None]\n # Drawing loading screen\n screen.fill(BLACK) # Clearing screen\n uniSprite = spriteCounter(uniSprite) # Progressing the sprites\n drawStats(None, None, marioScore[PTS], marioScore[COIN], time.get_ticks(), levelNum, True, True, statCoin, uniSprite)\n screen.blit(currentWorld, (300, 250)) # Blitting current world text\n screen.blit(lives, (390, 315)) # Blitting number of lives text\n screen.blit(marioSprites[0][0], (315, 300)) # Blitting mario\n display.flip()\n fpsCounter.tick(60)\n # Returning all variables for game function to handle\n return [\"game\", brickList, interactBricks, questionBricks, coins, goombas, flagInfo, marioPos, backPos, marioStats, marioFrame, gunRects, spinys]", "def get_map(options, data):\r\n try:\r\n map_f = open(options.map_fname, 'U').readlines()\r\n except (TypeError, IOError):\r\n raise MissingFileError('Mapping file required for this analysis')\r\n data['map'] = parse_mapping_file(map_f)\r\n return data['map']", "def load_map(self, filename):\n self.renderer = TiledRenderer(filename)", "def get_mapdata():\n return render_template(\"l_heatmap.html\")", "def _load(self, load_dict):\n if self.v_locked:\n raise pex.ParameterLockedException(\n \"Parameter `%s` is locked!\" % self.v_full_name\n )\n\n if \"data\" in load_dict:\n self._data = load_dict[\"data\"][\"data\"][0]\n self._default = self._data\n else:\n self._logger.warning(\n \"Your parameter `%s` is empty, \"\n \"I did not find any data on disk.\" % self.v_full_name\n )\n\n if \"explored_data\" in load_dict:\n self._explored_range = [\n x for x in load_dict[\"explored_data\"][\"data\"].tolist()\n ]\n self._explored = True\n\n self._locked = True", "def load():\n\n # To run this command type: 'python manage.py shell'\n # 'from map.views import load; load()'\n\n mapping = {\"productivi\": \"productivi\", \"mpoly\": \"MULTIPOLYGON\"}\n map_path = os.path.abspath('gis_django/fields_test/test_fields.shp')\n lm = LayerMapping(Map, map_path, mapping, transform=False, encoding=\"iso-8859-1\")\n lm.save(verbose=True)", "def read_level(self):\n current_level = 1\n\n try:\n if self.store.exists(LEVEL_STORE):\n current_level_str = self.store.get(LEVEL_STORE)['level']\n current_level = int(current_level_str)\n except:\n print 'Exception when reading Galaxy run level from JSON file!'\n current_level = 1\n\n return current_level", "def setGeolevel(self):\n #geocodeDict = {16:\"Block\",12:\"Block_Group\",11:\"Tract\",5:\"County\",2:\"State\",1:\"National\"}\n geocodeLen = len(self.geocode)\n try:\n self.geolevel = self.geocodeDict[geocodeLen]\n except KeyError:\n error_msg = \"No GeoLevel name for geocode of length {} (geocode:{}) in geocode dictionary \\\"{}\\\"\"\\\n .format(geocodeLen, self.geocode, self.geocodeDict)\n logging.error(error_msg)\n raise KeyError(error_msg)", "def load_data_file(self):\n with open(self.files['data'], 'r') as infile:\n data = 
json.load(infile)\n self.boundary_nodes = data['boundary_nodes']\n self.nodes = {int(k): v for k, v in data['nodes'].items()}\n self.levels = data['levels']\n infile.close()", "def loadSuits(level):\n loadSuitModelsAndAnims(level, flag = 1)\n loadDialog(level)", "def load_image(self, name, colorkey=None):\n dictname = name[0:name.find('.')]\n fullname = os.path.join('TeddyLevel','data', name)\n try:\n image = pygame.image.load(fullname)\n except pygame.error, message:\n print 'Cannot load image:', fullname\n raise SystemExit, message\n image = image.convert()\n if colorkey is not None:\n if colorkey is -1:\n colorkey = image.get_at((0,0))\n image.set_colorkey(colorkey, RLEACCEL)\n self.dict[dictname] = image, image.get_rect()", "def load_data(self):\n if os.path.isfile(_POSPKL):\n with open(_POSPKL, 'rb') as fpkl:\n dat = pickle.load(fpkl)\n for name, obj in self.toplevel_wins.iteritems():\n if dat.get(name, None):\n obj.set_position(*dat[name])", "def load_map(self, mapFileName):\n self.new_map()\n self.map = Map.load_map(mapFileName)\n return self.map", "def get_data_path(level, env='stage'):\n levelstring = dict(l0='level0', l1a='level1a', l1b='level1b',\n hk='housekeeping/level1a')\n path = env_path(env) / levelstring[level]\n return path", "def load(cls):\n playerdata = Data.raw_load(\"savedata.dat\")\n for key in playerdata:\n cls.name = playerdata[\"name\"]\n cls.max_hp = playerdata[\"max_hp\"]\n cls.hp = playerdata[\"hp\"]\n cls.lv = playerdata[\"lv\"]\n cls.exp = playerdata[\"exp\"]\n cls.atk = playerdata[\"atk\"]\n cls._def = playerdata[\"_def\"]\n cls.inventory = playerdata[\"inventory\"]\n cls.pin = playerdata[\"pin\"]", "def load(self, key):\n if key in self.SMGData.keys():\n return self.SMGData[key]\n else:\n raise Exception('Key does not exist in the data structure')", "def loadingLevelForDisplay(self):\n\n #We load all the elements and the table of the level selected self._set_grille_csv()\n self._set_grille_csv()\n self.whichElementIsInTheLevel()\n self.fillTableWithElements()", "def __load(self, node):\n\n self.tiles = node['data']\n self.name = node['name']\n self.opacity = node['opacity']\n self.visible = node['visible']", "def load_map():\n\tmap_repr = pickle.load(open(\"map_repr.pickle\", \"rb\"))\n\treturn ok(map_repr)", "def LoadSprites(self):\n \"\"\"calculate the center point offset\"\"\"\n x_offset = (BLOCK_SIZE/2)\n y_offset = (BLOCK_SIZE/2)\n\n\n \"\"\"Load the level\"\"\"\n level1 = level001.level()\n layout = level1.getLayout()\n img_list = level1.getSprites()\n\n\n\t\t# > The pellet sprites are grouped\n\t\t# > The block sprites are grouped\n\t\t# > The Wall sprites are grouped\n\t\t# > This is an example of a style of Object Oriented Programming, assigning\n\t\t# > The groups of items as a bluprint of one object to save typing the\n\t\t# > same code over and over.\n self.pellet_sprites = pygame.sprite.Group()\n self.block_sprites = pygame.sprite.Group()\n self.gwall_sprites = pygame.sprite.Group()\n\n for y in xrange(len(layout)):\n for x in xrange(len(layout[y])):\n \"\"\"Get the center point for the rects\"\"\"\n centerPoint = [(x*BLOCK_SIZE)+x_offset,(y*BLOCK_SIZE+y_offset)]\n if layout[y][x]==level1.BLOCK:\n self.block_sprites.add(basicSprite.Sprite(centerPoint, img_list[level1.BLOCK]))\n elif layout[y][x]==level1.GWALL:\n self.gwall_sprites.add(basicSprite.Sprite(centerPoint, img_list[level1.GWALL]))\n elif layout[y][x]==level1.SNAKE:\n self.snake = Snake(centerPoint,img_list[level1.SNAKE])\n elif layout[y][x]==level1.PELLET:\n 
self.pellet_sprites.add(basicSprite.Sprite(centerPoint, img_list[level1.PELLET]))\n elif layout[y][x]==level1.GHOST:\n self.ghost = Ghost(centerPoint,img_list[level1.GHOST])\n elif layout[y][x]==level1.GHOST2:\n self.ghost2 = Ghost(centerPoint,img_list[level1.GHOST2])\n elif layout[y][x]==level1.GHOST3:\n self.ghost3 = Ghost(centerPoint,img_list[level1.GHOST3])\n elif layout[y][x]==level1.GHOST4:\n self.ghost4 = Ghost(centerPoint,img_list[level1.GHOST4])\n \"\"\"Create the Snake group\"\"\"\n self.snake_sprites = pygame.sprite.RenderPlain((self.snake))\n\tself.ghost_sprites = pygame.sprite.RenderPlain((self.ghost))\n\tself.ghost2_sprites = pygame.sprite.RenderPlain((self.ghost2))\n\tself.ghost3_sprites = pygame.sprite.RenderPlain((self.ghost3))\n\tself.ghost4_sprites = pygame.sprite.RenderPlain((self.ghost4))", "def create_level(self, name):\n \n # Create a level object\n level = Level()\n size_y=8\n size_x=10\n # Separates static and non static parts\n # This will speed up network games, since only the non static part will be\n # sent on the network\n level_static = soya.World(level)\n \n # Load 3 materials (= textures) for files ./materials{grass|ground|snow}.data\n \n ground = soya.Material.get(\"block2\")\n \n \n # Creates a landscape, from the heighmap \"./images/map.png\"\n # The landscape is in the static part (=level_static), because it won't change along the game.\n land = soya.Land(level_static)\n land.y =0.0\n land.from_image(soya.Image.get(\"floor.png\"))\n \n # Sets how high is the landscape\n land.multiply_height(-0.0)\n \n # These values are trade of between quality and speed\n land.map_size = 8\n land.scale_factor = 1.5\n land.texture_factor = 1.0\n \n # Set the texture on the landscape, according to the height\n # (i.e. height 0.0 to 15.0 are textured with grass, ...)\n \n land.set_material_layer(ground, 0.0, 25.0)\n \n # squares where the player starts\n # Note that this is stored in physical, not abstract, coordinates.\n always_clear=[(-1,-1),(-2,-1),(0,-1),(-1,-2),(-1,0)]\n cube = soya.Shape.get(\"cube\")\n \n # r and c represent the cube positions in the grid,\n # while x and y represent the physical coordinates in the world.\n # Note the simple formula: r = x + self.size_x , c = y + self.size_y\n border_row, border_col = 2*size_x - 2, 2*size_y - 2\n for r, x in enumerate(range(-size_x,size_x-1)):\n for c, y in enumerate(range(-size_y,size_y-1)):\n bx = x +128\n by = y +128 \n if (r % 2 == 0 and c % 2 == 0) or \\\n (r == 0 or c == 0 or r == border_row or c == border_col ):\n # This is a wall block\n block = soya.Volume(level_static, cube)\n block.scale(1.0, 1.0, 1.0)\n block.set_xyz(bx, 0.5, by) \n elif random() < 0.8 and not (x, y) in always_clear:\n # A soft block\n block = SoftBox()\n level.add_mobile(block)\n block.scale(1.0, 1.0,1.0)\n block.set_xyz(bx, 0.5, by)\n \n # Creates a light in the level, similar to a sun (=a directional light)\n sun = soya.Light(level_static)\n sun.directional = 1\n sun.diffuse = (1.0, 0.8, 0.4, 1.0)\n sun.rotate_vertical(-45.0)\n \n # Creates a sky atmosphere, with fog\n atmosphere = soya.SkyAtmosphere()\n atmosphere.ambient = (0.3, 0.3, 0.4, 1.0)\n atmosphere.fog = 1\n atmosphere.fog_type = 0\n atmosphere.fog_start = 40.0\n atmosphere.fog_end = 50.0\n atmosphere.fog_color = atmosphere.bg_color = (0.2, 0.5, 0.7, 1.0)\n atmosphere.skyplane = 1\n atmosphere.sky_color = (1.5, 1.0, 0.8, 1.0)\n \n # Set the atmosphere to the level\n level.atmosphere = atmosphere\n \n # Save the level as \"./worlds/level_demo.data\" (remember, levels are 
subclasses of worlds)\n level_static.filename = level.name = name+\"_bbomber_static\"\n level_static.save()\n level.filename = level.name = name+\"_bbomber\"\n level.save()", "def open(self, path):\n\n # abre el tilemap en formato JSON\n data = JSON.open(path)\n\n # número de tiles en 'x' y 'y'\n self.width = data['width']\n self.height = data['height']\n\n # ancho y alto de los tiles\n self.tilewidth = data['tilewidth']\n self.tileheight = data['tileheight']\n\n # calcula las dimensiones del tilemap en pixeles\n self.rect.w = self.width * self.tilewidth\n self.rect.h = self.height * self.tileheight\n\n # extrae los tilesets\n tilesets = self.tilesets\n for tileset_node in data['tilesets']:\n tileset = TiledTileset(tileset_node, path)\n tilesets.append(tileset)\n self.split_tileset(tileset)\n\n # extrae las capas (layers)\n layers = self.layers\n for layer_node in data['layers']:\n layer = TiledLayer(layer_node)\n layers.append(layer)\n self.arrange_tiles(layer)", "def load_data(self) -> None:", "def loadObjectMaps(self):\n path = os.path.join(self.dir,settings['mosh.modInfos.objectMaps'])\n if os.path.exists(path):\n self.objectMaps = compat.uncpickle(open(path,'rb'))\n else:\n self.objectMaps = {}", "def load_maps(self):\r\n fname = self.path + '\\\\phase_maps.p'\r\n if os.path.isfile(fname):\r\n return pickle.load(open(fname, 'rb'))\r\n else:\r\n return {}", "def generate_level(level):\n seed = level * 69420 # multiply by 69420 to not have the seeds too close to each other\n random.seed(seed)\n dimensions = get_map_size(level)\n level_map = np.full(dimensions, -1)\n while -1 in level_map:\n choice = random.choice(np.argwhere(level_map == -1))\n next_index = (choice[0], choice[1])\n # get indices of the tiles next to the current index\n left_index, up_index, right_index, down_index = get_direction_indices(next_index)\n left = tile_needs_connection(left_index, level_map, has_connection_right)\n up = tile_needs_connection(up_index, level_map, has_connection_down)\n right = tile_needs_connection(right_index, level_map, has_connection_left)\n down = tile_needs_connection(down_index, level_map, has_connection_up)\n level_map[next_index] = get_tile(left, up, right, down)\n return un_solve(level_map)", "def __init__(self, engine, level): \n super().__init__(engine)\n self.tiles = []\n \n \"\"\"R is for the backgrounds (water, grass, etc.)\n G is for obstacles on the tile or roads\n B is for status effects\n \"\"\"\n level_img = cv2.imread(\"levels/\" + level + \".png\")\n for i in range(level_img.shape[0]):\n for j in range(level_img.shape[1]):\n if level_img[i,j][1]:\n pass\n else:\n row.append(Tile.getTyleType(j))", "def load_data(self):\n if self.debug:\n print(\"Loading data\")", "def get_levels(self):\n return self.levels[self.game]", "def load_game(filename):\n global width, height, dungeon_map # DO NOT REMOVE\n\n try:\n with open(filename, 'r') as file_handler:\n file_text = None # TODO: Replace None so that file_text contains all the text in the file.\n except FileNotFoundError as error:\n return False\n\n lines = file_text.split('\\n')\n line = None # TODO: Replace None so that your code removes the first string in the list lines and stores it in the variable line.\n temp_width, temp_height, player_x, player_y, player_symbol, werewolf_x, werewolf_y, werewolf_health, werewolf_stun_count = None\n # TODO: Replace None on the preceding line of code. 
Extract values for all of these variables from the variable line.\n # TODO: All of the variables on the line above EXCEPT player_symbol need to be integers, not strings.\n\n temp_dungeon_map = [] # Store the dungeon map from the file in a temporary variable before we decide to throw out our old map.\n while len(temp_dungeon_map) < temp_height and len(lines) > 0:\n row = None # TODO: Replace None so that your code removes the first string in the list lines and stores it in the variable row.\n if row != '':\n temp_dungeon_map.append(list(row)) # What does list do here?\n squares = 0\n for row in temp_dungeon_map: # Count up the number of squares in the file's map.\n squares += len(row)\n if squares != temp_width * temp_height:\n # Validation: If the number of squares in the dungeon map does not match the width and height values in the file, that is a problem.\n return False\n\n # The data from the file is valid so now we can update the real width, height, and dungeon_map variables.\n width = temp_width\n height = temp_height\n dungeon_map = temp_dungeon_map\n\n player_inventory = {}\n for line in lines:\n # TODO: Extract from the remaining lines of text in the file data for the player's inventory.\n # Each line of the inventory data has a single character for the item, followed by a space, followed by the count for that item.\n # Store the data in the player_inventory dictionary; each key in the dictionary is an item, and its matching value is the numeric count.\n pass\n\n # TODO: Return a tuple of values (that you got in his function) so that the order of the values matches the order in the tuple returned by load_default_game().", "def load_data(self):", "def _populate_level_with_enemies(self,\n map_layer_configuration,\n base_enemy_chance_cave: float = 0.006,\n base_enemy_chance_dungeon: float = 0.006,\n base_boss_chance: float = 0.003) -> None:\n enemy_chance_cave = self.generate_enemy_chance(base_enemy_chance_cave)\n enemy_chance_dungeon = self.generate_enemy_chance(base_enemy_chance_dungeon)\n boss_chance = self.generate_enemy_chance(base_boss_chance)\n for row in map_layer_configuration:\n for block in row:\n if block[0] == ' ':\n if np.random.rand() > (1 - enemy_chance_cave):\n if self.sprites.drill.center_x != block[1] or self.sprites.drill.center_y != block[2]:\n enemy_to_add = random.choice(potential_enemies)\n enemy_to_append = enemy_to_add(block[1], block[2], vision=200)\n self.sprites.entity_list.append(enemy_to_append)\n self.sprites.enemy_list.append(enemy_to_append)\n elif block[0] == 'F':\n if np.random.rand() > (1 - enemy_chance_dungeon):\n if self.sprites.drill.center_x != block[1] or self.sprites.drill.center_y != block[2]:\n enemy_to_add = random.choice(potential_enemies)\n enemy_to_append = enemy_to_add(block[1], block[2], vision=200)\n self.sprites.entity_list.append(enemy_to_append)\n self.sprites.enemy_list.append(enemy_to_append)\n elif np.random.rand() > (1 - boss_chance):\n if self.sprites.drill.center_x != block[1] or self.sprites.drill.center_y != block[2]:\n enemy_to_add = random.choice(potential_bosses)\n enemy_to_append = enemy_to_add(block[1], block[2], vision=200, speed=0.7)\n self.sprites.entity_list.append(enemy_to_append)\n self.sprites.enemy_list.append(enemy_to_append)\n self.sprites.drill_list.append(self.sprites.drill)\n\n for entity in self.sprites.entity_list:\n entity.setup_collision_engine([self.sprites.indestructible_blocks_list])", "def LoadMapping(self, fname):\n\n M = [{} for i in range(N_ChanUIDS)]\n\n # Load Map:\n with open(fname, \"r\") 
as f:\n pass", "def setupLevel(self):\n\n self.state = GameState.SETUP\n\n # vado a leggere il dizionario corrispondente\n # al numero di livello corrente facendo in modo\n # che se il numero di livello richiesto non esiste\n # carico quello più vicino a quello richiesto\n if self.levelIndex>= len(levels):\n self.levelIndex = len(levels) -1\n elif self.levelIndex <0:\n self.levelIndex = 0\n\n level = levels[self.levelIndex]\n\n # nome del livello\n self.level_name = level.get(\"name\", \"Livello %s\" % (self.levelIndex+1))\n\n # dimensione del labirinto (numero di righe e di colonne)\n self.nrows = level.get(\"nrows\", 20)\n self.ncols = level.get(\"ncols\", 20)\n\n # l'algoritmo di generazione del labirinto supporta solo un numero di\n # righe e di colonne dispari, quindi approssimiamo le dimensioni ai\n # valori dispari più vicini\n if self.nrows % 2 == 0:\n self.nrows+=1\n if self.ncols % 2 == 0:\n self.ncols+=1\n\n\n # fattore di scala del labirinto\n # attenzione che, fattori di scala molto\n # grandi, rallentano le prestazioni di gioco\n self.scale = level.get(\"scale\", 30)\n\n background_image_filename = level.get(\"background_image\", None)\n if background_image_filename!=None:\n self.background_image = pygame.image.load(background_image_filename).convert()\n else:\n self.background_image = None\n\n # parametri usati dall'algoritmo di generazione del labirinto\n # si veda https://en.wikipedia.org/wiki/Maze_generation_algorithm\n self.maze_density = level.get(\"maze_density\", Game.MAZE_DENSITY)\n self.maze_complexity = level.get(\"maze_complexity\", Game.MAZE_COMPLEXITY)\n\n # colore delle monete\n self.coin_color = level.get(\"coin_color\", Game.YELLOW)\n\n # tempo a disposizione per completare il livello\n self.time = level.get(\"time\", 240)\n self.clockTime = level.get(\"clock\", 80)\n\n # numero di nemici\n self.numEnemies = level.get(\"num_enemies\", 0)\n\n # numero di ricaricatori temporali\n self.numTimeReloaders = level.get(\"time_reloaders\", 0)\n\n # numero di bombe \"distruggi monete\"\n self.bonus_bombs = level.get(\"bombs\", [])\n # numero di bombe \"distruggi muri\"\n self.bonus_wall_bombs = level.get(\"wall_bombs\", [])\n # numero di bombe \"distruggi nemici\"\n self.bonus_enemy_killers = level.get(\"enemy_killers\", [])\n # numero di pizze che rendono i nemici golosi di monete\n self.bonus_greedy_enemies = level.get(\"greedy_enemies\", 0)\n # numero di portali (teletrasporto del giocatore)\n self.bonus_portals = level.get(\"portals\", 0)\n\n # proiettili a disposizione del giocatore per un certo periodo di tempo\n self.bonus_player_bullets = level.get(\"player_bullets\", [])\n\n #numero di bonus che rendono il giocatore invisibile per un certo periodo di tempo\n self.bonus_invisibility_players = level.get(\"invisibility_players\", [])\n\n # numero di shooters (nemici che sparano contro il giocatore)\n self.numShooters = level.get(\"num_shooters\" , [])\n\n\n # suoni di collisione\n self.sound_explosion = pygame.mixer.Sound(\"Effects/smc-wwvi/big_explosion.ogg\")\n self.sound_bomb_explosion = pygame.mixer.Sound(\"Effects/smc-wwvi/bombexplosion.ogg\")\n\n\n # suono della moneta raccolta\n #self.sound_coin = pygame.mixer.Sound(\"Effects/SFX/beep_7.wav\")\n self.sound_coin = pygame.mixer.Sound(\"Effects/jute-dh/gold.wav\")\n\n # suono del timeReloader\n self.sound_time_reloader = pygame.mixer.Sound(\"Effects/SFX/echo_5.wav\")\n\n # suono di collisione con enemy killer\n self.sound_enemy_killer = pygame.mixer.Sound(\"Effects/smc-wwvi/big_explosion.ogg\")\n\n # suono 
dell'invisibility player\n self.sound_invisibility_player = pygame.mixer.Sound(\"Effects/sound_effects/trekscan.wav\")\n\n # suono del teletrasporto\n self.sound_portal = pygame.mixer.Sound(\"Effects/sound_effects/trekscan.wav\")\n\n # suono dell'arma presa e del proiettile sparato\n self.sound_weapon = pygame.mixer.Sound(\"Effects/jute-dh/hit_2m.wav\")\n\n # suono dei greedy enemies\n self.sound_greedy_enemies = pygame.mixer.Sound(\"Effects/sound_effects/squeak2.wav\")\n\n # suono del levello completato\n self.sound_completed_level = pygame.mixer.Sound(\"Effects/sound_effects/level_completed.wav\")\n\n #\n # IMMAGINI DEGLI SPRITE DI GIOCO: CONFIGURABILE DA FILE DI CONFIGURAZIONE!!\n #\n\n # immagine delle pareti del labirinto\n self.wall_filename = level.get(\"wall\", \"Backgrounds/Dim/Boards.jpg\")\n\n # immagine dei nemici del labirinto\n self.enemies_filename = level.get(\"enemies\", \"Sprites/Animals/duck.png\")\n\n # immagine dei nemici del labirinto che possono anche sparare\n # di default gli shooters hanno lo stesso aspetto dei nemici normali\n self.shooters_filename = level.get(\"shooters\", self.enemies_filename)\n\n # immagine della bomba distruggi monete\n self.bomb_filename = level.get(\"bomb\", \"Sprites/bomb_bonus.png\")\n # immagine della bomba distruggi muri\n self.wall_bomb_filename = level.get(\"wall_bomb\", \"Sprites/bomb_wall_bonus.png\")\n\n self.time_reloaders_filename = level.get(\"time_reloader\", \"Sprites/clessidra.png\")\n self.enemy_killers_filename = level.get(\"enemy_killer\", \"Sprites/skull2.png\")\n self.greedy_enemies_filename = level.get(\"greedy_enemy\", \"Sprites/pizza.png\")\n self.portals_filename = level.get(\"portal\", \"Sprites/CrawlStone/portal.png\")\n self.invisibility_players_filename = level.get(\"invisibility_player\", \"Sprites/CrawlStone/wizard_hat_2.png\")\n\n # lo sprite che fornisce i proiettili ha la stessa immagine dei proiettili\n self.player_bullets_filename = level.get(\"player_bullet\", \"Sprites/CrawlStone/apple.png\")\n self.bonus_player_bullets_filename = self.player_bullets_filename\n\n self.shooters_bullets_filename = level.get(\"shooter_bullet\", \"Sprites/CrawlStone/apple.png\")\n\n #\n # GRUPPI DI SPRITES\n #\n\n # i muri del mio labirinto\n self.walls = pygame.sprite.Group()\n\n # i nemici\n self.enemies = pygame.sprite.Group()\n\n # i nemici che sparano fanno parte dello stesso gruppo dei nemici!\n #self.shooters = pygame.sprite.Group()\n\n # le bombe\n self.bombs = pygame.sprite.Group()\n\n # gli attivatori/disattivatori di nemici golosi\n self.greedyEnemies = pygame.sprite.Group()\n\n # le bombe che spaccano i muri\n self.wallBombs = pygame.sprite.Group()\n\n # i ricaritori temporali\n self.timeReloaders = pygame.sprite.Group()\n\n # le monete da raccogliere\n self.coins = pygame.sprite.Group()\n\n # i killer dei nemici\n self.enemyKillers = pygame.sprite.Group()\n\n # i portali per spostarsi in nuove aree\n self.portals = pygame.sprite.Group()\n\n # i nemici che rendono invisibile il giocatore\n self.invisibilityPlayers = pygame.sprite.Group()\n\n # i proiettili sparati dal giocatore\n self.playerBullets = pygame.sprite.Group()\n\n # i proiettili sparati dagli shooters\n self.shooterBullets = pygame.sprite.Group()\n\n # il bonus che fornisce proiettili sparati dal giocatore\n self.bonusPlayerBullets = pygame.sprite.Group()\n\n\n self.free_locations = []\n\n # genero il labirinto che prescinde dai fattori di scala\n self.maze = self.generate_maze()\n #print(self.maze)\n\n # il giocatore e i nemici hanno una dimensione 
che dipende dal fattore di scala\n self.player = pygame.sprite.GroupSingle(Player(int(self.scale * 0.8), int(self.scale * 0.8),\n self.scale, 1,\n \"Sprites/pac-classic/ghost-red-front.png\",\n )\n )\n self.player.sprite.setWalls(self.walls)\n # imposto le immagini del giocatore sulla base della posizione\n # l'ordine è UP, DOWN , RIGHT, LEFT\n\n self.player.sprite.setImages([\n [\"Sprites/pac-classic/ghost-red-rear.png\",\n \"Sprites/pac-classic/ghost-red-front.png\",\n \"Sprites/pac-classic/ghost-red-right.png\",\n \"Sprites/pac-classic/ghost-red-left.png\",\n ],\n\n [\"Sprites/pac-classic/ghost-orange-rear.png\",\n \"Sprites/pac-classic/ghost-orange-front.png\",\n \"Sprites/pac-classic/ghost-orange-right.png\",\n \"Sprites/pac-classic/ghost-orange-left.png\",\n ],\n\n [\"Sprites/pac-classic/ghost-lblue-rear.png\",\n \"Sprites/pac-classic/ghost-lblue-front.png\",\n \"Sprites/pac-classic/ghost-lblue-right.png\",\n \"Sprites/pac-classic/ghost-lblue-left.png\",\n ],\n\n ]\n )\n\n\n\n\n #\n # CREAZIONE DEGLI SPRITES\n #\n\n # CREO I MIEI NEMICI\n self.createEnemies(self.numEnemies,self.enemies_filename,self.enemies)\n\n # CREO I MIEI NEMICI CHE SPARANO che aggiungo allo stesso gruppo dei nemici!\n self.createShooters(self.numShooters, self.shooters_filename, self.shooters_bullets_filename,self.shooterBullets,\n self.sound_weapon, self.enemies)\n\n # CREO LE BOMBE che sono ObjectDestroyer che distruggono le monete\n self.createObjectDestroyers(self.bonus_bombs,self.bomb_filename,self.bombs, self.coins)\n\n\n # CREO LE WALL BOMBS che sono WallDestroyer che consentono di distruggere i muri\n # interni del labirinto\n self.createInnerObjectDestroyers(self.ncols, self.nrows,self.bonus_wall_bombs,\n self.wall_bomb_filename,self.wallBombs,self.walls)\n # CREO GLI ENEMY KILLERS che sono ObjectDestroyer che consentono di eliminare i nemici\n self.createObjectDestroyers(self.bonus_enemy_killers, self.enemy_killers_filename, self.enemyKillers, self.enemies)\n\n # Creo GREEDY_ENEMIES come ENEMY che consentono di rendere, alternativamente, i nemici golosi di monete oppure no\n self.createEnemies(self.bonus_greedy_enemies, self.greedy_enemies_filename, self.greedyEnemies)\n\n # Alternativamente potrei creare GREED ENEMIES come ObjectDestroyer che in realtà non distruggono niente, ma rendono \"golosi\"\n # i nemici che stanno intorno a loro in modo che inizino a mangiare monete. Se stanno già mangiando\n # monete, al contrario, dovrebbero smettere. 
CHIEDERLO COME ESERCIZIO\n\n # CREO I TIME RELOADERS che consentono di ripristinare il tempo\n self.createEnemies(self.numTimeReloaders, self.time_reloaders_filename, self.timeReloaders)\n\n # CREO I PORTALI che consentono di trasferirsi in una nuova locazione random\n self.createEnemies(self.bonus_portals, self.portals_filename, self.portals)\n\n # CREO I TIME LIMITED POWERS, come quello che rende invisibile il giocatore\n self.createTimeLimitedPowers(self.bonus_invisibility_players, self.invisibility_players_filename, self.invisibilityPlayers)\n # e come il ricaricatore di proiettili\n self.createTimeLimitedPowers(self.bonus_player_bullets, self.bonus_player_bullets_filename, self.bonusPlayerBullets)\n\n self.mazeSurf = pygame.Surface((self.ncols * self.scale, self.nrows * self.scale))\n # disegno il labirinto coi suoi muri\n self.drawMaze()\n\n self.scrollSurface = self.mazeSurf.copy()\n #self.scrollSurface.fill((0, 0, 0))\n\n pos = random.choice(self.free_locations)\n print(\"Loc Player:%s\" % str(pos))\n\n self.player.sprite.setPosition(pos)\n\n # imposto posizione e movimento iniziale\n # ai vari gruppi di sprites\n\n self.setInitialPosition(self.enemies.sprites())\n self.setInitialPosition(self.bombs.sprites())\n self.setInitialPosition(self.wallBombs.sprites())\n self.setInitialPosition(self.timeReloaders.sprites())\n self.setInitialPosition(self.enemyKillers.sprites())\n self.setInitialPosition(self.greedyEnemies.sprites())\n self.setInitialPosition(self.portals.sprites())\n self.setInitialPosition(self.invisibilityPlayers.sprites())\n self.setInitialPosition(self.bonusPlayerBullets.sprites())\n\n #self.setInitialPosition(self.shooters.sprites())\n\n # normalmente i nemici non mangiano monete...\n self.enemies_eater = False\n\n\n # a inizio livello si dà tempo di 5 secondi al Giocatore per divincolarsi\n # da eventuali nemici che compaiono negli immediati dintorni\n # della posizione (casuale) in cui si viene a trovare\n # il giocatore a inizio livello\n self.player.sprite.addPower(PlayerPowers.INVISIBILITY, (self.time,5))\n\n # imposto la musica del livello e la mando in esecuzione\n self.music = level.get(\"music\", \"./Music/Soundimage/Techno-Gameplay_Looping.ogg\")\n pygame.mixer.music.load(self.music)\n # mando in esecuzione in modalità loop (valore -1)\n pygame.mixer.music.play(-1)\n\n # barra di stato del gioco con informazioni sul punteggio\n self.setupGamebarSurface()", "def load_tile(tile):\n return pygame.image.load(tile[\"states\"][\"default\"][0])", "def draw_level(self):\r\n self.level_surface.blit(self.map_image, self.viewport, self.viewport)\r\n self.level_surface.blit(self.title_box, self.title_rect)", "def __init__(self, data):\n self.width = int(data[\"width\"])\n self.height = int(data[\"height\"])\n self._tiles = list(Tile[t] for t in data[\"map\"])", "def use_level(self, n):\n\n # try to get cache for this level, no cache means no level\n try:\n self.tile_cache = self.cache[n]\n except KeyError:\n return None\n\n # get tile info\n info = self.get_info(n)\n if info is None:\n return None\n\n (self.num_tiles_x, self.num_tiles_y, self.ppd_x, self.ppd_y) = info\n\n # cache partial path to level dir\n self.tile_level_dir = os.path.join(self.tile_dir, '%02d' % n)\n\n return (self.tile_size_x*self.num_tiles_x,\n self.tile_size_y*self.num_tiles_y,\n self.ppd_x, self.ppd_y)", "def load(self, index):\n selected = self.games[index]\n try:\n with open(path.join(self.saved_games, selected)) as f:\n self.game_data['game_data'] = json.load(f)\n self.game_data['file_name'] = 
selected\n self.game_data['loaded'] = True\n self.game_data['next'] = False\n super().set_state(TRANSITION_OUT)\n logger.info('Load : %s', selected)\n except EnvironmentError as e:\n logger.exception(e)\n\n try:\n self.load_minimap()\n except EnvironmentError as e:\n logger.exception(e)", "def open(self):\n options = {'defaultextension': '.lvl',\n 'filetypes': [('Levels', '.lvl'), ('All files', '*')],\n 'initialdir': 'levels',\n 'initialfile': '',\n 'title': 'Open level'}\n filename = askopenfilename(**options)\n if filename:\n self.blocks = LevelLoader.load(filename)[0]", "def __init__(self, player, base, mapp, base_level):\n self.the_Player = player\n self.screen = base\n self.base_enemy_level = base_level\n self.enemy_list = EnemyList(self)\n self.locationMap = pygame.image.load(\"Images/\"+mapp)\n self.display = self.screen.display\n self.display.blit(self.locationMap, (0, 0))", "def _data_for_level(level_name, include_output=True):\n INPUT_FILE_BASE = 'input.txt'\n OUTPUT_FILE_BASE = 'output.txt'\n\n input_file_name = os.path.join(TEST_DATA_DIR, level_name, INPUT_FILE_BASE)\n output_file_name = os.path.join(TEST_DATA_DIR, level_name, OUTPUT_FILE_BASE)\n\n with open(input_file_name) as input_file:\n input_data = _load_input(input_file)\n\n if include_output:\n with open(output_file_name) as output_file:\n expected_output = _load_output(output_file)\n else:\n expected_output = None\n\n return input_data, expected_output", "def load_data(self):\n # Get Paths Setup\n self.dir = path.dirname(__file__)\n self.img_dir = path.join(self.dir, 'img')\n self.snd_dir = path.join(self.dir, 'snd')\n\n # Load High Score\n try:\n with open(path.join(self.dir, HIGH_SCORE_FILE), 'r') as f:\n self.highscore = int(f.read())\n except FileNotFoundError:\n self.highscore = 0\n\n # Load Images / Background\n self.player_image = pg.image.load(path.join(self.img_dir, PLAYER_FILE))\n self.planet_images = []\n for i in range(1, 11):\n self.planet_images.append(pg.image.load(path.join(self.img_dir, 'planets', 'p{}shaded.png'.format(i)))\n .convert())\n self.moon_images = []\n for i in range(1, 4):\n self.moon_images.append(pg.image.load(path.join(self.img_dir, 'moons', 'Moon{}.png'.format(i))).convert())\n self.sun_image = pg.image.load(path.join(self.img_dir, SUN_FILE)).convert()\n self.fuel_image = pg.image.load(path.join(self.img_dir, 'pickups', FUEL_FILE)).convert()\n self.arrow_image = pg.image.load(path.join(self.img_dir, ARROW_FILE)).convert()\n # LOAD BACKGROUNDS\n self.background = pg.image.load(path.join(self.img_dir, BACKGROUND_FILE)).convert()\n self.background_rect = self.background.get_rect()\n print(f'BACKGROUND WIDTH, HEIGHT: {self.background_rect.width}, {self.background_rect.height}')\n self.loadscreen = pg.image.load(path.join(self.img_dir, START_SCREEN_FILE)).convert()\n self.loadscreen_rect = self.loadscreen.get_rect()\n\n # BUILDING EXPLOSION ANIMATIONS\n self.explosion_animation = {'lg': [], 'sm': [], 'tiny': [], 'player': []}\n for i in range(0, 9):\n filename = 'tank_explosion{}.png'.format(i)\n img = pg.image.load(path.join(self.img_dir, 'explosions', filename)).convert()\n img.set_colorkey(BLACK)\n img_lg = pg.transform.scale(img, (50, 50))\n self.explosion_animation['lg'].append(img_lg)\n img_sm = pg.transform.scale(img, (10, 10))\n self.explosion_animation['sm'].append(img_sm)\n img_tiny = pg.transform.scale(img, (6, 6))\n self.explosion_animation['tiny'].append(img_tiny)\n filename = 'sonicExplosion0{}.png'.format(i)\n img_pl = pg.image.load(path.join(self.img_dir, 'explosions', 
filename)).convert()\n img = pg.transform.scale(img_pl, (80, 80))\n self.explosion_animation['player'].append(img)\n\n # Load Sounds / Music\n self.crash_sound = pg.mixer.Sound(path.join(self.snd_dir, CRASH_SND_FILE))\n self.crash_sound.set_volume(.4)\n self.moon_crash_sound = pg.mixer.Sound(path.join(self.snd_dir, MOON_CRASH_SND_FILE))\n self.moon_crash_sound.set_volume(.03)\n self.player_crash_sound = pg.mixer.Sound(path.join(self.snd_dir, PLAYER_CRASH_SND_FILE))\n self.player_crash_sound.set_volume(.9)\n self.launch_sound = pg.mixer.Sound(path.join(self.snd_dir, JUMP_SND_FILE))\n self.launch_sound.set_volume(.8)\n self.jump_sector_sound = pg.mixer.Sound(path.join(self.snd_dir, JUMP_SECTOR_SND_FILE))\n self.jump_sector_sound.set_volume(1)\n self.jetpack_sound = pg.mixer.Sound(path.join(self.snd_dir, JETPACK_SND_FILE))\n self.jetpack_sound.set_volume(.3)", "def load_default_game():\n global width, height, dungeon_map # DO NOT REMOVE\n width = 5\n height = 3\n dungeon_map = [list(\"&.@:=\"), list(\" \"), list(\"OYO k\")]\n return (\n 2, # player x\n 1, # player y\n '>', # player symbol\n {'+': 1}, # inventory\n 0, # werewolf x\n 1, # werewolf y\n 1, # werewolf health\n 0, # werewolf stun count\n )", "def test_correct_levels_loading(self):\n #initialization\n manager = LevelManager(\"testdata/correctlevels\")\n manager.load_next_level()\n first_level = manager.current_level\n second_level = manager.load_next_level()\n #check first level\n self.assertEqual(first_level.level, 1)\n self.assertEqual(first_level.snake_max_length, 15)\n self.assertEqual(first_level.maze_width, 23)\n self.assertEqual(first_level.maze_height, 8)\n self.assertEqual(first_level.snake_length, 6)\n self.assertEqual(first_level.snake_direction, (0, 1))\n self.assertEqual(first_level.snake,\n [(1, 8), (1, 7), (1, 6), (1, 5), (1, 4), (1, 3)])\n self.assertEqual(first_level.barrier,\n [(1, 14), (2, 14), (3, 15), (3, 16), (3, 17), (3, 18), (3, 19)])\n #check second level\n self.assertEqual(second_level.level, 2)\n self.assertEqual(second_level.snake_max_length, 10)\n self.assertEqual(second_level.maze_width, 10)\n self.assertEqual(second_level.maze_height,10)\n self.assertEqual(second_level.snake_length, 5)\n self.assertEqual(first_level.snake_direction, (0, 1))\n self.assertEqual(second_level.snake,\n [(3, 2), (3, 3), (3, 4), (4, 4), (5, 4)])\n self.assertEqual(second_level.barrier,\n [(1, 4), (6, 4)])\n #test that end of level exception is raised\n self.assertRaises(LastLevelError, manager.load_next_level)", "def reset_world(self, new_level):\n if new_level == self._start_level: # if restart\n self._player.change_health(self._max_health) # set player with max_health\n self._player.change_score(-self._player.get_score()) # set player score 0\n elif self._filename == new_level: # if reset current level\n # set the player's health and score by previous record\n record_health, record_score = self._level_dic[new_level]['record'] # get health & score\n self._player.change_health(record_health - self._player.get_health())\n self._player.change_score(record_score - self._player.get_score())\n\n self._filename = new_level # change the current level name\n self._goal = self._level_dic[self._filename]['goal'] # change the current goal\n self._tunnel = self._level_dic[self._filename]['tunnel'] # change the current tunnel\n self._player.set_bonus_health(False) # player can have bonus health again\n self._player.set_jumping(False) # player can't jump until land\n self._player.set_invincible(False) # buff won't be brought to another 
level\n # create world, add player, setup collision handlers\n self._world = load_world(self._builder, self._filename)\n self._world.add_player(self._player, self._x, self._y, self._mass)\n self._builder.clear()\n self._setup_collision_handlers()", "def load(filepath=None, data=None, **kwargs):\n return ResearchMap(filepath, data, **kwargs)", "def load_common_map():\r\n\tglobal common_map\r\n\tif os.path.exists(paths.path_data_map_common_pickle):\r\n\t\tprint('\\nloading map')\r\n\t\tcommon_map = pickle.load(open(paths.path_data_map_common_pickle,\"rb\"))\r\n\t\treturn True\r\n\telse:\r\n\t\treturn False", "def load_ligovirgo():\n url = 'https://gracedb.ligo.org/apiweb/superevents/S200115j/files/bayestar.multiorder.fits'\n sky_map = Table.read(url)\n log.info('creating ORM records')\n localization = Localization.from_sky_map(sky_map)\n log.info('saving')\n db.session.add(localization)\n log.info('committing')\n db.session.commit()\n log.info('done')", "def _load_team_map(self) -> None:\n self._cursor.execute(\"select id, franchid, teamid, lgid from teams where yearid = %s;\", (self._yearid,))\n all_teams = self._cursor.fetchall()\n for team in all_teams:\n r = {'id': team[0], 'franchid': team[1], 'teamid': team[2], 'lgid': team[3]}\n self._team_map[team[1]] = r", "def read(self) -> Tuple[Level, Any]:\n dflevel = Level()\n dflevel.name = b\"Linear Randomizer\"\n dflevel.level_type = LevelType.NEXUS\n\n base_tile = Tile(\n sprite_set=TileSpriteSet.FOREST,\n sprite_tile=6,\n sprite_palette=1,\n )\n\n x_left = -6\n x_right = -6 + len(self.data[\"doors\"]) * 8 + 4\n for x in range(x_left, x_right):\n dflevel.tiles[(19, x, 0)] = copy.deepcopy(base_tile)\n dflevel.tiles[(19, x, 1)] = copy.deepcopy(base_tile)\n dflevel.tiles[(19, x, -6)] = copy.deepcopy(base_tile)\n dflevel.tiles[(19, x, -5)] = copy.deepcopy(base_tile)\n if x <= x_left + 1 or x_right - 2 <= x:\n for y in range(-4, 0):\n dflevel.tiles[(19, x, y)] = copy.deepcopy(base_tile)\n\n dflevel.calculate_edge_visibility()\n dflevel.calculate_edge_caps()\n\n for i, door_id in enumerate(range(199, 200 + len(self.level_doors))):\n door = LevelDoor()\n door.door_set = self.data[\"doors\"][door_id][\"door\"]\n dflevel.add_entity(i * 48 * 8, 0, door, id_num=door_id)\n\n fog_trigger = FogTrigger()\n fog_trigger.normalize()\n fog_trigger.star_bottom = 0.0\n fog_trigger.star_middle = 0.0\n fog_trigger.star_top = 0.0\n fog_trigger.width = 500\n dflevel.add_entity(0, 0, fog_trigger)\n\n for i in range((len(self.level_doors) - 1) // 4):\n red_door = RedKeyDoor()\n red_door.keys_needed = i + 1\n dflevel.add_entity(((i + 1) * 4 * 8 + 4) * 48, 0, red_door)\n\n return dflevel, None", "def load_data():\n if _LOCATIONS_BY_ID:\n return _LOCATIONS_BY_NAME, _LOCATIONS_BY_ID\n\n # We need to read the locations in order of country -> admin level 1 -> admin level 2 -> city.\n # This is so that the higher resolution locations can look up the lower resolution locations\n # that they belong to, and compute the necessary fields.\n countries_by_code = _load_country_data(_DATA_FILES['country'])\n admin1_by_code = _load_admin1_data(_DATA_FILES['admin_1'], countries_by_code)\n admin2_by_code = _load_admin2_data(_DATA_FILES['admin_2'], countries_by_code, admin1_by_code)\n _load_city_data(_DATA_FILES['city'], countries_by_code, admin1_by_code, admin2_by_code)\n _add_alternate_names(_DATA_FILES['alt_wiki_names'])\n _add_estimated_importances(_DATA_FILES['estimated_importance'])\n\n return _LOCATIONS_BY_NAME, _LOCATIONS_BY_ID", "def make_map(player, dungeon_level):\n new_map = 
map.Map(config.MAP_HEIGHT, config.MAP_WIDTH, dungeon_level)\n new_map.objects.append(player)\n player.current_map = new_map\n player.camera_position = algebra.Location(0, 0)\n new_map.random_seed = libtcod.random_save(0)\n _build_map(new_map)\n for new_room in new_map.rooms:\n _place_objects(new_map, new_room, player)\n player.pos = new_map.rooms[0].center()\n\n new_map.initialize_fov()\n return new_map", "def get_tile(tilefile,level,x,y):\n\t\n\ttf=file(tilefile,\"r\")\n\t\n\ttd=pickle.load(tf)\n\ta=td[(level,x,y)]\n\t\n\ttf.seek(a[0],1)\n\tret=tf.read(a[1])\n\t\n\ttf.close()\n\treturn ret", "def _load(self, load_dict):\n self._data_ = load_dict", "def loadLightMap( self, imageName = \"lightmap1.jpg\" ):\n try:\n from PIL.Image import open\n except ImportError, err:\n from Image import open\n glActiveTextureARB(GL_TEXTURE1);\n return texture.Texture( open(imageName) )", "def load_maps(cat,maps=None):\n\n if maps is None:\n if cat.release=='y1':\n maps=np.array(list(config.map_name_y1.keys()))\n elif cat.release=='sv':\n maps=np.array(list(config.map_name_sv.keys()))\n print maps\n for i,x in enumerate(maps):\n print i,x\n if x=='ebv':\n setattr(cat,x,split_methods.get_maps(cat.ra,cat.dec,x,release=cat.release,nside=2048,map=True))\n else:\n setattr(cat,x,split_methods.get_maps(cat.ra,cat.dec,x,release=cat.release))\n\n return", "def _load(self, load_dict):\n if self.v_locked:\n raise pex.ParameterLockedException(\n \"Parameter `%s` is locked!\" % self.v_full_name\n )\n\n try:\n self._data = load_dict[\"data\" + ArrayParameter.IDENTIFIER]\n\n if \"explored_data\" + ArrayParameter.IDENTIFIER in load_dict:\n explore_table = load_dict[\"explored_data\" + ArrayParameter.IDENTIFIER]\n\n idx = explore_table[\"idx\"]\n\n explore_list = []\n\n # Recall the arrays in the order stored in the ObjectTable 'explored_data__rr__'\n for name_idx in idx:\n arrayname = self._build_name(name_idx)\n explore_list.append(load_dict[arrayname])\n\n self._explored_range = [x for x in explore_list]\n self._explored = True\n\n except KeyError:\n super(ArrayParameter, self)._load(load_dict)\n\n self._default = self._data\n self._locked = True", "def setup(self, level):\r\n\r\n # Used to keep track of our scrolling\r\n self.view_bottom = 0\r\n self.view_left = 0\r\n\r\n # Keep track of the score\r\n self.score = 0\r\n\r\n # Keep track of lives\r\n # self.lives = 5\r\n\r\n # Create the Sprite lists\r\n self.player_list = arcade.SpriteList()\r\n self.foreground_list = arcade.SpriteList()\r\n self.background_list = arcade.SpriteList()\r\n self.wall_list = arcade.SpriteList()\r\n self.coin_list = arcade.SpriteList()\r\n\r\n # Set up the player, specifically placing it at these coordinates.\r\n image_source = \"images/Alice/Alice7_front.png\"\r\n self.player_sprite = arcade.Sprite(image_source, CHARACTER_SCALING)\r\n self.player_sprite.center_x = PLAYER_START_X\r\n self.player_sprite.center_y = PLAYER_START_Y\r\n self.player_list.append(self.player_sprite)\r\n\r\n # --- Load in a map from the tiled editor ---\r\n\r\n # Name of the layer in the file that has our platforms/walls\r\n platforms_layer_name = 'Platforms'\r\n moving_platforms_layer_name = 'Moving Platforms'\r\n # Name of the layer that has items for pick-up\r\n coins_layer_name = 'Coins'\r\n # Name of the layer that has items for foreground\r\n foreground_layer_name = 'Foreground'\r\n # Name of the layer that has items for background\r\n background_layer_name = 'Background'\r\n # Name of the layer that has items we shouldn't touch\r\n dont_touch_layer_name = 
\"Don't Touch\"\r\n\r\n # Map name\r\n map_name = f\"map4_level_{level}.tmx\"\r\n\r\n # Read in the tiled map\r\n my_map = arcade.tilemap.read_tmx(map_name)\r\n\r\n # Calculate the right edge of the my_map in pixels\r\n self.end_of_map = my_map.map_size.width * GRID_PIXEL_SIZE\r\n\r\n # -- Background\r\n self.background_list = arcade.tilemap.process_layer(my_map,\r\n background_layer_name,\r\n TILE_SCALING)\r\n\r\n # -- Foreground\r\n self.foreground_list = arcade.tilemap.process_layer(my_map,\r\n foreground_layer_name,\r\n TILE_SCALING)\r\n\r\n # -- Platforms\r\n self.wall_list = arcade.tilemap.process_layer(map_object=my_map,\r\n layer_name=platforms_layer_name,\r\n scaling=TILE_SCALING,\r\n use_spatial_hash=True)\r\n # -- Moving Platforms\r\n moving_platforms_list = arcade.tilemap.process_layer(my_map, moving_platforms_layer_name, TILE_SCALING)\r\n for sprite in moving_platforms_list:\r\n self.wall_list.append(sprite)\r\n\r\n # -- Coins\r\n self.coin_list = arcade.tilemap.process_layer(my_map,\r\n coins_layer_name,\r\n TILE_SCALING,\r\n use_spatial_hash=True)\r\n\r\n # -- Don't Touch Layer\r\n self.dont_touch_list = arcade.tilemap.process_layer(my_map,\r\n dont_touch_layer_name,\r\n TILE_SCALING,\r\n use_spatial_hash=True)\r\n\r\n # --- Other stuff\r\n # Set the background color\r\n if my_map.background_color:\r\n arcade.set_background_color(my_map.background_color)\r\n\r\n # Create the 'physics engine'\r\n self.physics_engine = arcade.PhysicsEnginePlatformer(self.player_sprite,\r\n self.wall_list,\r\n GRAVITY)", "def __init__(self):\n with open('data.json') as data_file:\n self.data = json.load(data_file)\n self.game_over = False", "def callback_LoadMap(fileName=None):\n loading_msg = 'Load Bioprocess:'\\\n '\\n(will check processes/ by default)'\n\n # get fileName from user\n if not fileName:\n fileName = sg.popup_get_text(loading_msg, 'File Loader')\n\n if fileName:\n # add default path and .json ext\n fileName = brf.default_path(fileName)\n jName, fileName = brf.get_file_ext('.json', fileName)\n # attempt to load in specified json\n try:\n with open(jName) as j:\n currentMods = json.load(j)\n except(FileNotFoundError):\n sg.popup('Error: File could not be opened')\n currentMods = None\n\n else:\n currentMods = 'cancel'\n\n return currentMods", "def load_map_data(path):\n data = np.load(path)\n v_map = data['v_map'].reshape(25, 25)\n q_mean_map = data['q_1_moment'].reshape(25, 25)\n knack_map = data['knack_map'].reshape(25, 25)\n knack_map_kurtosis = data['knack_map_kurtosis'].reshape(25, 25)\n\n # normalize array into (0., 1.) 
to visualize\n v_map = normalize(v_map)\n q_mean_map = normalize(q_mean_map)\n knack_map = normalize(knack_map)\n knack_map_kurtosis = normalize(knack_map_kurtosis)\n\n return {'v_map': v_map, 'q_mean_map': q_mean_map, 'knack_map': knack_map, 'knack_map_kurtosis': knack_map_kurtosis}", "def _load(self, load_dict):\n if self.v_locked:\n raise pex.ParameterLockedException(\n \"Parameter `%s` is locked!\" % self.v_full_name\n )\n\n if \"data\" in load_dict:\n dump = load_dict[\"data\"]\n self._data = pickle.loads(dump)\n else:\n self._logger.warning(\n \"Your parameter `%s` is empty, \"\n \"I did not find any data on disk.\" % self.v_full_name\n )\n\n try:\n self.v_protocol = load_dict[PickleParameter.PROTOCOL]\n except KeyError:\n # For backwards compatibility\n self.v_protocol = PickleParameter._get_protocol(dump)\n\n if \"explored_data\" in load_dict:\n explore_table = load_dict[\"explored_data\"]\n\n name_col = explore_table[\"idx\"]\n\n explore_list = []\n for name_id in name_col:\n arrayname = self._build_name(name_id)\n loaded = pickle.loads(load_dict[arrayname])\n explore_list.append(loaded)\n\n self._explored_range = explore_list\n self._explored = True\n\n self._default = self._data\n self._locked = True", "def load_map_data(self, path):\n data = np.load(path)\n v_map = data['q_1_moment'].reshape(25, 25)\n knack_map = data['knack_map'].reshape(25, 25)\n knack_map_kurtosis = data['knack_map_kurtosis'].reshape(25, 25)\n\n # normalize array into (0., 1.) to visualize\n v_map = normalize(v_map)\n knack_map = normalize(knack_map)\n knack_map_kurtosis = normalize(knack_map_kurtosis)\n\n v_map = map_reshaper(v_map)\n knack_map = map_reshaper(knack_map)\n knack_map_kurtosis = map_reshaper(knack_map_kurtosis)\n\n return {'v_map': v_map, 'knack_map': knack_map, 'knack_map_kurtosis': knack_map_kurtosis}", "def load_variable_map(\n self,\n varname,\n itime = None,\n idepth = None,\n time = None,\n depth = None,\n ):\n # default values\n if itime is None and time is None:\n itime = -1\n if idepth is None and depth is None:\n idepth = 0\n # load variable\n if self.depth is not None:\n try:\n var = self.load_variable_profile(varname)\n except LookupError:\n var = self.dataset.data_vars[varname]\n else:\n var = self.dataset.data_vars[varname]\n # check position\n if 'nCells' in var.dims:\n position = 'cell'\n elif 'nVertices' in var.dims:\n position = 'vertex'\n elif 'nEdges' in var.dims:\n position = 'edge'\n else:\n raise LookupError('Cannot load \\'{}\\' as MPASOMap or MPASODomain'.format(varname))\n print('Loading \\'{}\\'...'.format(varname))\n # check time dimension\n if 'Time' not in var.dims:\n data_s1 = var\n else:\n if isinstance(itime, numbers.Integral):\n data_s1 = var.isel(Time=itime)\n elif time is not None:\n data_s1 = var.sel(Time=time)\n else:\n raise TypeError('Either \\'itime\\' in \\'int\\' or time is required')\n print(' time = {}'.format(data_s1.coords['Time'].values))\n # check depth dimension\n if self.depth is None:\n data_s2 = data_s1\n else:\n ndim = len(data_s1.dims)\n if ndim == 1:\n data_s2 = data_s1\n elif ndim == 2 and 'nVertLevels' in data_s1.dims:\n if isinstance(idepth, numbers.Integral):\n data_s2 = data_s1.isel(nVertLevels=idepth)\n elif depth is not None:\n data_s2 = data_s1.sel(nVertLevels=depth)\n else:\n raise TypeError('Either \\'idepth\\' in \\'int\\' or depth is required')\n print(' detph = {} ({})'.format(\n data_s2.coords['nVertLevels'].values,\n data_s2.coords['nVertLevels'].attrs['units']))\n elif ndim == 2 and 'nVertLevelsP1' in data_s1.dims:\n if 
isinstance(idepth, numbers.Integral):\n data_s2 = data_s1.isel(nVertLevelsP1=idepth)\n elif depth is not None:\n data_s2 = data_s1.sel(nVertLevelsP1=depth)\n else:\n raise TypeError('Either \\'idepth\\' in \\'int\\' or depth is required')\n print(' detph = {} ({})'.format(\n data_s2.coords['nVertLevelsP1'].values,\n data_s2.coords['nVertLevelsP1'].attrs['units']))\n elif ndim == 2 and 'nVertLevelsLES' in data_s1.dims:\n if isinstance(idepth, numbers.Integral):\n data_s2 = data_s1.isel(nVertLevelsLES=idepth)\n elif depth is not None:\n data_s2 = data_s1.sel(nVertLevelsLES=depth)\n else:\n raise TypeError('Either \\'idepth\\' in \\'int\\' or depth is required')\n print(' detph = {} ({})'.format(\n data_s2.coords['nVertLevelsLES'].values,\n data_s2.coords['nVertLevelsLES'].attrs['units']))\n else:\n raise LookupError('\\'{}\\' cannot be loaded on either MPASOMap or MPASODomain')\n # create MPASOMap if on a sphere\n if self.dataset.attrs['on_a_sphere'] == 'YES':\n out = MPASOMap(\n data = data_s2.values.squeeze(),\n name = var.attrs['long_name'],\n units = var.attrs['units'],\n mesh = self.mesh,\n position = position,\n )\n else: # otherwise create MPASDomain\n out = MPASODomain(\n data = data_s2.values.squeeze(),\n name = var.attrs['long_name'],\n units = var.attrs['units'],\n mesh = self.mesh,\n position = position,\n )\n print('Done')\n return out", "def map_data(cult):\n try: # Map already exists\n underworld_model = Underworld.objects.get(owner=cult)\n field = generate_map(underworld_model.seed)\n except Underworld.DoesNotExist: # Generate new map\n # Create a new random seed every time we create an Underworld map\n seed = ''.join(random.choice(ascii_letters + digits) for _ in range(32))\n field = generate_map(seed)\n underworld_model = Underworld(owner=cult, seed=seed, x=field['x'], y=field['y'], time=0)\n underworld_model.save()\n\n print('### ########################### Seed used: ' + underworld_model.seed)\n \n return underworld_model, field", "def get_data( obj, prm, lev, date, timelevel=0 ):\n \n parameter = obj( name = prm, level = lev, dataDate = date )[ timelevel ]\n print( parameter.dataDate )\n \n #-----Checking grit type----------------------------------------------\n if parameter.gridType == \"sh\":\n lat, lon, data = sh( parameter.values )\n elif parameter.gridType == \"reduced_gg\":\n lat, lon = parameter.latlons() #very easy implementastion with a gg\n lon = lon - 180. #else it only draws on half the map\n data = parameter.values\n elif parameter.gridType == \"regular_gg\":\n lat, lon = parameter.latlons() #very easy implementastion with a gg\n lon = lon - 180. 
#else it only draws on half the map\n data = parameter.values\n else: \n print ( parameter.gridType )\n \n return lat, lon, data", "def _init_world(self):\n self.world.restricted_world = {\n 'not_road': [],\n 'cross_road': [],\n }\n for polygon in self._data_loader.data.get_polygons(0):\n polygon_name = polygon['label']\n polygon_points = polygon['points']\n if polygon_name in {'not_road', 'cross_road'}:\n self.world.restricted_world[polygon_name].append(geometry.Polygon(\n self._data_loader.convertIMG2PLAY(polygon_points)\n ))", "def load_game(self, load_dir):\n self.reset_game_fields()\n # Set Game data\n game_space_ids = []\n game_character_ids = []\n game_item_ids = []\n game_exit_ids = []\n with open(os.path.join(load_dir, \"game/game.json\")) as file_handle:\n game_data = json.load(file_handle)\n self.event_status = game_data['event_status']\n self.event_status_list = game_data['event_status_list']\n game_space_ids = game_data['spaces']\n game_character_ids = game_data['characters']\n game_exit_ids = game_data['exits']\n game_item_ids = game_data['items']\n\n # Create Player\n player_item_ids = []\n with open(os.path.join(load_dir, \"player/player.json\")) as file_handle:\n player_data = json.load(file_handle)\n player_name = player_data['name']\n player_alive = player_data['alive']\n player_energy = player_data['energy']\n player_capacity = player_data['capacity']\n player_description = player_data['description']\n player_items = player_data['items']\n player_location = player_data['location']\n new_player = Player(name=player_name, description=player_description, capacity=player_capacity,\n alive=player_alive, energy=player_energy, items=player_items,\n location=player_location)\n # Add player to Game\n self.player = new_player\n\n # Create Items\n item_files = os.listdir(os.path.join(load_dir, \"items\"))\n for file in item_files:\n if file[-1] is not '~':\n with open(os.path.join(load_dir, \"items\", file)) as file_handle:\n item_data = json.load(file_handle)\n item_name = item_data['name']\n item_visible = item_data['visible']\n item_locked = item_data['locked']\n item_weight = item_data['weight']\n item_description = item_data['description']\n item_id = item_data['id']\n\n i = Item(new_id=item_id, name=item_name, visible=item_visible, locked=item_locked,\n weight=item_weight, description=item_description)\n self.items.append(i)\n\n # Create Characters\n character_files = os.listdir(os.path.join(load_dir, \"characters\"))\n for file in character_files:\n if file[-1] is not '~':\n with open(os.path.join(load_dir, \"characters\", file)) as file_handle:\n char_data = json.load(file_handle)\n char_name = char_data['name']\n char_response = char_data['response']\n char_description = char_data['description']\n char_id = char_data['id']\n\n c = Character(new_id=char_id, name=char_name, description=char_description, response=char_response)\n self.characters.append(c)\n\n # Create Spaces\n space_files = os.listdir(os.path.join(load_dir, \"spaces\"))\n for file in space_files:\n if file[-1] is not '~':\n with open(os.path.join(load_dir, \"spaces\", file)) as file_handle:\n space_data = json.load(file_handle)\n space_name = space_data['name']\n space_long_description = space_data['long_description']\n space_short_description = space_data['short_description']\n space_visited = space_data['visited']\n space_id = space_data['id']\n space_characters = space_data['characters']\n space_exits = space_data['exits']\n space_items = space_data['items']\n\n s = Space(new_id=space_id, name=space_name, 
long_description=space_long_description,\n short_description=space_short_description, visited=space_visited,\n items=space_items, characters=space_characters, exits=space_exits)\n self.spaces.append(s)\n\n\n # Create Exits\n exit_files = os.listdir(os.path.join(load_dir, \"exits\"))\n for file in exit_files:\n if file[-1] is not '~':\n with open(os.path.join(load_dir, \"exits\", file)) as file_handle:\n exit_data = json.load(file_handle)\n exit_space = exit_data['space']\n exit_name = exit_data['name']\n exit_direction = exit_data['direction']\n exit_unlock_item = exit_data['unlock_item']\n exit_visible = exit_data['visible']\n exit_id = exit_data['id']\n exit_locked = exit_data['locked']\n exit_description = exit_data['description']\n\n e = Exit(new_id=exit_id, space=exit_space, name=exit_name, direction=exit_direction,\n unlock_item=exit_unlock_item, visible=exit_visible, locked=exit_locked,\n description=exit_description)\n self.exits.append(e)\n\n # Set player location\n loc_id = self.player.location\n self.player.location = self.get_object_by_id(self.spaces, loc_id)\n\n # Set player items\n player_item_ids = self.player.items\n new_player_items = []\n for item_id in player_item_ids:\n cur_item = self.get_object_by_id(self.items, item_id)\n new_player_items.append(cur_item)\n self.player.items = new_player_items\n\n # Place items, characters, and exits in spaces\n for space in self.spaces:\n item_ids = space.items\n character_ids = space.characters\n exit_ids = space.exits\n space.items = []\n space.characters = []\n space.exits = []\n # items\n for item_id in item_ids:\n item_obj = self.get_object_by_id(self.items, item_id)\n space.items.append(item_obj)\n # characters\n for character_id in character_ids:\n character_obj = self.get_object_by_id(self.characters, character_id)\n space.characters.append(character_obj)\n # exits\n for exit_id in exit_ids:\n exit_obj = self.get_object_by_id(self.exits, exit_id)\n space.exits.append(exit_obj)\n\n # Link spaces to exits, and add unlock items\n for exit in self.exits:\n space_id = exit.space\n item_id = exit.unlock_item\n exit.space = None\n exit.unlock_item = None\n exit.space = self.get_object_by_id(self.spaces, space_id)\n exit.unlock_item = self.get_object_by_id(self.items, item_id)\n\n self.print_all_items_in_all_spaces()", "def load_from_geojson(self, filename_or_url):" ]
[ "0.74006486", "0.6749682", "0.62063956", "0.6168153", "0.597027", "0.59682465", "0.58968747", "0.5858254", "0.57994854", "0.57971156", "0.57004493", "0.5693509", "0.56794953", "0.5672299", "0.5622653", "0.5604859", "0.56037986", "0.5574898", "0.5534123", "0.5477678", "0.5452691", "0.544993", "0.54075867", "0.53974414", "0.5356982", "0.53504527", "0.53306043", "0.5329235", "0.5323243", "0.530253", "0.5291214", "0.52911425", "0.52836764", "0.5249876", "0.5249851", "0.52446127", "0.5244396", "0.5243525", "0.52414376", "0.52362937", "0.5234107", "0.522826", "0.5225521", "0.52141804", "0.52127665", "0.5210781", "0.52057135", "0.52050626", "0.5203685", "0.5200191", "0.51955706", "0.51942664", "0.5182091", "0.5180382", "0.5179897", "0.5158252", "0.5153375", "0.51459575", "0.51315004", "0.5120618", "0.5117726", "0.51153696", "0.5104792", "0.51005864", "0.5099547", "0.5086146", "0.50774235", "0.5077115", "0.50632787", "0.50631124", "0.50630534", "0.5055923", "0.505241", "0.5048829", "0.504528", "0.50399894", "0.5025341", "0.50042003", "0.50009197", "0.50003475", "0.49940228", "0.49892455", "0.49883", "0.49878907", "0.49858874", "0.49856937", "0.4976347", "0.49723262", "0.49721703", "0.49649733", "0.49623814", "0.49620032", "0.4947793", "0.49441195", "0.49413657", "0.49405006", "0.49403977", "0.49349374", "0.49321586", "0.49298528" ]
0.59912586
4
Initialize and setup a new maze level.
def new(self): #groups for drawing self.moving_sprites = pg.sprite.LayeredUpdates() self.static_sprites = pg.sprite.LayeredUpdates() #other groups self.walls = pg.sprite.Group() self.teleports = pg.sprite.Group() self.win = pg.sprite.Group() self.threat = pg.sprite.Group() self.hearts= pg.sprite.Group() for tile_object in self.map.tmxdata.objects: if tile_object.name == "player": self.player = Player(self, tile_object.x, tile_object.y) if tile_object.name == "monster": self.monster = Monster(self, tile_object.x, tile_object.y) if tile_object.name == "wall": Obstacle(self, tile_object.x, tile_object.y, tile_object.width, tile_object.height) if tile_object.name == "mirror": Mirror(self, tile_object.x, tile_object.y, tile_object.width, tile_object.height, self.destinations) if tile_object.name == "pentagram": self.goal=Pentagram(self, tile_object.x, tile_object.y, tile_object.width, tile_object.height) self.camera = Camera(self.map.width, self.map.height) #static sprites self.flashlight=Flashlight(self, int(WIDTH/2), int(HEIGHT/2)) self.darkness=Darkness(self, int(WIDTH/2), int(HEIGHT/2)) if self.minimap_name != None: self.minimap=Minimap(self, self.minimap_name) for i in range(int(PLAYERHEALTH/10)): Heart(self, 726-37*(2-i), 20) self.battery= Battery(self, 726, 52) self.draw_debug = False self.teleport_list=[] for tele in self.teleports: self.teleport_list.append(tele)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_position():\n __maze.init_position()", "def setUp(self):\n\n self.m=Maze()", "def __init__(self, maze, population_size):\n self.maze = maze\n self.population_size = population_size", "def setupLevel(self):\n\n self.state = GameState.SETUP\n\n # vado a leggere il dizionario corrispondente\n # al numero di livello corrente facendo in modo\n # che se il numero di livello richiesto non esiste\n # carico quello più vicino a quello richiesto\n if self.levelIndex>= len(levels):\n self.levelIndex = len(levels) -1\n elif self.levelIndex <0:\n self.levelIndex = 0\n\n level = levels[self.levelIndex]\n\n # nome del livello\n self.level_name = level.get(\"name\", \"Livello %s\" % (self.levelIndex+1))\n\n # dimensione del labirinto (numero di righe e di colonne)\n self.nrows = level.get(\"nrows\", 20)\n self.ncols = level.get(\"ncols\", 20)\n\n # l'algoritmo di generazione del labirinto supporta solo un numero di\n # righe e di colonne dispari, quindi approssimiamo le dimensioni ai\n # valori dispari più vicini\n if self.nrows % 2 == 0:\n self.nrows+=1\n if self.ncols % 2 == 0:\n self.ncols+=1\n\n\n # fattore di scala del labirinto\n # attenzione che, fattori di scala molto\n # grandi, rallentano le prestazioni di gioco\n self.scale = level.get(\"scale\", 30)\n\n background_image_filename = level.get(\"background_image\", None)\n if background_image_filename!=None:\n self.background_image = pygame.image.load(background_image_filename).convert()\n else:\n self.background_image = None\n\n # parametri usati dall'algoritmo di generazione del labirinto\n # si veda https://en.wikipedia.org/wiki/Maze_generation_algorithm\n self.maze_density = level.get(\"maze_density\", Game.MAZE_DENSITY)\n self.maze_complexity = level.get(\"maze_complexity\", Game.MAZE_COMPLEXITY)\n\n # colore delle monete\n self.coin_color = level.get(\"coin_color\", Game.YELLOW)\n\n # tempo a disposizione per completare il livello\n self.time = level.get(\"time\", 240)\n self.clockTime = level.get(\"clock\", 80)\n\n # numero di nemici\n self.numEnemies = level.get(\"num_enemies\", 0)\n\n # numero di ricaricatori temporali\n self.numTimeReloaders = level.get(\"time_reloaders\", 0)\n\n # numero di bombe \"distruggi monete\"\n self.bonus_bombs = level.get(\"bombs\", [])\n # numero di bombe \"distruggi muri\"\n self.bonus_wall_bombs = level.get(\"wall_bombs\", [])\n # numero di bombe \"distruggi nemici\"\n self.bonus_enemy_killers = level.get(\"enemy_killers\", [])\n # numero di pizze che rendono i nemici golosi di monete\n self.bonus_greedy_enemies = level.get(\"greedy_enemies\", 0)\n # numero di portali (teletrasporto del giocatore)\n self.bonus_portals = level.get(\"portals\", 0)\n\n # proiettili a disposizione del giocatore per un certo periodo di tempo\n self.bonus_player_bullets = level.get(\"player_bullets\", [])\n\n #numero di bonus che rendono il giocatore invisibile per un certo periodo di tempo\n self.bonus_invisibility_players = level.get(\"invisibility_players\", [])\n\n # numero di shooters (nemici che sparano contro il giocatore)\n self.numShooters = level.get(\"num_shooters\" , [])\n\n\n # suoni di collisione\n self.sound_explosion = pygame.mixer.Sound(\"Effects/smc-wwvi/big_explosion.ogg\")\n self.sound_bomb_explosion = pygame.mixer.Sound(\"Effects/smc-wwvi/bombexplosion.ogg\")\n\n\n # suono della moneta raccolta\n #self.sound_coin = pygame.mixer.Sound(\"Effects/SFX/beep_7.wav\")\n self.sound_coin = pygame.mixer.Sound(\"Effects/jute-dh/gold.wav\")\n\n # suono del timeReloader\n self.sound_time_reloader = 
pygame.mixer.Sound(\"Effects/SFX/echo_5.wav\")\n\n # suono di collisione con enemy killer\n self.sound_enemy_killer = pygame.mixer.Sound(\"Effects/smc-wwvi/big_explosion.ogg\")\n\n # suono dell'invisibility player\n self.sound_invisibility_player = pygame.mixer.Sound(\"Effects/sound_effects/trekscan.wav\")\n\n # suono del teletrasporto\n self.sound_portal = pygame.mixer.Sound(\"Effects/sound_effects/trekscan.wav\")\n\n # suono dell'arma presa e del proiettile sparato\n self.sound_weapon = pygame.mixer.Sound(\"Effects/jute-dh/hit_2m.wav\")\n\n # suono dei greedy enemies\n self.sound_greedy_enemies = pygame.mixer.Sound(\"Effects/sound_effects/squeak2.wav\")\n\n # suono del levello completato\n self.sound_completed_level = pygame.mixer.Sound(\"Effects/sound_effects/level_completed.wav\")\n\n #\n # IMMAGINI DEGLI SPRITE DI GIOCO: CONFIGURABILE DA FILE DI CONFIGURAZIONE!!\n #\n\n # immagine delle pareti del labirinto\n self.wall_filename = level.get(\"wall\", \"Backgrounds/Dim/Boards.jpg\")\n\n # immagine dei nemici del labirinto\n self.enemies_filename = level.get(\"enemies\", \"Sprites/Animals/duck.png\")\n\n # immagine dei nemici del labirinto che possono anche sparare\n # di default gli shooters hanno lo stesso aspetto dei nemici normali\n self.shooters_filename = level.get(\"shooters\", self.enemies_filename)\n\n # immagine della bomba distruggi monete\n self.bomb_filename = level.get(\"bomb\", \"Sprites/bomb_bonus.png\")\n # immagine della bomba distruggi muri\n self.wall_bomb_filename = level.get(\"wall_bomb\", \"Sprites/bomb_wall_bonus.png\")\n\n self.time_reloaders_filename = level.get(\"time_reloader\", \"Sprites/clessidra.png\")\n self.enemy_killers_filename = level.get(\"enemy_killer\", \"Sprites/skull2.png\")\n self.greedy_enemies_filename = level.get(\"greedy_enemy\", \"Sprites/pizza.png\")\n self.portals_filename = level.get(\"portal\", \"Sprites/CrawlStone/portal.png\")\n self.invisibility_players_filename = level.get(\"invisibility_player\", \"Sprites/CrawlStone/wizard_hat_2.png\")\n\n # lo sprite che fornisce i proiettili ha la stessa immagine dei proiettili\n self.player_bullets_filename = level.get(\"player_bullet\", \"Sprites/CrawlStone/apple.png\")\n self.bonus_player_bullets_filename = self.player_bullets_filename\n\n self.shooters_bullets_filename = level.get(\"shooter_bullet\", \"Sprites/CrawlStone/apple.png\")\n\n #\n # GRUPPI DI SPRITES\n #\n\n # i muri del mio labirinto\n self.walls = pygame.sprite.Group()\n\n # i nemici\n self.enemies = pygame.sprite.Group()\n\n # i nemici che sparano fanno parte dello stesso gruppo dei nemici!\n #self.shooters = pygame.sprite.Group()\n\n # le bombe\n self.bombs = pygame.sprite.Group()\n\n # gli attivatori/disattivatori di nemici golosi\n self.greedyEnemies = pygame.sprite.Group()\n\n # le bombe che spaccano i muri\n self.wallBombs = pygame.sprite.Group()\n\n # i ricaritori temporali\n self.timeReloaders = pygame.sprite.Group()\n\n # le monete da raccogliere\n self.coins = pygame.sprite.Group()\n\n # i killer dei nemici\n self.enemyKillers = pygame.sprite.Group()\n\n # i portali per spostarsi in nuove aree\n self.portals = pygame.sprite.Group()\n\n # i nemici che rendono invisibile il giocatore\n self.invisibilityPlayers = pygame.sprite.Group()\n\n # i proiettili sparati dal giocatore\n self.playerBullets = pygame.sprite.Group()\n\n # i proiettili sparati dagli shooters\n self.shooterBullets = pygame.sprite.Group()\n\n # il bonus che fornisce proiettili sparati dal giocatore\n self.bonusPlayerBullets = pygame.sprite.Group()\n\n\n 
self.free_locations = []\n\n # genero il labirinto che prescinde dai fattori di scala\n self.maze = self.generate_maze()\n #print(self.maze)\n\n # il giocatore e i nemici hanno una dimensione che dipende dal fattore di scala\n self.player = pygame.sprite.GroupSingle(Player(int(self.scale * 0.8), int(self.scale * 0.8),\n self.scale, 1,\n \"Sprites/pac-classic/ghost-red-front.png\",\n )\n )\n self.player.sprite.setWalls(self.walls)\n # imposto le immagini del giocatore sulla base della posizione\n # l'ordine è UP, DOWN , RIGHT, LEFT\n\n self.player.sprite.setImages([\n [\"Sprites/pac-classic/ghost-red-rear.png\",\n \"Sprites/pac-classic/ghost-red-front.png\",\n \"Sprites/pac-classic/ghost-red-right.png\",\n \"Sprites/pac-classic/ghost-red-left.png\",\n ],\n\n [\"Sprites/pac-classic/ghost-orange-rear.png\",\n \"Sprites/pac-classic/ghost-orange-front.png\",\n \"Sprites/pac-classic/ghost-orange-right.png\",\n \"Sprites/pac-classic/ghost-orange-left.png\",\n ],\n\n [\"Sprites/pac-classic/ghost-lblue-rear.png\",\n \"Sprites/pac-classic/ghost-lblue-front.png\",\n \"Sprites/pac-classic/ghost-lblue-right.png\",\n \"Sprites/pac-classic/ghost-lblue-left.png\",\n ],\n\n ]\n )\n\n\n\n\n #\n # CREAZIONE DEGLI SPRITES\n #\n\n # CREO I MIEI NEMICI\n self.createEnemies(self.numEnemies,self.enemies_filename,self.enemies)\n\n # CREO I MIEI NEMICI CHE SPARANO che aggiungo allo stesso gruppo dei nemici!\n self.createShooters(self.numShooters, self.shooters_filename, self.shooters_bullets_filename,self.shooterBullets,\n self.sound_weapon, self.enemies)\n\n # CREO LE BOMBE che sono ObjectDestroyer che distruggono le monete\n self.createObjectDestroyers(self.bonus_bombs,self.bomb_filename,self.bombs, self.coins)\n\n\n # CREO LE WALL BOMBS che sono WallDestroyer che consentono di distruggere i muri\n # interni del labirinto\n self.createInnerObjectDestroyers(self.ncols, self.nrows,self.bonus_wall_bombs,\n self.wall_bomb_filename,self.wallBombs,self.walls)\n # CREO GLI ENEMY KILLERS che sono ObjectDestroyer che consentono di eliminare i nemici\n self.createObjectDestroyers(self.bonus_enemy_killers, self.enemy_killers_filename, self.enemyKillers, self.enemies)\n\n # Creo GREEDY_ENEMIES come ENEMY che consentono di rendere, alternativamente, i nemici golosi di monete oppure no\n self.createEnemies(self.bonus_greedy_enemies, self.greedy_enemies_filename, self.greedyEnemies)\n\n # Alternativamente potrei creare GREED ENEMIES come ObjectDestroyer che in realtà non distruggono niente, ma rendono \"golosi\"\n # i nemici che stanno intorno a loro in modo che inizino a mangiare monete. Se stanno già mangiando\n # monete, al contrario, dovrebbero smettere. 
CHIEDERLO COME ESERCIZIO\n\n # CREO I TIME RELOADERS che consentono di ripristinare il tempo\n self.createEnemies(self.numTimeReloaders, self.time_reloaders_filename, self.timeReloaders)\n\n # CREO I PORTALI che consentono di trasferirsi in una nuova locazione random\n self.createEnemies(self.bonus_portals, self.portals_filename, self.portals)\n\n # CREO I TIME LIMITED POWERS, come quello che rende invisibile il giocatore\n self.createTimeLimitedPowers(self.bonus_invisibility_players, self.invisibility_players_filename, self.invisibilityPlayers)\n # e come il ricaricatore di proiettili\n self.createTimeLimitedPowers(self.bonus_player_bullets, self.bonus_player_bullets_filename, self.bonusPlayerBullets)\n\n self.mazeSurf = pygame.Surface((self.ncols * self.scale, self.nrows * self.scale))\n # disegno il labirinto coi suoi muri\n self.drawMaze()\n\n self.scrollSurface = self.mazeSurf.copy()\n #self.scrollSurface.fill((0, 0, 0))\n\n pos = random.choice(self.free_locations)\n print(\"Loc Player:%s\" % str(pos))\n\n self.player.sprite.setPosition(pos)\n\n # imposto posizione e movimento iniziale\n # ai vari gruppi di sprites\n\n self.setInitialPosition(self.enemies.sprites())\n self.setInitialPosition(self.bombs.sprites())\n self.setInitialPosition(self.wallBombs.sprites())\n self.setInitialPosition(self.timeReloaders.sprites())\n self.setInitialPosition(self.enemyKillers.sprites())\n self.setInitialPosition(self.greedyEnemies.sprites())\n self.setInitialPosition(self.portals.sprites())\n self.setInitialPosition(self.invisibilityPlayers.sprites())\n self.setInitialPosition(self.bonusPlayerBullets.sprites())\n\n #self.setInitialPosition(self.shooters.sprites())\n\n # normalmente i nemici non mangiano monete...\n self.enemies_eater = False\n\n\n # a inizio livello si dà tempo di 5 secondi al Giocatore per divincolarsi\n # da eventuali nemici che compaiono negli immediati dintorni\n # della posizione (casuale) in cui si viene a trovare\n # il giocatore a inizio livello\n self.player.sprite.addPower(PlayerPowers.INVISIBILITY, (self.time,5))\n\n # imposto la musica del livello e la mando in esecuzione\n self.music = level.get(\"music\", \"./Music/Soundimage/Techno-Gameplay_Looping.ogg\")\n pygame.mixer.music.load(self.music)\n # mando in esecuzione in modalità loop (valore -1)\n pygame.mixer.music.play(-1)\n\n # barra di stato del gioco con informazioni sul punteggio\n self.setupGamebarSurface()", "def setUp(self):\n self.m=Maze()\n self.m.reset()", "def on_init(self):\n self.model.maze.initialize(os.path.join(\n config.value['src']['data'], 'maze.csv'))", "def __init__(self, level, treasures, maze_size):\n turtle.Turtle.__init__(self)\n self.shape(\"player_right.gif\")\n self.color(\"blue\")\n self.penup()\n self.pensize(1)\n self.speed(0)\n self.score = 0\n self.level = level\n self.treasures = treasures\n self.maze_size = maze_size\n self.end_writer = writers.EndWriter(maze_size)\n\n turtle.Screen().onkey(self.go_left, \"Left\")\n turtle.Screen().onkey(self.go_right, \"Right\")\n turtle.Screen().onkey(self.go_up, \"Up\")\n turtle.Screen().onkey(self.go_down, \"Down\")\n turtle.Screen().onkey(self.find_path, \"f\")", "def __init__(self, level):\n self.level = level\n self.my_map = {}\n self.my_level = []\n self.my_grid = []", "def __init__(self):\n self.maze = [['#','#','#','#','#','#','#','#','#','#','#',],\n ['#',' ',' ',' ',' ',' ',' ',' ',' ','#','#'],\n ['#',' ',' ',' ',' ',' ',' ',' ',' ','#','#'],\n ['#',' ',' ',' ',' ',' ',' ',' ',' ','#','#'],\n ['#','^','/',' ',' ',' ',' ',' ',' 
','#','#'],\n ['#',' ',' ',' ',' ','@',' ',' ',' ','#','#'],\n ['#',' ',' ',' ',' ',' ',' ',' ',' ','#','#'],\n ['#',' ',' ',' ',' ',' ',' ',' ',' ','#','#'],\n ['#',' ',' ',' ',' ',' ',' ',' ',' ','#','#'],\n ['#',' ',' ',' ',' ',' ',' ',' ',' ','#','#'],\n ['#',' ',' ',' ',' ',' ',' ',' ',' ','#','#'],\n ['#','#','#','#','#','#','#','#','#','#','#'],\n ['#','#','#','#','#','#','#','#','#','#','#']]\n self.diamonds = 1\n self.width = 10\n self.height = 12\n self.crates = 1", "def __init__(self):\r\n\t\t# Gives a dictionary that returns 1 if the key does not exist\r\n\t\tself.MazeMap = defaultdict(lambda: 1, {})\r\n\t\tself.x_maximum, self.y_maximum = 0, 0", "def __init__(self, maze):\n pygame.init()\n # Loads the character's inventory\n self.inventory = 0\n\n # Character's position on the grid and size (pixels)\n self.image = pygame.image.load(MACGYVER_IMG).convert_alpha()\n self.square_x = 1\n self.square_y = 1\n self.x_pos = 40\n self.y_pos = 40\n self.maze = maze", "def initialize():\n\n global PLAYER # this means we use the global var PLAYER and cannot have a local var named PLAYER\n global LEVEL_COUNTER\n\n LEVEL_COUNTER = 1\n \n coordinates = generate_coords()\n\n PLAYER = Stark()\n tree = Tree()\n ww = WhiteWalker()\n crown = Crown()\n gray_gem = GrayGem()\n clear_board()\n GAME_BOARD.create(\"Snow\",\"Snow\")\n GAME_BOARD.draw_msg(\"Level \" + str(LEVEL_COUNTER) + \". Winter is coming.\")\n generate_level(coordinates, [PLAYER, ww, gray_gem, crown, tree, tree, gray_gem, tree, tree, gray_gem, tree])\n\n # for i in range(0,NUM_ELTS):\n # place_on_board(elts[i], coordinates[i][0], coordinates[i][1])", "def _maze(self):\n try:\n return self.__maze\n except AttributeError:\n pass\n # create and store the maze object\n supershape_name, supershape = random.choice(_supershapes)\n grid = polymaze.PolyGrid(supershape=supershape)\n grid.create_string(self._text, complexity=self._complexity)\n self.__maze = polymaze.Maze(grid)\n return self.__maze", "def setup_level_1() -> object:\n #create level object\n level = Level()\n\n #create vertical walls for level\n create_and_add_vertical_walls_to_list(4, 39, 4, level.wall_list)\n create_and_add_vertical_walls_to_list(4, 25, 19, level.wall_list)\n create_and_add_vertical_walls_to_list(33, 54, 19, level.wall_list)\n create_and_add_vertical_walls_to_list(4, 25, 34, level.wall_list)\n create_and_add_vertical_walls_to_list(33, 54, 34, level.wall_list)\n create_and_add_vertical_walls_to_list(14, 25, 54, level.wall_list)\n create_and_add_vertical_walls_to_list(33, 44, 54, level.wall_list)\n create_and_add_vertical_walls_to_list(14, 45, 74, level.wall_list)\n create_and_add_vertical_walls_to_list(54, settings.HEIGHT, 23, level.wall_list)\n create_and_add_vertical_walls_to_list(54, settings.HEIGHT, 30, level.wall_list)\n\n #create horizontal walls for level\n create_and_add_horiontal_walls_to_list(4, 34, 4, level.wall_list)\n create_and_add_horiontal_walls_to_list(4, 9, 19, level.wall_list)\n create_and_add_horiontal_walls_to_list(15, 24, 19, level.wall_list)\n create_and_add_horiontal_walls_to_list(30, 54, 19, level.wall_list)\n create_and_add_horiontal_walls_to_list(54, 74, 14, level.wall_list)\n create_and_add_horiontal_walls_to_list(4, 24, 39, level.wall_list)\n create_and_add_horiontal_walls_to_list(30, 54, 39, level.wall_list)\n create_and_add_horiontal_walls_to_list(54, 74, 44, level.wall_list)\n create_and_add_horiontal_walls_to_list(19, 24, 54, level.wall_list)\n create_and_add_horiontal_walls_to_list(30, 35, 54, level.wall_list)\n\n #create knight 
character for level\n create_and_add_character_to_list(\"pics\\prison_guard.png\", 0.2, 270, 470, level.character_list)\n\n #knight asks for bribe\n guard_convo = Dialogue(300, 500, 150, 50, \"I know who you are...\\n if you pay me,\\n I'll turn a blind eye.\")\n level.dialogue_list.append(guard_convo)\n\n #create coin item to bribe knight character\n create_and_add_item_to_list(\"pics\\gold_1.png\", 0.5, 400, 250, level.item_list)\n\n #create prompts and info for rooms for object\n cell = RoomInfo(120, 100, \"Dungeon cell. There's a note and key. Someone's waiting for you in the garden.\")\n level.room_info_list.append(cell)\n guard_room = RoomInfo(450, 280, \"Guardroom. There's the unconconsious bodies of the guards. Your saviours must've gone to great lengths...\")\n level.room_info_list.append(guard_room)\n torture_chamber = RoomInfo(120, 280, \"Torture chamber. You've been here before. They were questioning you, but you didn't answer.\")\n level.room_info_list.append(torture_chamber)\n battle_room = RoomInfo(650, 280, \"Battle room. You see that your captors are fighting revolutionaries- those who seek to bring back a lost king.\")\n level.room_info_list.append(battle_room)\n stairwell = RoomInfo(220, 520, \"Stairwell. There's a lone guard who doesn't look surprised to see you\")\n level.room_info_list.append(stairwell)\n\n return level", "def setup_level_2() -> object:\n #create level object\n level = Level()\n\n #create vertical walls for level\n create_and_add_vertical_walls_to_list(4, 19, 4, level.wall_list)\n create_and_add_vertical_walls_to_list(12, 54, 19, level.wall_list)\n create_and_add_vertical_walls_to_list(0, 5, 23, level.wall_list)\n create_and_add_vertical_walls_to_list(0, 4, 30, level.wall_list)\n create_and_add_vertical_walls_to_list(55, settings.HEIGHT, 23, level.wall_list)\n create_and_add_vertical_walls_to_list(55, settings.HEIGHT, 30, level.wall_list)\n create_and_add_vertical_walls_to_list(4, 15, 34, level.wall_list)\n create_and_add_vertical_walls_to_list(24, 54, 34, level.wall_list)\n create_and_add_vertical_walls_to_list(29, 45, 47, level.wall_list)\n create_and_add_vertical_walls_to_list(24, 29, 54, level.wall_list)\n create_and_add_vertical_walls_to_list(44, 54, 54, level.wall_list)\n create_and_add_vertical_walls_to_list(14, 55, 73, level.wall_list)\n\n #create horizontal walls for level\n create_and_add_horiontal_walls_to_list(4, 24, 4, level.wall_list)\n create_and_add_horiontal_walls_to_list(30, 34, 4, level.wall_list)\n create_and_add_horiontal_walls_to_list(20, 24, 14, level.wall_list)\n create_and_add_horiontal_walls_to_list(30, 74, 14, level.wall_list)\n create_and_add_horiontal_walls_to_list(4, 19, 19, level.wall_list)\n create_and_add_horiontal_walls_to_list(34, 54, 24, level.wall_list)\n create_and_add_horiontal_walls_to_list(48, 60, 29, level.wall_list)\n create_and_add_horiontal_walls_to_list(68, 74, 29, level.wall_list)\n create_and_add_horiontal_walls_to_list(48, 60, 44, level.wall_list)\n create_and_add_horiontal_walls_to_list(68, 74, 44, level.wall_list)\n create_and_add_horiontal_walls_to_list(54, 73, 54, level.wall_list)\n create_and_add_horiontal_walls_to_list(19, 24, 54, level.wall_list)\n create_and_add_horiontal_walls_to_list(30, 35, 54, level.wall_list) \n\n #create sword item for \"outfit change\" \n create_and_add_item_to_list(\"pics\\sword_item.png\", 0.05, 75, 100, level.item_list)\n\n #create mysterious figure for level\n create_and_add_character_to_list(\"pics\\mystery_figure.png\", 0.095, 270, 350, level.character_list)\n\n 
#create dialogue for mysterious figure character\n find_disguise_convo = Dialogue(300, 390, 300, 50, \"Someone will notice you!\\n I've hidden something in the servant's quarters,\\n to make you fit in with the nobility.\")\n level.dialogue_list.append(find_disguise_convo)\n\n #info prompts and text for level\n balcony = RoomInfo(640, 500, \"Balcony. Along with the forest and sea, you can see that a battle is coming.\")\n level.room_info_list.append(balcony)\n kitchen = RoomInfo(270, 90, \"Kitchen. There are plentry of servants around. Your torn clothes are eye-catching, and may sabotage your escape\")\n level.room_info_list.append(kitchen)\n great_hall = RoomInfo(270, 470, \"Great hall. You could have sworn that someone recognized you, but nobody acts to capture you.\")\n level.room_info_list.append(great_hall)\n sitting_room = RoomInfo(650, 230, \"Private sitting room. You find several sketches... sketches that look like a richer, healthier version of you.\")\n level.room_info_list.append(sitting_room)\n\n return level", "def __init__(self, _pendown=1, gridmode=False, gridsize=50, homeX = 50 + 25 + 5, homeY = 50 + 25 + 5, canvWidth = 400, canvHeight = 200, \\\n turtleMainColor=\"#00A651\", turtleAccentColor=\"#FFF600\", speed = 5, rotspeed = 5, pencolor = 'red', penwidth=3):\n self._turtleMainColor = turtleMainColor\n self._turtleAccentColor = turtleAccentColor\n self._speed = speed\n self._rotspeed = rotspeed\n self._pendown = _pendown\n self._pencolor = pencolor\n self._penwidth = penwidth\n self._rotation = 90\n self._gridsize = gridsize\n self._gridmode = gridmode\n \n if(gridmode and homeX == 80):\n homeX = 0\n homeY = 0\n \n self._x = homeX\n self._y = homeY\n self._homeX = homeX\n self._homeY = homeY\n \n self._canvWidth = canvWidth\n self._canvHeight = canvHeight\n self._actions = []\n self._levelDataString = [] \n \n self._walls = []\n self._lava = []\n \n self._appendCurrentState();", "def test_ctor(self):\r\n cols = 5\r\n rows = 5\r\n maze = Maze(rows, cols)\r\n\r\n self.assertEqual(maze.num_cols, cols)\r\n self.assertEqual(maze.num_rows, rows)\r\n self.assertEqual(maze.id, 0)\r\n self.assertEqual(maze.grid_size, rows*cols)\r\n\r\n id=33\r\n maze2 = Maze(rows, cols, id)\r\n self.assertEqual(maze2.num_cols, cols)\r\n self.assertEqual(maze2.num_rows, rows)\r\n self.assertEqual(maze2.id, id)\r\n self.assertEqual(maze2.grid_size, rows * cols)", "def setup(self):\n build_world.start_level(self)", "def __init__(self, startpos = (75,75), angle = 0, colour = (240,100,100),\n maxSpeed = 20, maxAccel = 1, maxAngle = 0.1,\n width = 1600, height = 900, maze = None,\n intermediates = (8,), inputdistance = [50,100,150], inputangle = [1.2,0.6,0,-0.6,-1.2],\n parentname = \"\", parentcolour = (240,100,100), name = None,orders = [1,2,3,4,5,6,7,8]):\n self.startpos, self.startangle, self.colour = startpos, angle, colour\n self.maxSpeed, self.maxAccel, self.maxAngle = maxSpeed, maxAccel, maxAngle\n self.maze = maze\n self.width, self.height = width, height\n self.parentname, self.parentcolour = parentname, parentcolour\n # Create dimensions array based on input, intermediate dimensions and output (4)\n self.inputType = 1 # 0: point, 1: linear\n self.setDimension(inputdistance,inputangle,intermediates,orders)\n self.drag = 0.99\n self.initWeights()\n self.sightLength = 200\n \n if name is not None: \n self.name = name\n else:\n self.name = self.getName()\n \n self.reset()", "def __init__(self, input_field: str, depth: int):\n self.maze = self._file_to_matrix(input_field, depth)\n\n # Determine 
the rim of the labyrinth.\n self.rim_x = self.maze.shape[2] - 4\n self.rim_y = self.maze.shape[1] - 4\n\n self.graph = nx.Graph()\n\n # Connect Path / points\n self.path_coordinates = np.argwhere(self.maze == PATH)\n self._build_path()\n\n # Determine Portal points and connect them to corresponding\n # other dimension.\n self.merge_portals_to_path()\n self.connect_portals()\n\n start = tuple(np.argwhere(self.maze == \"AA\")[0])\n goal = tuple(np.argwhere(self.maze == \"ZZ\")[0])\n\n try:\n self.shortest_path_length = nx.shortest_path_length(\n self.graph, start, goal)\n except nx.NetworkXNoPath:\n self.shortest_path_length = None", "def setup_level_4() -> object:\n #create level object\n level = Level()\n return level", "def _create_new_maze(self, settings: setts.Settings) -> mazegraph.MazeGraph:\n graph = mazegraph.MazeGraph(settings)\n self._make_random_graph(graph)\n\n return graph", "def __init__(self, nx, ny, ix=0, iy=0):\r\n self.__nx = nx\r\n self.__ny = ny\r\n self.__ix = ix\r\n self.__iy = iy\r\n self.__current_room = 0, 0\r\n self.__maze = [[Room(x, y) for y in range(ny)] for x in range(nx)]\r\n self.__entrance_room = 0, 0\r\n self.__exit_room = 0, 0\r\n self.__pillar_a = 0, 0\r\n self.__pillar_e = 0, 0\r\n self.__pillar_i = 0, 0\r\n self.__pillar_p = 0, 0\r\n self.count = 0\r\n self.original_map = \"\"", "def main():\n global numrect\n global rectsize\n rectsize += 1\n if rectsize > 30:\n rectsize = 30\n numrect += 2\n # print(\"rectsize:\\t\" + str(rectsize))\n # print(\"numrect:\\t\" + str(numrect))\n maze, rectangles = MazeGenerator.main(msize, numrect, rectsize)\n global lvl\n # print(str(lvl))\n lvl += 1\n pygame.display.set_caption(\"Dungeon Crawlers\" + \" \" + \"Monsters Killed: \" + str(mobsKilled) + \" Current Floor: \" + str(lvl))\n runMaze(maze, rectangles)", "def __init__(self, speed, maze_layer='walls', prevent_backtracking=True,\n allow_wall_backtracking=False, only_turn_at_wall=False):\n super(RandomMazeWalk, self).__init__(speed, maze_layer=maze_layer)\n self._prevent_backtracking = prevent_backtracking\n self._allow_wall_backtracking = allow_wall_backtracking\n self._only_turn_at_wall = only_turn_at_wall", "def __init__(self, width, height, screen):\n self.width = width\n self.height = height\n self.screen = screen\n self.currentLevel = 1\n self.levels = {\n 1: self.level1Representation(),\n 2: self.level2Representation()\n }\n self.totalLevels = len(self.levels)", "def __init__(self):\n\n # Width and height of the window, in pixels.\n self.width = 800\n self.height = 600\n width = self.width\n height = self.height\n\n # Create the root window.\n self.root = tkinter.Tk()\n root = self.root\n\n #\n # Buttons etc.\n #\n controls = tkinter.Frame(root)\n controls.pack(side=tkinter.TOP, fill='x')\n\n build = tkinter.Button(controls, text='Build new maze')\n build.pack(side=tkinter.LEFT)\n\n reset = tkinter.Button(controls, text='Reset maze')\n reset.pack(side=tkinter.LEFT)\n\n solve = tkinter.Button(controls, text='Solve maze')\n solve.pack(side=tkinter.LEFT)\n\n # maze_type: 0 = prim, 1 = random.\n maze_type = tkinter.IntVar()\n prim = tkinter.Radiobutton(controls, text='Prim', variable=maze_type, \n value=0)\n prim.pack(side=tkinter.LEFT)\n rand = tkinter.Radiobutton(controls, text='Random', variable=maze_type,\n value=1)\n rand.pack(side=tkinter.LEFT)\n prim.select()\n\n def lbl_entry(lbl, v):\n l = tkinter.Label(controls, text=\"{}: \".format(lbl))\n l.pack(side=tkinter.LEFT)\n e = tkinter.Entry(controls, textvariable=v, width=5)\n e.pack(side=tkinter.LEFT)\n\n 
# Maze size\n nrows_var = tkinter.StringVar()\n lbl_entry('Rows', nrows_var)\n ncols_var = tkinter.StringVar()\n lbl_entry('Columns', ncols_var)\n nrows_var.set('30')\n ncols_var.set('50')\n\n # Sparseness\n sparse = tkinter.StringVar()\n lbl_entry('Sparseness', sparse)\n sparse.set('.05')\n\n # Delay\n delay = tkinter.StringVar()\n lbl_entry('Draw delay (s)', delay)\n delay.set('0.0')\n\n #\n # Canvas in which to display the maze.\n #\n self.cvs = tkinter.Canvas(width=width, height=height)\n cvs = self.cvs\n cvs.pack(side=tkinter.TOP, expand=True, fill='both')\n\n # Build callback\n def build_act():\n nrows = int(nrows_var.get())\n ncols = int(ncols_var.get())\n sparseness = float(sparse.get())\n self.maze = self.build_fn(nrows, ncols, sparseness)\n self.display_maze()\n build.configure(command=build_act)\n\n # Reset callback\n def reset_act():\n self.display_maze()\n reset.configure(command=reset_act)\n\n\n # Solve callback\n def solve_act():\n self.solve_maze(float(delay.get()))\n\n solve.configure(command=solve_act)\n\n # Prim callback\n def prim_act():\n self.build_fn = get_prebuilt_maze_instance\n \"\"\"\n nrows = int(nrows_var.get())\n ncols = int(ncols_var.get())\n sparseness = float(sparse.get())\n self.maze = get_prebuilt_maze_instance(nrows, ncols, sparseness)\n self.display_maze()\n \"\"\"\n prim.configure(command=prim_act)\n\n # Random callback\n def random_act():\n self.build_fn = get_random_maze_instance\n \"\"\"\n nrows = int(nrows_var.get())\n ncols = int(ncols_var.get())\n sparseness = float(sparse.get())\n self.maze = get_random_maze_instance(nrows, ncols, sparseness)\n self.display_maze()\n \"\"\"\n rand.configure(command=random_act)\n\n prim.invoke()\n\n root.mainloop()\n\n return", "def setup_scene(self):\n\n # read map\n options, landscapes, statics, dynamics, trees, hero, hare = read_map('test.map')\n self.num_of_blocks_X, self.num_of_blocks_Y = options['size']\n with self.canvas:\n # init landscapes\n block_x = 0\n for i in xrange(self.num_of_blocks_X):\n block_y = 0\n for j in xrange(self.num_of_blocks_Y):\n class_name = landscapes[i][j]\n if class_name is not None:\n clazz = eval(class_name.capitalize())\n else:\n clazz = Grass\n block = clazz(pos=(block_x, block_y),\n size=(self.block_width, self.block_height), border=(0, 0))\n self.blocks[i][j] = block\n block_y += self.block_height \n block_x += self.block_width\n\n # init dynamics\n for x, y, class_name in dynamics:\n if 'dynamics_as_blocks' in options and options['dynamics_as_blocks']:\n x, y = (x + 0.5) * self.block_width, (y + 0.5) * self.block_height\n eval(class_name.capitalize())(x, y)\n \n with self.canvas:\n # draw or hero\n HeroRabbit(BLOCK_SIZE[0]*(hero[0] + 0.5), BLOCK_SIZE[1]*(hero[1] + 0.5))\n Hare(BLOCK_SIZE[0]*(hare[0] + 0.5), BLOCK_SIZE[1]*(hare[1] + 0.5))\n\n # init statics\n def _is_mountain(i, j):\n return int(0 <= i < self.num_of_blocks_X and 0 <= j <= self.num_of_blocks_Y and\n statics[i][j] == 'mountain')\n\n def _get_mountain_type(i, j):\n opensides = (_is_mountain(i - 1, j), _is_mountain(i, j + 1),\n _is_mountain(i + 1, j), _is_mountain(i, j - 1)) # left, top, right, bottom\n opensides_to_type = {\n (1, 1, 1, 1): 'center',\n (1, 0, 1, 0): 'horizontal_center',\n (0, 1, 0, 1): 'vertical_center',\n (1, 0, 0, 0): 'horizontal_right',\n (0, 1, 0, 0): 'vertical_bottom',\n (0, 0, 1, 0): 'horizontal_left',\n (0, 0, 0, 1): 'vertical_top',\n }\n return opensides_to_type.get(opensides, 'horizontal_center')\n \n _mountains = []\n _bushes= []\n \n for i in xrange(self.num_of_blocks_X):\n for j in 
xrange(self.num_of_blocks_Y):\n class_name = statics[i][j]\n if class_name is not None:\n pos = (i + 0.5) * self.block_width, (j + 0.5) * self.block_height\n if class_name == 'bush':\n #Bush(*pos)\n _bushes.append(pos)\n elif class_name == 'mountain':\n _mountains.append((pos, _get_mountain_type(i, j)))\n #Mountain(*pos, type=_get_mountain_type(i, j))\n \n for tree_pos in trees:\n Tree(BLOCK_SIZE[0]*(tree_pos[0] + 0.5), BLOCK_SIZE[1]*(tree_pos[1] + 0.5))\n \n with self.canvas:\n for pos in _bushes:\n Bush(*pos)\n \n for pos, type in _mountains:\n Mountain(*pos, type=type)\n\n HolyCarrot(13.5*self.block_width, 7.5*self.block_height)\n # This should be called at the end\n self.reindex_graphics()", "def __init__(self, parent, maze_width, maze_height, scale=20):\n self.parent = parent\n self.parent.title(\"Maze Exploration Visualization\")\n\n self.maze_width = maze_width\n self.maze_height = maze_height\n self.scale = scale\n\n # Compute actual width and height\n self.width = maze_width * scale\n self.height = maze_height * scale\n\n # Store tkinter object\n self.frame = tkinter.Frame(self.parent,\n width=self.width,\n height=self.height,\n highlightthickness=1,\n highlightbackground=\"black\")\n self.canvas = tkinter.Canvas(self.frame,\n width=self.width, \n height=self.height)\n self.canvas.pack(expand=False)\n self.frame.pack(expand=False)\n\n # Initialize look of grid\n self.draw_gray_grid()\n\n self.person = None\n self.draw_person(self.maze_width // 2, self.maze_height // 2)", "def __init__(self, maze):\n self.alpha = 0.1 # learning rate.\n self.gamma = 0.8 # discount factor.\n self.epsilon = 1 # randomness factor. e=0 makes the agent greedy.\n self.maze = copy.deepcopy(maze)\n self.grid = copy.deepcopy(maze.grid)\n self.reward_table = [\n [None for _ in range(len(self.grid[0]))] for _ in range(len(self.grid))]\n self.qtable = [\n [0 for _ in range(len(self.grid[0]))] for _ in range(len(self.grid))]\n\n # Initialize the reward table.\n for i in range(len(self.reward_table)):\n for j in range(len(self.reward_table[0])):\n if self.grid[i][j] == ' ':\n self.reward_table[i][j] = -0.04 # -0.01\n elif self.grid[i][j] == 'G':\n self.reward_table[i][j] = 1\n elif self.grid[i][j] == 'E':\n self.reward_table[i][j] = -1", "def setup_level_3() -> object:\n #create level object\n level = Level()\n\n #create vertical walls for level\n create_and_add_vertical_walls_to_list(4, settings.HEIGHT, 4, level.wall_list)\n create_and_add_vertical_walls_to_list(0, 4, 23, level.wall_list)\n create_and_add_vertical_walls_to_list(0, 4, 30, level.wall_list)\n create_and_add_vertical_walls_to_list(4, 24, 49, level.wall_list)\n create_and_add_vertical_walls_to_list(24, settings.HEIGHT, 74, level.wall_list)\n\n #create horizontal walls for level\n create_and_add_horiontal_walls_to_list(4, 24, 4, level.wall_list) \n create_and_add_horiontal_walls_to_list(30, 49, 4, level.wall_list) \n create_and_add_horiontal_walls_to_list(4, 19, 24, level.wall_list)\n create_and_add_horiontal_walls_to_list(34, 74, 24, level.wall_list)\n \n #create rebels for level\n create_and_add_character_to_list(\"pics\\mystery_figure.png\", 0.12, 300, 490, level.character_list)\n create_and_add_character_to_list(\"pics\\prison_guard.png\", 0.21, 230, 440, level.character_list)\n create_and_add_character_to_list(\"pics\\prison_guard.png\", 0.21, 370, 440, level.character_list)\n\n #rebels greet player\n rebel_1_greet = Dialogue(200, 490, 100, 20, \"It's the lost king!\")\n level.dialogue_list.append(rebel_1_greet)\n rebel_2_greet = Dialogue(400, 
490, 130, 40, \"We've spent so long\\ntrying to free you.\")\n level.dialogue_list.append(rebel_2_greet)\n rebel_3_greet = Dialogue(300, 540, 150, 40, \"You're our only hope,\\nkeep going.\")\n level.dialogue_list.append(rebel_3_greet)\n\n return level", "def __init__(self):\n # Passing the class make this Python 2 and Python 3 compatible\n super(MayaSceneLevelGeneratorUI, self).__init__(parent=maya_main_window())\n\n # Create the generators needed\n self._level_gen = level.LevelGenerator([blocks.BlockFile(\"\", blk_type) for blk_type in VALID_BLOCK_TYPES])\n self._scene_gen = MayaSceneLevelGenerator(None) # Fill in level at button press time\n\n # Window things\n self.setWindowTitle(\"Maya Scene Level Generator\")\n self.resize(500, 200)\n self.setWindowFlags(self.windowFlags() ^ PySide2.QtCore.Qt.WindowContextHelpButtonHint)\n\n # Set up for the first time\n self._create_widgets()\n self._create_layout()\n self._refresh_view()\n self._create_connections() # Order matters, since refreshing triggers connections\n\n print(self._level_gen.block_list) # TODO delete", "def setup_members(self):\n ### cell\n self.cell_size = 8\n self.cell_row = 80\n self.cell_col = 100\n self.color_alive = \"black\"\n self.color_dead = \"white\"\n\n ### world\n self.init_modes = {} # read modes from json file\n self.init_world = {} # begining status\n self.world = {} # world's map\n # current status of world\n self.world_status = GOL(self.cell_row, self.cell_col)\n self.world_setable = True\n self.world_alive = False\n\n # widgets\n self.toolbar_height = 40\n self.world_size = [self.cell_size * self.cell_row,\n self.cell_size * self.cell_col]\n self.window_size = self.world_size\n self.window_size[0] += self.toolbar_height\n\n # resource\n self.saver_icon = \"save.gif\"\n self.run_icon = \"run.gif\"\n self.pause_icon = \"pause.gif\"\n self.stop_icon = \"stop.gif\"\n self.modes_file = \"gol.json\"\n self.modes_names = []", "def setup(self, level):\r\n\r\n # Used to keep track of our scrolling\r\n self.view_bottom = 0\r\n self.view_left = 0\r\n\r\n # Keep track of the score\r\n self.score = 0\r\n\r\n # Keep track of lives\r\n # self.lives = 5\r\n\r\n # Create the Sprite lists\r\n self.player_list = arcade.SpriteList()\r\n self.foreground_list = arcade.SpriteList()\r\n self.background_list = arcade.SpriteList()\r\n self.wall_list = arcade.SpriteList()\r\n self.coin_list = arcade.SpriteList()\r\n\r\n # Set up the player, specifically placing it at these coordinates.\r\n image_source = \"images/Alice/Alice7_front.png\"\r\n self.player_sprite = arcade.Sprite(image_source, CHARACTER_SCALING)\r\n self.player_sprite.center_x = PLAYER_START_X\r\n self.player_sprite.center_y = PLAYER_START_Y\r\n self.player_list.append(self.player_sprite)\r\n\r\n # --- Load in a map from the tiled editor ---\r\n\r\n # Name of the layer in the file that has our platforms/walls\r\n platforms_layer_name = 'Platforms'\r\n moving_platforms_layer_name = 'Moving Platforms'\r\n # Name of the layer that has items for pick-up\r\n coins_layer_name = 'Coins'\r\n # Name of the layer that has items for foreground\r\n foreground_layer_name = 'Foreground'\r\n # Name of the layer that has items for background\r\n background_layer_name = 'Background'\r\n # Name of the layer that has items we shouldn't touch\r\n dont_touch_layer_name = \"Don't Touch\"\r\n\r\n # Map name\r\n map_name = f\"map4_level_{level}.tmx\"\r\n\r\n # Read in the tiled map\r\n my_map = arcade.tilemap.read_tmx(map_name)\r\n\r\n # Calculate the right edge of the my_map in pixels\r\n 
self.end_of_map = my_map.map_size.width * GRID_PIXEL_SIZE\r\n\r\n # -- Background\r\n self.background_list = arcade.tilemap.process_layer(my_map,\r\n background_layer_name,\r\n TILE_SCALING)\r\n\r\n # -- Foreground\r\n self.foreground_list = arcade.tilemap.process_layer(my_map,\r\n foreground_layer_name,\r\n TILE_SCALING)\r\n\r\n # -- Platforms\r\n self.wall_list = arcade.tilemap.process_layer(map_object=my_map,\r\n layer_name=platforms_layer_name,\r\n scaling=TILE_SCALING,\r\n use_spatial_hash=True)\r\n # -- Moving Platforms\r\n moving_platforms_list = arcade.tilemap.process_layer(my_map, moving_platforms_layer_name, TILE_SCALING)\r\n for sprite in moving_platforms_list:\r\n self.wall_list.append(sprite)\r\n\r\n # -- Coins\r\n self.coin_list = arcade.tilemap.process_layer(my_map,\r\n coins_layer_name,\r\n TILE_SCALING,\r\n use_spatial_hash=True)\r\n\r\n # -- Don't Touch Layer\r\n self.dont_touch_list = arcade.tilemap.process_layer(my_map,\r\n dont_touch_layer_name,\r\n TILE_SCALING,\r\n use_spatial_hash=True)\r\n\r\n # --- Other stuff\r\n # Set the background color\r\n if my_map.background_color:\r\n arcade.set_background_color(my_map.background_color)\r\n\r\n # Create the 'physics engine'\r\n self.physics_engine = arcade.PhysicsEnginePlatformer(self.player_sprite,\r\n self.wall_list,\r\n GRAVITY)", "def __init__(self, nx, ny, ix=0, iy=0):\n\n self.nx, self.ny = nx, ny\n self.ix, self.iy = ix, iy\n self.svg_name = \"maze.svg\"\n self.maze_map = [[Cell(x, y, maze_size=nx) for y in range(ny)] for x in range(nx)]\n self.__map_ids = {}\n self.__init_map_ids()\n\n self.__current_position = None\n self.__objective_position = None\n\n # Generate the initial position\n self.__initialize_current_position()\n\n # Generate objective position\n self.__create_objective()\n\n # Generating the traps\n self.__generate_random_traps(max(nx, ny)*2)", "def __activate(self, x: int, y: int, tree: int) -> None:\n self.__maze[x, y] = tree", "def __init__(self, maze_string, cli, start, finish):\n self.maze = [list(string) for string in maze_string.splitlines()]\n self.set_interface(cli)\n self.solved = False\n self.start = start\n self.current_pos = None\n self.finish_pos = None\n self.set_starting_pos()\n self.set_finishing_pos(finish)", "def create_level(self, name):\n \n # Create a level object\n level = Level()\n size_y=8\n size_x=10\n # Separates static and non static parts\n # This will speed up network games, since only the non static part will be\n # sent on the network\n level_static = soya.World(level)\n \n # Load 3 materials (= textures) for files ./materials{grass|ground|snow}.data\n \n ground = soya.Material.get(\"block2\")\n \n \n # Creates a landscape, from the heighmap \"./images/map.png\"\n # The landscape is in the static part (=level_static), because it won't change along the game.\n land = soya.Land(level_static)\n land.y =0.0\n land.from_image(soya.Image.get(\"floor.png\"))\n \n # Sets how high is the landscape\n land.multiply_height(-0.0)\n \n # These values are trade of between quality and speed\n land.map_size = 8\n land.scale_factor = 1.5\n land.texture_factor = 1.0\n \n # Set the texture on the landscape, according to the height\n # (i.e. 
height 0.0 to 15.0 are textured with grass, ...)\n \n land.set_material_layer(ground, 0.0, 25.0)\n \n # squares where the player starts\n # Note that this is stored in physical, not abstract, coordinates.\n always_clear=[(-1,-1),(-2,-1),(0,-1),(-1,-2),(-1,0)]\n cube = soya.Shape.get(\"cube\")\n \n # r and c represent the cube positions in the grid,\n # while x and y represent the physical coordinates in the world.\n # Note the simple formula: r = x + self.size_x , c = y + self.size_y\n border_row, border_col = 2*size_x - 2, 2*size_y - 2\n for r, x in enumerate(range(-size_x,size_x-1)):\n for c, y in enumerate(range(-size_y,size_y-1)):\n bx = x +128\n by = y +128 \n if (r % 2 == 0 and c % 2 == 0) or \\\n (r == 0 or c == 0 or r == border_row or c == border_col ):\n # This is a wall block\n block = soya.Volume(level_static, cube)\n block.scale(1.0, 1.0, 1.0)\n block.set_xyz(bx, 0.5, by) \n elif random() < 0.8 and not (x, y) in always_clear:\n # A soft block\n block = SoftBox()\n level.add_mobile(block)\n block.scale(1.0, 1.0,1.0)\n block.set_xyz(bx, 0.5, by)\n \n # Creates a light in the level, similar to a sun (=a directional light)\n sun = soya.Light(level_static)\n sun.directional = 1\n sun.diffuse = (1.0, 0.8, 0.4, 1.0)\n sun.rotate_vertical(-45.0)\n \n # Creates a sky atmosphere, with fog\n atmosphere = soya.SkyAtmosphere()\n atmosphere.ambient = (0.3, 0.3, 0.4, 1.0)\n atmosphere.fog = 1\n atmosphere.fog_type = 0\n atmosphere.fog_start = 40.0\n atmosphere.fog_end = 50.0\n atmosphere.fog_color = atmosphere.bg_color = (0.2, 0.5, 0.7, 1.0)\n atmosphere.skyplane = 1\n atmosphere.sky_color = (1.5, 1.0, 0.8, 1.0)\n \n # Set the atmosphere to the level\n level.atmosphere = atmosphere\n \n # Save the level as \"./worlds/level_demo.data\" (remember, levels are subclasses of worlds)\n level_static.filename = level.name = name+\"_bbomber_static\"\n level_static.save()\n level.filename = level.name = name+\"_bbomber\"\n level.save()", "def init(self, windowsize:tuple):\r\n y_count, x_count = 3, 0 #< Set the starting counter for the look_up_table. y starts with three because the first three lines are just Nones\r\n # Creating the constant maze \r\n maze_size = windowsize[0], windowsize[1] - 2 * self.grid_size\r\n self.maze = pg.Surface(maze_size) \r\n \r\n \r\n \r\n # Draw the outermost rectangles on self.maze\r\n pg.draw.rect(self.maze, Colors.colors['BLUE'], ((0, 3 * self.grid_size), (28 * self.grid_size, 31 * self.grid_size)), 4)\r\n pg.draw.rect(self.maze, Colors.colors['BLUE'], ((0 + self.grid_size // 2, 3 * self.grid_size + self.grid_size // 2),(27 * self.grid_size, 30 * self.grid_size)), 4) \r\n # Draw the inner rectangles\r\n for y in self.look_up_table[3 : -2]: #< y is a list of one row from the maze\r\n for x in y: #< x is a string that is decoded as already explained\r\n pos = [self.grid_size * x_count, self.grid_size * y_count]\r\n # Set reference position in the middle of one square\r\n pos[0] += self.grid_size // 2\r\n pos[1] += self.grid_size // 2\r\n x_count += 1\r\n # Check if x is rectangle\r\n if x != None and x[0] == 'r':\r\n # When the size of the string is equal or greater than 4 it's rectangle with a specific size and not just a border.\r\n if len(x) >= 4:\r\n # get the x and y size of the rectangle. 
x will be something like 'rx1_y1' x1 resprestens the size in x direction and y1 in y direction.\r\n xy_dim = x[1:].split(\"_\") \r\n xy_dim[0] = int(xy_dim[0])\r\n xy_dim[1] = int(xy_dim[1])\r\n rect = tuple(pos), (xy_dim[0] * self.grid_size , xy_dim[1] * self.grid_size )\r\n pg.draw.rect(self.maze, Colors.colors['BLUE'], rect, self.width)\r\n # If the last char is a w (white), u (up) or l (left) a line gets draw one a specific position \r\n if x[-1] == 'w':\r\n self.draw_line(self.maze, 'u', (x_count,y_count), True)\r\n if x[-1] == 'u' or x[-1] == 'l':\r\n if x_count == 0:\r\n self.draw_line(self.maze, x[-1], (len(y), y_count))\r\n else:\r\n self.draw_line(self.maze, x[-1], (x_count, y_count))\r\n \r\n y_count += 1\r\n x_count = 0\r\n # Just some cosmetic drawing\r\n pg.draw.rect(self.maze, Colors.colors['BLACK'], ((0, 12 * self.grid_size + self.grid_size // 2 + 4), (self.grid_size // 2 + 1, 10 * self.grid_size - 4)), 4)\r\n pg.draw.rect(self.maze, Colors.colors['BLACK'], ((28 * self.grid_size - self.grid_size // 2 - 1, 12 * self.grid_size + self.grid_size // 2 + 4), (self.grid_size // 2 + 1, 10 * self.grid_size - 4)), 4)\r\n pg.draw.rect(self.maze, Colors.colors['BLUE'], ((-self.width, 13 * self.grid_size), (5 * self.grid_size, 3 * self.grid_size)), self.width)\r\n pg.draw.rect(self.maze, Colors.colors['BLUE'], ((-self.width, 19 * self.grid_size), (5 * self.grid_size, 3 * self.grid_size)), self.width)\r\n pg.draw.rect(self.maze, Colors.colors['BLUE'], ((23 * self.grid_size, 13 * self.grid_size), (5 * self.grid_size + 10, 3 * self.grid_size)), self.width)\r\n pg.draw.rect(self.maze, Colors.colors['BLUE'], ((23 * self.grid_size, 19 * self.grid_size), (5 * self.grid_size + 10, 3 * self.grid_size)), self.width)\r\n pg.draw.rect(self.maze, Colors.colors['BLUE'], ((11 * self.grid_size, 16 * self.grid_size), (6 * self.grid_size, 3 * self.grid_size)), self.width)\r\n \r\n pg.draw.line(self.maze, Colors.colors['BLUE'], (0, 16 * self.grid_size + self.grid_size // 2 - 1), (self.grid_size // 2 + self.width, 16 * self.grid_size + self.grid_size // 2 - 1), self.width)\r\n pg.draw.line(self.maze, Colors.colors['BLUE'], (0, 18 * self.grid_size + self.grid_size // 2), (self.grid_size // 2 + self.width, 18 * self.grid_size + self.grid_size // 2), self.width)\r\n pg.draw.line(self.maze, Colors.colors['BLUE'], (self.grid_size * 28 - self.grid_size, 16 * self.grid_size + self.grid_size // 2 - 1), (self.grid_size * 28 + self.width, 16 * self.grid_size + self.grid_size // 2 - 1), self.width)\r\n pg.draw.line(self.maze, Colors.colors['BLUE'], (self.grid_size * 28 - self.grid_size, 18 * self.grid_size + self.grid_size // 2), (self.grid_size * 28 + self.width, 18 * self.grid_size + self.grid_size // 2), self.width)\r\n self.is_init = True", "def initialize_maze(self, file):\n with open(file, 'r') as txt_file:\n for y_pos in range(21):\n maze_line = txt_file.readline().split()\n x_pos = 0\n while x_pos < len(maze_line):\n if maze_line[x_pos] == '1':\n self.forbidden_tiles.append((x_pos, y_pos))\n elif maze_line[x_pos] == '0':\n self.allowed_tiles.append((x_pos, y_pos))\n x_pos += 1\n\n for obj in self.forbidden_tiles:\n x_pos = obj[0]\n y_pos = obj[1]\n self.grid[y_pos][x_pos] = 'X'\n\n for position in self.allowed_tiles:\n if position[1] == 0:\n self.exit = position\n elif position[1] == self._len_y-1:\n self.entrance = position\n\n return (\n self.grid,\n self.forbidden_tiles,\n self.allowed_tiles,\n self.entrance,\n self.exit\n )", "def setup(player, level):\n display('f1', 'inventory', player._inventory)\n\n 
maze(callback=partial(image, 'stone'))\n\n player.keys(right = 'd', left = 'a', up = 'w', down = 's')\n\n # randomly pick a background\n background()\n\n player.take(Punch(call='1'))\n player.take(FlameThrower(call='2'))\n player.take(Grenade(call='3', distance=6, radius=10))\n player.take(MustardGas(call='4', distance=10, radius=20))\n player.take(AirGun(call='space'))\n player.take(MachineGun(call='5', distance=15, repeat=3))\n player.take(Landmine(call='6', delay=1))\n player.take(C4(call='7', detonate='8', distance=8, radius=10))\n player.take(NuclearBomb(call='n'))\n\n player.take(WallBuster())\n #wall = partial(image, 'stone')\n #player.take(WallBuilder(left='left', right='right', front='up', back='down', wall=wall))\n display('f1', 'inventory', player._inventory)\n\n def drink(soda, player):\n soda.destroy()\n player.energy = 10\n fill(partial(image,'sprite', size=1.0), 0.05, player, drink)\n\n def claim(coin, player):\n coin.destroy()\n player.wealth = 5\n fill(partial(image,'coin', size=1.0), 0.25, player, claim)", "def test_create_maze(self):\n maze = Maze(4, 4)\n self.assertEqual(maze.row_count, 4)\n self.assertEqual(maze.col_count, 4)\n self.assertEqual(maze.size, 16)\n self.assertTrue(isinstance(maze.entrance, list))\n self.assertTrue(isinstance(maze.exit, list))", "def _get_maze(self):\n if self._maze is None:\n maze_str = self._env.observations()['DEBUG.MAZE.LAYOUT'].strip()\n lines = maze_str.split('\\n')\n\n height = len(lines)\n width = 0\n for line in lines:\n width = max(width, len(line))\n\n maze = np.zeros((width, height), dtype=np.int32)\n\n for j, line in enumerate(lines):\n for i, cell in enumerate(line):\n if cell == _WALL_SYMBOL:\n maze[i, j] = 1\n self._maze = maze\n return self._maze", "def __init__(self, master: tk.Tk):\n self._master = master\n master.title(\"Mario\") # set title\n self._config = {} # set config nest dictionary\n self._level_dic = {} # set level nest dictionary\n self._config_status = True # status for successfully read config or not\n self._pause = False # if True, game will pause, if False, game continue\n\n if self.config_input(): # read config info from file. If can't read, get False\n for key in self._config.keys(): # seeking for some basic info\n if key == 'World': # from heading: 'World'\n if all(k in self._config[key] for k in ('gravity', 'start')):\n try: # get gravity from 'World', then turns it into 'int'\n self._gravity = int(self.get_config(key, 'gravity'))\n except ValueError: # if failed\n messagebox.showerror(\"Invalid value in World\", \"Invalid value in gravity!\")\n self.config_exit()\n try: # get start level. 
Try to open it\n self._start_level = self.get_config(key, 'start')\n open(self._start_level)\n except IOError: # if failed\n messagebox.showerror(\"Invalid value in World\",\n \"Don't have this \" + self._start_level + \" file!\")\n self.config_exit()\n else: # if 'World' don't have gravity and start_level\n messagebox.showerror(\"Missing attribute\", \"Missing attributes in World!\")\n self.config_exit()\n elif key == 'Player': # from heading: 'Player'\n if all(k in self._config[key] for k in\n ('character', 'x', 'y', 'mass', 'health', 'max_velocity')):\n try: # try get all those stuff below, and change their type\n self._x = float(self.get_config(key, 'x')) # get x co-ordinate\n self._y = float(self.get_config(key, 'y')) # get y co-ordinate\n self._mass = int(self.get_config(key, 'mass')) # get mass\n self._max_health = int(self.get_config(key, 'health')) # get max_health\n self._max_velocity = int(self.get_config(key, 'max_velocity')) # get max_velocity\n except ValueError: # if failed => invalid value\n messagebox.showerror(\"Invalid value in Player\", \"Invalid value in Player attributes!\")\n self.config_exit()\n self._character = self.get_config(key, 'character') # get character\n if self._character not in PLAYERS: # check character\n messagebox.showerror(\"Invalid value in Player\",\n \"Don't have this '\" + self._character + \"' character!\")\n self.config_exit()\n else: # must missing some of the attribute\n messagebox.showerror(\"Missing attribute\", \"Missing attributes in Player!\")\n self.config_exit()\n else: # from heading which is not 'World' and 'Player' => 'Level'\n try: # check the level existence\n open(key)\n if self.get_config(key, 'goal') is not None: # level must have a goal\n self._this_level = {} # create a new dic for this level\n self._this_level.update(goal=self.get_config(key, 'goal')) # store the goal\n else: # warn that must have a goal\n messagebox.showerror(\"Missing attribute\", \"'\" + key +\n \"' level must have a goal!\")\n self.config_exit()\n # if has tunnel, update; if don't, update with None\n self._this_level.update(tunnel=self.get_config(key, 'tunnel'))\n self._this_level.update(record=(self._max_health, 0)) # set record(health, score)\n # update this level to the general level dic\n self._level_dic.update(dict([(key, self._this_level)]))\n except IOError: # if this level don't exist\n messagebox.showerror(\"Invalid heading\", \"Don't have this '\" + key + \"' level\")\n self.config_exit()\n else: # if fail in read progress\n self.config_exit()\n\n if self._config_status: # only build the world with success config settings\n # build the world with config settings\n world_builder = WorldBuilder(BLOCK_SIZE, gravity=(0, self._gravity), fallback=create_unknown)\n world_builder.register_builders(BLOCKS.keys(), create_block)\n world_builder.register_builders(ITEMS.keys(), create_item)\n world_builder.register_builders(MOBS.keys(), create_mob)\n self._builder = world_builder\n\n self._player = Player(self._character, max_health=self._max_health)\n # set max_velocity to player to avoid hard-coding\n self._player.set_max_velocity(self._max_velocity)\n self._filename = self._start_level # set current level\n self._goal = self._level_dic[self._filename]['goal'] # get current level's goal\n self._tunnel = self._level_dic[self._filename]['tunnel'] # get current level's tunnel\n self.reset_world(self._filename) # load the start level\n # View entities on canvas\n self._renderer = MarioViewRenderer(BLOCK_IMAGES, ITEM_IMAGES, MOB_IMAGES)\n size = tuple(map(min, 
zip(MAX_WINDOW_SIZE, self._world.get_pixel_size())))\n self._view = GameView(master, size, self._renderer)\n self._view.pack()\n\n self._press = False # status for whether player press the switch\n self.bind() # bind the keyboard\n\n # Status Display\n self._percentage = 1 # player health percentage\n self._score = self._player.get_score() # player's score\n self._statue = StatueDisplay(master, size[0], size[1]) # build statue display\n self._statue.pack(side=tk.BOTTOM, fill=tk.X) # pack it in the bottom\n\n # Wait for window to update before continuing\n master.update_idletasks()\n self.step()\n\n # File menu\n menubar = tk.Menu(self._master)\n # Tell master what is this menu\n self._master.config(menu=menubar)\n file_menu = tk.Menu(menubar) # build a menu\n menubar.add_cascade(label=\"File\", menu=file_menu) # File\n file_menu.add_command(label=\"Load Level\", command=self.load_map) # Load Level\n file_menu.add_command(label=\"Reset Level\", command=self.reset_map) # Reset Level\n file_menu.add_command(label=\"High Score\", command=self.show_score) # show High Score\n file_menu.add_command(label=\"Exit\", command=self.exit) # Exit the game\n menubar.add_cascade(label=\"Pause/Begin\", command=self.pause) # pause switch", "def __init__(self, _filename):\n # -- open text file containing maze\n self.file = open(_filename, 'r')\n self._grid = []\n # -- initialize line_list and append into list\n line_list = []\n lines = self.file.readlines()\n for line in lines:\n line = line.strip('\\n')\n line_list = [char for char in line]\n self._grid.append(line_list)\n # -- placing the player at the very start\n self._player = Player(1,2)\n self._grid[self._player._x][self._player._y] = POINT_OF_PLAYER\n self._grid[3][-1] = POINT_OF_EXIT\n \n \n\n # --- Rename the check method to can_move_to\n \"\"\" \n :return: return False if the location is a wall, otherwise return True\n :rtype: bool\n \"\"\"", "def __init__(self, level, parent):\n self.color = Color(0, 0, 0)\n self.pixel_count = 0\n self.palette_index = 0\n self.children = [None for _ in range(8)]\n # add node to current level\n if level < OctreeQuantizer.MAX_DEPTH - 1:\n parent.add_level_node(level, self)", "def new(self):\n self.playing = True\n self.score = 0\n # GROUPS AND LAYERS\n self.all_sprites = pg.sprite.LayeredUpdates() # Lets you assign layer to group to render in correct order\n self.planets = pg.sprite.Group()\n self.moons = pg.sprite.Group()\n self.mobs = pg.sprite.Group()\n self.stars = pg.sprite.Group()\n self.pickups = pg.sprite.Group()\n self.arrows = pg.sprite.Group()\n # SCREEN\n self.frame_coordinates = vec(0, 0)\n self.first_planet = Planet(self) # Add 1st Planet\n self.player = Player(self, self.first_planet) # Add Player on First Planet\n self.added_planets = 0\n self.spawn_planets(PLANETS + self.added_planets)\n self.arrow = Arrow(self)\n self.sun = Sun(self)\n\n # Messages\n self.corner_msg = 'Traverse & Score!'\n self.corner_msg_flag = False\n self.corner_msg_start_time = pg.time.get_ticks()\n\n self.arrow_msg = False\n\n # Play Music\n pg.mixer.music.load(path.join(self.snd_dir, LEVEL_1_MUSIC))\n\n # Start Game Loop\n self.run()\n print(\"GOT TO END OF NEW()\")", "def __init__(self, world, location, elevation):\n self.world = world\n self.location = location # (row, col)\n self.elevation = elevation\n if elevation < 0:\n self.water_level = -elevation\n else:\n self.water_level = 0\n self.pollution = 0\n self.crab = None", "def __init__(self, nrows, ncols, st_row, st_col, end_row, end_col):\n\n if not 
maze._allow_creation: raise NotImplementedError\n\n self.nrows = nrows\n self.ncols = ncols\n\n # Initialize the maze so that all walls are present.\n self.cells = {}\n for i in range(0, nrows):\n for j in range(0, ncols):\n self.cells[i, j] = 0x0\n\n self.st_row, self.st_col = (st_row, st_col)\n self.end_row, self.end_col = (end_row, end_col)", "def main():\n maze = Vines()\n maze.drawSolu()\n maze.drawDead()", "def __init__(self):\n self.grid = {}\n for i in range(21):\n self.grid[i] = [' ']*21\n self._len_x = len(self.grid[0])\n self._len_y = len(self.grid)\n self.forbidden_tiles = []\n self.allowed_tiles = []\n self.exit = None\n self.entrance = None", "def __init__(self, layout):\n self._start = self._target = None\n empty_spaces = set()\n grid = []\n for line in layout.split(\"\\n\"):\n if line.strip():\n grid.append(list(line.strip()))\n self._height = len(grid)\n self._width = len(grid[0])\n for row in xrange(len(grid)):\n for col in xrange(len(grid[row])):\n if grid[row][col] != '1':\n empty_spaces.add((row,col))\n if grid[row][col] == 's':\n if self._start:\n raise RuntimeError('More than one start location in maze')\n self._start = (row, col)\n elif grid[row][col] == 't':\n if self._target:\n raise RuntimeError('More than one target location in maze')\n self._target = (row, col)\n if not self._start or not self._target:\n raise RuntimeError('Start and target location not in maze')\n self._empty_spaces = frozenset(empty_spaces)\n self._visited = set([self._start]) # currently at self._start", "def generate(self, level):\n # TODO The dungeon's instances are spawned and loaded here.\n # fill map with \"blocked\" tiles\n level.maze = [[Tile(x, y, True) for y in range(level.height)] for x in range(level.width)]\n\n for r in range(level.max_rooms):\n # random width and height\n w = random.randint(level.min_room_size, level.max_room_size)\n h = random.randint(level.min_room_size, level.max_room_size)\n\n # random position without going out of the boundaries of the map\n x = random.randint(0, level.width - w - 1)\n y = random.randint(0, level.height - h - 1)\n\n # \"DungeonRoom\" class makes rectangles easier to work with\n new_room = Room(x, y, w, h)\n level.rooms.append(new_room)\n\n # run through the other rooms and see if they intersect with this one\n failed = False\n for other_room in level.rooms:\n if other_room is not new_room and new_room.intersect(other_room):\n failed = True\n break\n\n if not failed:\n # this means there are no intersections, so this room is valid\n\n # \"paint\" it to the map's tiles\n self._create_room(level, new_room)\n\n # center coordinates of new room, will be useful later\n new_x, new_y = new_room.center()\n\n if level.num_rooms > 0:\n # connect it to the previous room with a tunnel\n # center coordinates of previous room\n (prev_x, prev_y) = level.rooms[level.num_rooms - 1].center()\n\n # draw a coin (random number that is either 0 or 1)\n if random.randint(0, 1) == 1:\n # first move horizontally, then vertically\n self._create_h_tunnel(level, prev_x, new_x, prev_y)\n self._create_v_tunnel(level, prev_y, new_y, new_x)\n else:\n # first move vertically, then horizontally\n self._create_v_tunnel(level, prev_y, new_y, prev_x)\n self._create_h_tunnel(level, prev_x, new_x, new_y)\n\n # finally, append the new room to the list\n level.rooms.append(new_room)\n level.num_rooms += 1\n\n # connect them with a tunnel\n self._create_h_tunnel(level, 25, 55, 23)", "def setUp(self):\n self.pts = ((0, 0, 0), (1, 1, 1), (1, 0, 2), (0, 1, 2), (0.5, 1.5, 1))\n 
self.tris = (0, 2, 1, 0, 1, 3, 3, 1, 4)", "def create_scene():\n create_floor()\n if config.M != \"\":\n if config.LEVEL == 1:\n create_wall()\n create_enemy()\n create_gap()\n create_platform()\n create_marijuana()\n create_star()\n create_fish()\n elif config.LEVEL == 2:\n create_boss()\n create_platform()\n create_star()", "def start_game(self, level: Level):\n # Initialize game state and begin\n self.game_state = Gamestate(level, len(self.player_list), 1, self.init_levels)\n for player in self.player_list:\n spawn_tile = self.game_state.get_random_spawn_tile()\n self.game_state.add_character(player.entity, spawn_tile)\n # First level has one zombie and no ghosts\n first_zombie = EnemyZombie(\"zombie\", \"zombie\")\n self.add_enemies(first_zombie)\n for enemy in self.enemy_list:\n enemy_spawn = self.game_state.get_random_spawn_tile()\n self.game_state.add_adversary(enemy.entity, enemy_spawn)", "def __init__(self, world, location, elevation):\n LandCell.__init__(self, world, location, elevation)\n self.plant = 0\n self.reset_food_level()", "def setUp(self):\n self.location = [(0, 0), (0, 1)]\n self.hit = (0, 0)", "def create_maze(size):\n dots = MazeGenerator.generate_dot_positions(size.x, size.y)\n maze = MazeGenerator.create_grid_string(dots, size.x, size.y)\n return maze", "def __init__(self) -> None:\n self._grid_sol = []\n self._grid_init = []\n self._grid_display = []\n self._difficulty = 2\n self._move_history = []\n for _ in range(9):\n self._grid_sol.append([0, 0, 0, 0, 0, 0, 0, 0, 0])\n self._grid_display.append([0, 0, 0, 0, 0, 0, 0, 0, 0])\n self._grid_init.append([0, 0, 0, 0, 0, 0, 0, 0, 0])", "def load_level(level):\n\n global spawn_boxes\n\n level = pytmx.load_pygame('maps/level_' + level + '.tmx')\n\n y_num = 0\n for x, y, gid in level.get_layer_by_name('Objects'):\n if level.get_tile_image_by_gid(gid) != None:\n matrix[y_num].append(1)\n else:\n matrix[y_num].append(0)\n \n if x == 19: y_num += 1\n\n spawn_boxes = [] # Areas in which enemies can spawn. Requires tiled type 'spawn_box'\n for obj in level.get_layer_by_name('Triggers'):\n if obj.type == 'spawn_box':\n rect = pygame.rect.Rect(obj.x, obj.y, obj.width, obj.height)\n if obj.name == 'north': \n rect = rect.move(0, -64)\n rect.height += 64\n if obj.name == 'east': \n rect = rect.move(64, 0)\n rect.width += 64\n if obj.name == 'south': \n rect = rect.move(0, 64)\n rect.height += 64\n if obj.name == 'west': \n rect = rect.move(-64, 0)\n rect.width += 64\n spawn_boxes.append(rect)\n\n return level", "def __init__(self,height,width): \r\n self.width = 2*(width//2) + 1 # Make width odd\r\n self.height = 2*(height//2) + 1 # Make height odd\r\n\r\n # grid of cells\r\n self.grid = [[0 for j in range(self.width)] for i in range(self.height)]\r\n\r\n # declare instance variable\r\n self.visited = [] # visited cells\r\n self.unvisited = [] # unvisited cells\r\n self.path = dict() # random walk path\r\n\r\n # valid directions in random walk\r\n self.directions = [(0,1),(1,0),(0,-1),(-1,0)]\r\n\r\n # indicates whether a maze is generated\r\n self.generated = False\r\n\r\n # shortest solution\r\n self.solution = []\r\n self.showSolution = False\r\n self.start = (0,0)\r\n self.end = (self.height-1,self.width-1)", "def main(self):\n sys.setrecursionlimit(2**20)#10**8)\n self.result=self.Breakwalls() #IS THE INITIAL RESULT OF THE MAZE. 
THE INITIAL LIST STARTING FROM A RANDOM POINT AND ENDING WHEN ALL CELLS VISITED.\n if self.CurrentCell==[1,1]: #[1,1] is already taken as the start of explore \n self.CurrentCell=[1,randrange(2,self.N+1)]\n self.MazeKey=self.GenerateKey() ##Generates a mazekey\n print(\"Start: \", [1,1])\n print(\"MazeKey: \", self.MazeKey)\n print(\"Exit: \", self.CurrentCell)\n self.Objectives() #Things to do in the maze, find key, find exit.\n print(\"Path to key: \", self.ExploreKey) #Path to the Key\n print()\n print(\"Path from key to exit: \", self.ExploreExit)\n print()\n print(\"Entire Path from Start to Key, and Key to Exit: \", self.EntirePath) \n self.drawmaze()", "def __init__(self, rows, cols, mines):\n self.rows = rows\n self.cols = cols\n self.mines = mines\n self.opened = 0\n self.game_won = False\n self.game_lost = False\n self.board = self.__init__minefield__()\n self.tiles = self.__init__tiles__()", "def get_maze_instance(nrows, ncols, st_row, st_col, end_row, end_col):\n maze._allow_creation = True\n m = maze(nrows, ncols, st_row, st_col, end_row, end_col)\n maze._allow_creation = False\n return m", "def mazeTest():\r\n\tmyMaze = Maze()\r\n\tmyMaze.addCoordinate(1,0,0)\r\n\tmyMaze.addCoordinate(1,1,0)\r\n\tmyMaze.addCoordinate(7,1,0)\r\n\tmyMaze.addCoordinate(1,2,0)\r\n\tmyMaze.addCoordinate(2,2,0)\r\n\tmyMaze.addCoordinate(3,2,0)\r\n\tmyMaze.addCoordinate(4,2,0)\r\n\tmyMaze.addCoordinate(6,2,0)\r\n\tmyMaze.addCoordinate(7,2,0)\r\n\tmyMaze.addCoordinate(4,3,0)\r\n\tmyMaze.addCoordinate(7,3,0)\r\n\tmyMaze.addCoordinate(4,4,0)\r\n\tmyMaze.addCoordinate(7,4,0)\r\n\tmyMaze.addCoordinate(3,5,0)\r\n\tmyMaze.addCoordinate(4,5,0)\r\n\tmyMaze.addCoordinate(7,5,0)\r\n\tmyMaze.addCoordinate(1,6,0)\r\n\tmyMaze.addCoordinate(2,6,0)\r\n\tmyMaze.addCoordinate(3,6,0)\r\n\tmyMaze.addCoordinate(4,6,0)\r\n\tmyMaze.addCoordinate(5,6,0)\r\n\tmyMaze.addCoordinate(6,6,0)\r\n\tmyMaze.addCoordinate(7,6,0)\r\n\tmyMaze.addCoordinate(5,7,0)\r\n\tmyMaze.printMaze()\r\n\tprint(myMaze.findRoute(x1=1, y1=0, x2=5, y2=7))", "def __init__(self, limb, kin, total_time, center_pos, num_way, radius, z_height=0.1):\n MotionPath.__init__(self, limb, kin, total_time)\n self.center_pos = center_pos\n self.num_way = num_way\n self.radius = radius\n self.z_height = z_height\n self.base_frame = 'base'\n self.tool_frame = 'left_gripper'\n self.plan_path()\n self.plot_path()", "def __init__(self, map_config):\n self.current_obstacles = []\n self.current_goal = None\n self.cfg = map_config", "def __init__( self, level, outter = None ):\n assert isinstance( level, int )\n assert isinstance( outter, Env ) or ( outter is None )\n\n self._level = level\n self._steps = []\n self._outter = outter", "def __init__(self):\n\n self.frameCount = 0\n self._initScreen()\n self._initObjects()\n self._initControls()\n self._initLevel()\n self._start()\n print \"DEBUG: Initializing Game\"\n pass", "def __init__(self):\n pos = _get_mc().player.getTilePos() + Vec3(2, 0, 0)\n pos = _Vec3(pos.x, pos.y, pos.z)\n _GenericBot.__init__(self, pos)\n self._pos = pos\n self._move(self._pos)", "def extendMaze(self, factor):\n newMaze = Maze(width=self.WORLD_WIDTH * factor,\n height=self.WORLD_HEIGHT * factor,\n start_state=[self.START_STATE[0] * factor, self.START_STATE[1] * factor],\n goal_states=self.extendState(self.GOAL_STATES[0], factor),\n return_to_start=self.return_to_start,\n reward_goal=self.reward_goal,\n reward_move=self.reward_move,\n reward_obstacle=self.reward_obstacle\n )\n newMaze.obstacles = []\n for state in self.obstacles:\n 
newMaze.obstacles.extend(self.extendState(state, factor))\n newMaze.resolution = factor\n return newMaze", "def extendMaze(self, factor):\n newMaze = Maze(width=self.WORLD_WIDTH * factor,\n height=self.WORLD_HEIGHT * factor,\n start_state=[self.START_STATE[0] * factor, self.START_STATE[1] * factor],\n goal_states=self.extendState(self.GOAL_STATES[0], factor),\n return_to_start=self.return_to_start,\n reward_goal=self.reward_goal,\n reward_move=self.reward_move,\n reward_obstacle=self.reward_obstacle\n )\n newMaze.obstacles = []\n for state in self.obstacles:\n newMaze.obstacles.extend(self.extendState(state, factor))\n newMaze.resolution = factor\n return newMaze", "def setUp(self):\r\n self.matrix = array(\r\n [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]])\r\n self.cells = [(0, 1), (1, 3)]\r\n self.cells2 = [(0, 2), (2, 3)]", "def __init__(self):\n self.last_reward_pos = 0\n super().__init__()\n self.TERRAIN_VARIANCE = 0.0\n self.stump_spacing = 4.0\n self.stump_height = 1.0\n self.my_init({'leg_length': 35, 'walker_type': 'default'})", "def __init__(self):\n \tself.transposition = {}\n \tself.currentDepthLimit = 0\n\tself.counter = 0\t\n pass", "def __init__(self):\n self.board = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]\n self.last_move = None", "def klyubin_world(self):\n maze = self.create_maze_world(10,10)\n # wall A\n for i in range(6):\n maze.add_wall( (1, i), \"N\" )\n # wall B & D\n for i in range(2):\n maze.add_wall( (i+2, 5), \"E\")\n maze.add_wall( (i+2, 6), \"E\")\n # wall C\n maze.add_wall( (3, 6), \"N\")\n # wall E\n for i in range(2):\n maze.add_wall( (1, i+7), \"N\")\n # wall F\n for i in range(3):\n maze.add_wall( (5, i+2), \"N\")\n # wall G\n for i in range(2):\n maze.add_wall( (i+6, 5), \"W\")\n # walls HIJK\n maze.add_wall( (6, 4), \"N\")\n maze.add_wall( (7, 4), \"N\")\n maze.add_wall( (8, 4), \"W\")\n maze.add_wall( (8, 3), \"N\")\n return maze", "def __init__(self, tile_dir):\n\n # open top-level info file\n self.tile_dir = tile_dir\n info_file = os.path.join(tile_dir, self.TileInfoFilename)\n try:\n fd = open(info_file, 'rb')\n (self.extent, self.tile_size,\n self.sea_colour, self.land_colour) = pickle.load(fd)\n fd.close()\n except IOError:\n msg = \"'%s' doesn't appear to be a tile directory\" % tile_dir\n raise RuntimeError(msg)\n\n (self.tile_size_x, self.tile_size_y) = self.tile_size\n\n # get list of tile levels\n tile_mask = os.path.join(tile_dir, self.TileFilenameTemplate)\n self.levels = [int(os.path.basename(l))\n for l in glob.glob(os.path.join(tile_mask))]\n\n # setup the tile caches\n self.cache = {}\n for l in self.levels:\n self.cache[l] = {}\n\n # set min and max tile levels\n self.min_level = min(self.levels)\n self.max_level = max(self.levels)", "def __init__(self, env=None, tilesEnv=False):\n super(MarioEnv, self).__init__(env)\n self.resetCount = -1\n # reward is distance travelled. So normalize it with total distance\n # https://github.com/ppaquette/gym-super-mario/blob/master/ppaquette_gym_super_mario/lua/super-mario-bros.lua\n # However, we will not use this reward at all. 
It is only for completion.\n self.maxDistance = 3000.0\n self.tilesEnv = tilesEnv", "def __init__(self):\n self.Robot = Robot()\n self.Omega = matrix()\n # self.Omega.value[0][0] = 1.0\n # self.Omega.value[1][1] = 1.0\n self.Xi = matrix()\n # Xi.value[0][0] = 0.0\n # Xi.value[1][0] = 0.0\n self.measure = {}\n self.landMarkCount = 0\n self.init = False\n self.bearing = 0\n self.x = 0\n self.y = 0\n \n # TODO", "def __init__(self, limb, kin, total_time, goal_pos, num_way, start_pos=None):\n MotionPath.__init__(self, limb, kin, total_time)\n self.start_pos = start_pos\n self.goal_pos = goal_pos\n self.num_way = num_way\n self.base_frame = 'base'\n self.tool_frame = 'left_hand_camera' # 'left_gripper'\n self.plan_path()\n # self.plot_path()", "def __init__(self, num_points=5000):\n self.num_points = num_points\n\n # All walks start at (0, 0)\n self.x_values = [0]\n self.y_values = [0]", "def __init__(self, num_points=5000):\n self.num_points = num_points\n\n # All walks start at (0, 0)\n self.x_values = [0]\n self.y_values = [0]", "def __init__(self, depth_limit=float(20)):\n\n self.root = None\n self.depth_limit = depth_limit", "def __init__(self, size):\n self.world = [[None for y in range(size)] for x in range(size)]", "def __init__(self):\n self.opening_scene = DungeonGate()\n # this list define the order of scenes in the corridor\n self.corridor_scenes = [GuardsRoom(), Cell(), Armory(), EmptyRoom(), Dormitory()]\n shuffle(self.corridor_scenes)\n self.explored_scenes = {\n \"GuardsRoom\": \"unexplored\",\n \"Cell\": \"unexplored\",\n \"Dormitory\": \"unexplored\",\n \"Armory\": \"unexplored\",\n \"EmptyRoom\": \"unexplored\",\n \"DungeonGate\": \"unexplored\"\n }", "def __init__(self):\r\n \r\n # World params\r\n self.spawn_distance = 0\r\n\r\n # Nest planning\r\n self.done_init = False\r\n self.wall_set = None\r\n self.planned_nest_set = None\r\n self.nest_completion_set = None\r\n\r\n # Task mapping\r\n self.uuid_task_map = {}\r\n\r\n self.turn = 0", "def __init__(self, player, screen):\n\n # Call the parent constructor\n Level.__init__(self, player, screen)\n\n self.level_x_limit = -1380\n self.level_y_limit = 270\n\n\n # Array with type of platform, and x, y location of the platform.\n level = [[platforms.GRASS_MID, 15, 500],\n [platforms.GRASS_DIRT_LONG, 15, 575],\n [platforms.GRASS_RIGHT_EDGE, 225, 500],\n [platforms.GRASS_RIGHT_EDGE_DIRT, 225, 574],\n\n [platforms.GRASS_RIGHT_LONG, -68, 290],\n\n\n [platforms.GRASS_RIGHT_CORNER, 1274, 100],\n [platforms.GRASS_RIGHT_LONG, 1274, 192],\n [platforms.GRASS_LEFT_CORNER, 1193, 100],\n [platforms.GRASS_LEFT_LONG, 1193, 192],\n [platforms.GRASS_MID, 983, 193],\n [platforms.GRASS_DIRT_LONG, 983, 268],\n [platforms.GRASS_LEFT_CORNER, 901, 193],\n [platforms.GRASS_LEFT_LONG, 901, 285],\n [platforms.GRASS_MID, 691, 285],\n [platforms.GRASS_DIRT_LONG, 691, 360],\n [platforms.GRASS_LEFT_CORNER, 609, 285],\n [platforms.GRASS_LEFT_LONG, 609, 377],\n [platforms.GRASS_MID, 399, 377],\n [platforms.GRASS_DIRT_LONG, 399, 452],\n [platforms.GRASS_LEFT_CORNER, 318, 377],\n [platforms.GRASS_LEFT_LONG, 317, 469],\n\n\n [platforms.GRASS_LEFT_CORNER, 1558, 100],\n [platforms.GRASS_LEFT_LONG, 1558, 192],\n [platforms.GRASS_RIGHT_CORNER, 1639, 100],\n [platforms.GRASS_RIGHT_LONG, 1639, 192],\n [platforms.GRASS_MID, 1721, 193],\n [platforms.GRASS_DIRT_LONG, 1721, 268],\n [platforms.GRASS_RIGHT_CORNER, 1931, 193],\n [platforms.GRASS_RIGHT_LONG, 1931, 285],\n [platforms.GRASS_MID, 2013, 285],\n [platforms.GRASS_DIRT_LONG, 2013, 360],\n 
[platforms.GRASS_RIGHT_CORNER, 2223, 285],\n [platforms.GRASS_RIGHT_LONG, 2223, 377],\n [platforms.GRASS_MID, 2305, 377],\n [platforms.GRASS_DIRT_LONG, 2305, 452],\n [platforms.GRASS_RIGHT_CORNER, 2515, 377],\n [platforms.GRASS_RIGHT_LONG, 2515, 469],\n\n [platforms.GRASS_LEFT_EDGE, 2607, 500],\n [platforms.GRASS_LEFT_EDGE_DIRT, 2617, 574],\n [platforms.GRASS_MID, 2692, 500],\n [platforms.GRASS_DIRT_LONG, 2692, 575],\n\n\n ]\n\n # Go through the array above and add platforms\n for platform in level:\n block = platforms.Platform(platform[0])\n block.rect.x = platform[1]\n block.rect.y = platform[2]\n block.player = self.player\n self.platform_list.add(block)\n\n\n\n\n choosePort =[[platforms.PORTAL, 1356, 420, 3],\n\n [platforms.PORTAL, 2712, 320, 1],\n ]\n\n for port in choosePort:\n wego = platforms.ChooseLev(port[0], port[3])\n wego.rect.x = port[1]\n wego.rect.y = port[2]\n wego.player = self.player\n self.platform_choose.add(wego)\n\n\n\n background = platforms.backgroundGrass()\n background.rect.x = 0\n background.rect.y = 0\n self.decor.add(background)", "def __init__(self, screen, maze_arrangement):\n pygame.sprite.Sprite.__init__(self)\n \n self.__walk_down = [pygame.image.load(\"./PlayerImages/stand_face_down.png\"), \\\n pygame.image.load(\"./PlayerImages/walk1_face_down.png\"), \\\n pygame.image.load(\"./PlayerImages/stand_face_down.png\"), \\\n pygame.image.load(\"./PlayerImages/walk2_face_down.png\"), \\\n pygame.image.load(\"./PlayerImages/stand_face_down.png\")]\n \n self.__walk_up = [pygame.image.load(\"./PlayerImages/stand_face_up.png\"), \\\n pygame.image.load(\"./PlayerImages/walk1_face_up.png\"), \\\n pygame.image.load(\"./PlayerImages/stand_face_up.png\"), \\\n pygame.image.load(\"./PlayerImages/walk2_face_up.png\"), \\\n pygame.image.load(\"./PlayerImages/stand_face_up.png\")] \n \n self.__walk_right = [pygame.image.load(\"./PlayerImages/stand_face_right.png\"), \\\n pygame.image.load(\"./PlayerImages/walk1_face_right.png\"), \\\n pygame.image.load(\"./PlayerImages/stand_face_right.png\"), \\\n pygame.image.load(\"./PlayerImages/walk2_face_right.png\"), \\\n pygame.image.load(\"./PlayerImages/stand_face_right.png\")]\n \n self.__walk_left = [pygame.image.load(\"./PlayerImages/stand_face_left.png\"), \\\n pygame.image.load(\"./PlayerImages/walk1_face_left.png\"), \\\n pygame.image.load(\"./PlayerImages/stand_face_left.png\"), \\\n pygame.image.load(\"./PlayerImages/walk2_face_left.png\"), \\\n pygame.image.load(\"./PlayerImages/stand_face_left.png\")]\n \n self.image = self.__walk_down[0]\n self.rect = self.image.get_rect()\n \n # Set direction, current frame index, animation state, and \n self.__direction = \"DOWN\"\n self.__frame_index = 0\n self.__animating = False\n self.__move_length = 0\n \n self.__maze_arrangement = maze_arrangement\n \n self.rect.x = 50\n self.rect.y = 50 \n \n self.__user_x = self.rect.x / 50\n self.__user_y = self.rect.y / 50 \n self.__x = self.rect.x\n self.__y = self.rect.y", "def __init__(self, d=1):\r\n self.depth = d", "def _generate_maze(self):\n grid = [[GridCell(x, y, self._treasure_prob) for x in range(self._map_size)] for y in range(self._map_size)]\n\n center_x = self._map_size // 2\n center_y = self._map_size // 2\n\n for _ in range(self._sparsity):\n current = grid[center_x][center_y]\n stack = list()\n start = True\n while len(stack) or start:\n start = False\n current.visited = True\n children = current.has_children(grid)\n\n if children:\n choice = np.random.choice(children)\n choice.visited = True\n\n 
stack.append(current)\n\n self._remove_walls(current, choice)\n\n current = choice\n\n elif stack:\n current = stack.pop()\n for row in grid:\n for cell in row:\n cell.visited = False\n\n # edit center area\n grid[center_x][center_y].set_treasury()\n for x in range(center_x - 1, center_x + 2):\n for y in range(center_y - 1, center_y + 2):\n grid[x][y].erase_walls()\n return grid", "def __init__(self,screen):\n\t\tsuper(LevelOne,self).__init__(screen)\n\t\tself.villain_one = None\n\t\tself._set_villain()", "def createLevelMap(self):\n for a in self.hierarchy.iterkeys():\n self.lvl = 0\n self.calcLevel(a)\n if self.lvl > self.levelMap.highestLevel: self.levelMap.highestLevel = self.lvl\n self.levelMap.addLevelData(AgentName=a, Level=self.lvl)", "def __init__(self, no_name_level):#width height tile rover x y list\n\t\tself.no_name_level = no_name_level\n\t\tpass", "def __init__(self, position, is_horizontal, map_state):\n\n self.position = position\n self.spawn_position = position[:]\n self.in_spawn_area = True\n self.is_horizontal = is_horizontal\n self.map_state = map_state\n self.previous_direction = (0, 0)", "def _initialize_trees(self):", "def view_init(self, elev=None, azim=None):\n\n self.dist = 10\n\n if elev is None:\n self.elev = self.initial_elev\n else:\n self.elev = elev\n\n if azim is None:\n self.azim = self.initial_azim\n else:\n self.azim = azim", "def __init__(self, engine, level): \n super().__init__(engine)\n self.tiles = []\n \n \"\"\"R is for the backgrounds (water, grass, etc.)\n G is for obstacles on the tile or roads\n B is for status effects\n \"\"\"\n level_img = cv2.imread(\"levels/\" + level + \".png\")\n for i in range(level_img.shape[0]):\n for j in range(level_img.shape[1]):\n if level_img[i,j][1]:\n pass\n else:\n row.append(Tile.getTyleType(j))", "def setup_pymol():\n pymol.finish_launching() # Prevent threading errors\n # Configure global settings\n cmd.set('scene_buttons', 1)\n cmd.set('matrix_mode', 1)\n cmd.set('movie_panel', 1)\n # Configure quality settings\n cmd.mset(\"1 x500\")\n cmd.set('ray_trace_frames', 1)\n cmd.viewport(800, 800)", "def createLevelMap(self):\n\t\tfor a in self.hierarchy.iterkeys():\n\t\t\tself.lvl = 0\n\t\t\tself.calcLevel(a)\n\t\t\tif self.lvl > self.levelMap.highestLevel: self.levelMap.highestLevel = self.lvl\n\t\t\tself.levelMap.addLevelData(AgentName=a, Level=self.lvl)" ]
[ "0.7217811", "0.7040007", "0.6914307", "0.68994147", "0.6807719", "0.67093295", "0.67023313", "0.6577321", "0.6552819", "0.6521437", "0.651588", "0.634067", "0.6334667", "0.6277603", "0.62732935", "0.6267083", "0.62319756", "0.6219", "0.6209007", "0.6164258", "0.6150346", "0.61331093", "0.6119976", "0.6107852", "0.61008096", "0.6094501", "0.60784864", "0.60779846", "0.60535127", "0.59669024", "0.5935856", "0.5932658", "0.59276694", "0.59193736", "0.59147215", "0.5873113", "0.5868843", "0.5850112", "0.5830122", "0.5828933", "0.57978827", "0.5781101", "0.575272", "0.57516134", "0.57503235", "0.57372475", "0.5722208", "0.5721194", "0.5719562", "0.57143366", "0.5709162", "0.5708405", "0.5701953", "0.5697377", "0.5695524", "0.568791", "0.56823516", "0.5678761", "0.5676635", "0.56647635", "0.56522405", "0.56274277", "0.56203604", "0.56116545", "0.559654", "0.55866855", "0.5572696", "0.5571814", "0.55582184", "0.5557006", "0.5542495", "0.5532113", "0.5532113", "0.55250686", "0.55208427", "0.55189896", "0.55187833", "0.5515713", "0.5514085", "0.55105066", "0.550723", "0.549941", "0.5492289", "0.5492289", "0.5477313", "0.5472079", "0.547049", "0.54688853", "0.546454", "0.54533494", "0.54530203", "0.54477483", "0.54382277", "0.5434403", "0.5425576", "0.542496", "0.5411133", "0.5404675", "0.5398254", "0.53965956", "0.5383304" ]
0.0
-1
Runs the Mazescape game.
def run(self): #game loop set self.playing to False to end game self.playing = True while self.playing: self.dt = self.clock.tick(FPS) / 1000 self.events() self.update() self.draw() self.losing_sequence()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n g = Game(800, 600)\n g.start()", "def main():\n g = DemoGame(800, 600)\n g.start()", "def main():\n game = RiichiMahjongApp()\n game.run()", "def run(self):\n print(\"WELCOME TO MINESWEEPER!\")\n\n\n while True:\n\n self.get_input()\n start_game(self.rows, self.cols, self.mines)", "def run_game_logic(self):\n pass", "def run():\n\tif len(sys.argv) > 1 and sys.argv[1] in {'-V', '--version'}:\n\t\tprint(\"pokesim - Pokémon Battle Simulator - Version %s\" % __version__)\n\t\texit()\n\n\trandom.seed()\n\ttry:\n\t\tmain()\n\texcept (KeyboardInterrupt, EOFError):\n\t\texit(0)", "def run(self):\n r = self.engine.run()\n while r != 1:\n if r == 0:\n if self.engines.index(self.engine) < len(self.engines) - 1:\n self.engine = self.engines[self.engines.index(self.engine) + 1]\n print self.engines.index(self.engine)\n self.e_e.reset_input()\n else:\n self.engine = self.engines[0]\n r = self.engine.run()\n pygame.quit()\n raise SystemExit", "def main():\n game = Blackjack()\n game.play()", "def main():\r\n gameclass = data.game.GameClass()\r\n gameclass.main_loop()", "def main():\r\n\r\n movetwotimes()\r\n pick_beeper()\r\n move()\r\n turn_left()\r\n movetwotimes()\r\n put_beeper()\r\n turn_left()\r\n turn_left()\r\n movetwotimes()\r\n rotatethreetimes()\r\n movetwotimes()\r\n move()\r\n turn_left()\r\n turn_left()", "def run(self):\n r = self.engine.run()\n while r != QUIT_FLAG:\n if r == SWITCH_FLAG:\n if self.engines.index(self.engine) < len(self.engines) - 1:\n self.engine = self.engines[self.engines.index(self.engine) + 1]\n print self.engines.index(self.engine)\n self.e_e.reset_input()\n else:\n self.engine = self.engines[0]\n r = self.engine.run()\n pygame.quit()\n raise SystemExit", "def run_gui_game():\n # Set up game\n view = GuiView()\n game = GameEngine(view)", "def run(self) -> None:\n pg.mixer.init()\n pg.mixer.music.load(path.join(sound_dir, 'theme.wav'))\n pg.mixer.music.set_volume(0.1)\n pg.mixer.music.play(-1, fade_ms=1000)\n while True:\n if self.state == State.MENU:\n self.show_menu()\n if self.state == State.GAME_OVER:\n self.show_game_over_screen()\n if self.state == State.PLAY:\n self.run_game()", "def main() -> None:\r\n game = advanced_game(MAP_FILE)\r\n\r\n root = tk.Tk()\r\n root.title('EndOfDayz')\r\n if TASK == 1:\r\n gui = BasicGraphicalInterface\r\n elif TASK == 2:\r\n gui = ImageGraphicalInterface\r\n # else:\r\n # gui = MastersGraphicalInterface\r\n app = gui(root, game.get_grid().get_size())\r\n app.play(game)\r\n root.mainloop()", "def main():\n if \"cli\" in sys.argv:\n run_cli_game()\n else:\n run_gui_game()", "def run(self):\n\n machine = StateMachine()\n\n while machine.state != GameState.END:\n\n if machine.state == GameState.PLAYING:\n machine.state = self.play(self.screen, self)\n elif machine.state == GameState.INTRO:\n machine.state = Intro.run(self.screen)\n elif machine.state == GameState.MENU:\n machine.state = Menu.run(self.screen)\n else:\n raise EnvironmentError\n\n pygame.quit()", "def main():\n ans = random_word()\n run_game(ans, N_TURNS)", "def main():\n pygame.init()\n os.environ['SDL_VIDEO_CENTERED'] = '1'\n pygame.display.set_caption('8-Puzzle game')\n screen = pygame.display.set_mode((800, 500))\n fpsclock = pygame.time.Clock()\n program = SlidePuzzle((3, 3), 160, 5, difficulty=10) # program is also the gym environment\n\n choice = program.selectPlayerMenu(fpsclock, screen)\n if choice == \"AI\":\n pygame.display.quit()\n trainAI(program)\n elif choice == \"human\":\n launchWithGUI(program, fpsclock, screen)\n del program", "def 
main():\n boba_blast_game.main()", "def run():\n import argparse\n parser = argparse.ArgumentParser(description=\"Create and solve mazes\")\n parser.add_argument(\"-c\", \"--cli\", help=\"Switch to CLI mode\", action='store_true')\n parser.add_argument(\"-f\", \"--file\", help=\"File to import map from\")\n parser.add_argument(\"-s\", \"--start\", help=\"Starting position in the maze\")\n parser.add_argument(\"-e\", \"--end\", help=\"Ending position in the maze\")\n args = parser.parse_args()\n if args.file:\n myfile = args.file\n else:\n myfile = 'map1.txt'\n with open(myfile, 'r') as mapfile:\n maze_str = mapfile.read()\n maze = Maze(maze_str, cli=args.cli, start=parse_seq(args.start), finish=parse_seq(args.end))\n maze.game_loop()", "def run_game(self) -> None:\n decision = 0\n if self._initial:\n self._initial = False\n while decision != 1:\n try:\n display_no_combat_init(self.hero)\n decision = get_user_input([1, 2, -1])\n if decision == -1:\n self._quit()\n elif decision == 2:\n self._show_bag()\n else:\n break\n except KeyboardInterrupt:\n print(\"[!] If you want to quit, use the provided user interface\")\n\n while not self.hero.is_dead:\n try:\n self._load_map()\n except KeyboardInterrupt:\n print(\"[!] If you want to quit, use the provided user interface\")", "def run_game(self):\n while True:\n self._check_events()\n self._update_rain()\n self._update_screen()", "def run():\r\n \r\n match = a4_acc.Game() # Instantiate a Game object \r\n setup(match)\r\n\r\n if constants.SHOW_GRAPHICS:\r\n axes= startGraphics(match.board) #step 0\r\n \r\n \r\n for k in range(constants.STEPS):\r\n update(match)\r\n updateGraphics(board, k, caxes)\r\n \r\n ########\r\n # TO DO: \r\n # Simulate game given the intial state for constants.STEPS iterations\r\n \r\n # Example code to call the updateGraphics function; the second argument\r\n # needs to be replaced:\r\n # if constants.SHOW_GRAPHICS:\r\n # updateGraphics(match.board, None, axes) \r\n \r\n # Do not change or add code below here for function run\r\n endNow= raw_input('Press ENTER to continue.')", "def main():\n\n name, game = select_game(vgc.KNOWN_GAMES)\n print('---- Launching: %s -----'%name)\n game.game.main()\n sys.exit(0)", "def run_game(self):\n while True:\n # Watch for keyboard and mouse events.\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n # redraw the screen during each pass through the loop.\n self.screen.fill(self.bg_color)\n self.falcon.blitme()\n\n # Make the most recently drawn screen visible\n pygame.display.flip()", "def main():\n global numrect\n global rectsize\n rectsize += 1\n if rectsize > 30:\n rectsize = 30\n numrect += 2\n # print(\"rectsize:\\t\" + str(rectsize))\n # print(\"numrect:\\t\" + str(numrect))\n maze, rectangles = MazeGenerator.main(msize, numrect, rectsize)\n global lvl\n # print(str(lvl))\n lvl += 1\n pygame.display.set_caption(\"Dungeon Crawlers\" + \" \" + \"Monsters Killed: \" + str(mobsKilled) + \" Current Floor: \" + str(lvl))\n runMaze(maze, rectangles)", "def run_application():\n show_theme_message()\n keep_playing = 'y'\n health_meter = {}\n reset_health_meter(health_meter)\n show_game_mission()\n\n while keep_playing == 'y':\n reset_health_meter(health_meter)\n play_game(health_meter)\n keep_playing = input(\"\\nPlay again? 
Yes(y)/No(n): \")", "def run(self, GameState):\n pass", "def execute(self):\n self.init()\n\n while self.running:\n self.render()\n self.events()\n\n pygame.quit()", "def main():\n \n # load_and_initialize_func()\n\n loop_and_update_forever()\n\n pygame.quit()", "def run(self):\n self.__power_on()\n\n self.__main()", "def run():\n import hmmmAssembler ; reload(hmmmAssembler) # import helpers\n hmmmAssembler.main(Random) # this runs the code!", "def main():\n global TURRET\n os.environ['SDL_VIDEO_CENTERED'] = '1'\n pg.init()\n pg.display.set_caption(CAPTION)\n pg.display.set_mode(SCREEN_SIZE)\n TURRET = pg.image.load(\"turret.png\").convert()\n TURRET.set_colorkey(COLOR_KEY)\n Control().main_loop()\n pg.quit()\n sys.exit()", "def main():\n game = TinkerGame()\n game.setup()\n while game.calculate_points() > 0 and not game.game_over:\n game.play()\n game.end()", "def main():\n run_it = scene.Control()\n state_dict = {\"TITLE\" : title.Title(),\n \"INTRO\" : cutscene.Cutscene0(),\n \"GAMEPLAY\" : gameplay.gamePlay(),\n \"ENDING\" : cutscene.Cutscene1()\n }\n run_it.setup_states(state_dict, \"TITLE\")\n run_it.main()", "def main():\n dealCards().mainloop()", "def main():\n game = Hangman()\n game.play_hangman()", "def main():\n even_game()", "def main():\n secret_word = get_word()\n play_game(secret_word)", "def play_game():\n pass", "def main():\n try:\n check_os()\n set_arguments(add_arguments())\n start_attack()\n except AttributeError as e:\n show_error(e)\n except TypeError as e:\n show_error(e)\n except KeyboardInterrupt:\n sys.exit(0)\n except Exception as e:\n show_error(e)", "def main():\n game = Game(TIMES, HARDNESS)\n game.start()\n game.print_score()", "def run():\n # first load all sprites\n print(\"Loading sprites...\", end=\"\")\n sprite_db = _load_sprites()\n\n # abort if failed\n if sprite_db is None:\n print(\"\\nERROR in loading sprites. 
Aborting...\")\n return\n\n # now sort keys\n sprite_db_keys = sorted(sprite_db.keys())\n\n # otherwise success\n print(\"DONE\")\n\n choice = True\n while choice is not None:\n\n # set apropriate title text\n if _need_to_gen_sprites:\n title_entry = (\"Sprite Maker\" + MSG_UNSAVED, \"Option: \")\n else:\n title_entry = (\"Sprite Maker\", \"Option: \")\n\n menu_main[0] = title_entry\n\n choice = menutils.menu(menu_main)\n\n if choice is not None:\n result = choice(sprite_db, sprite_db_keys)\n\n # only make sprite returns a value, which is the updated keys\n # list\n if result is not None:\n sprite_db_keys = result\n\n elif _need_to_gen_sprites:\n # user hit None, but we should make sure that they wanted to leave\n # without saving changes\n menutils.clear_screen()\n print(\"\\n\\n\" + MSG_WARN_GEN)\n if not menutils.ask(\"Leave this menu\"):\n choice = True", "def main():\n\n # Fix crackling audio\n util.set_environment('PULSE_LATENCY_MSEC', '60')\n\n # Replace launcher with game exe in proton arguments\n util.replace_command('FF9_Launcher.exe', 'x64/FF9.exe')", "def main():\r\n\r\n #set the display, caption, and timer\r\n pygame.init()\r\n mainClock = pygame.time.Clock()\r\n windowSurface = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT), 0, 32)\r\n pygame.display.set_caption(\"Cat's Big Adventure\")\r\n\r\n #Display a menu, choose a level and instantiate a game\r\n display_menu(windowSurface)\r\n\r\n #initialize the game\r\n stats = [6]\r\n game = Game(stats)\r\n \r\n # run the game loop until the user quits\r\n while True:\r\n # Process events (keystrokes, mouse clicks, etc)\r\n game.process_events(windowSurface)\r\n\r\n # Update object positions, check for collisions\r\n game.run_logic()\r\n \r\n # Draw the current frame\r\n game.display_frame(windowSurface)\r\n\r\n #draw background image\r\n background_image = pygame.image.load(\"sky.png\").convert()\r\n windowSurface.blit(background_image, [0, 0])\r\n \r\n mainClock.tick(FRAMERATE)", "def run():\n renanme_action()\n\n write_anim()\n alc.save_file()", "def main():\r\n # create the EdenLudo sample\r\n EdenEvolves = EdenLudo()\r\n # ru-n the scene\r\n run()", "def main():\n game_of_life(10, 20)", "def Gameloop():", "def main():\n arcade.open_window(WINDOW_WIDTH, WINDOW_HEIGHT, \"Snake.exe\")\n # Set the window background colour\n arcade.set_background_color(light_green)\n\n # Calls the on_draw method every 1/3(20 seconds) of a second\n arcade.schedule(on_draw, 1/3)\n # Keeps the window open until closed by the user\n arcade.run()", "def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline= True ) # set agent to track\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.5) # reduce update_delay to speed up simulation\n sim.run(n_trials=100) # press Esc or close pygame window to quit", "def main():\n play_game(progression)", "def test_runGame(self):\n # this is tested by playing the game. 
No good way to unit test this.\n pass", "def run_cli_game():\n # Set up game\n view = ConsoleView()\n game = GameEngine(view)\n\n # Game loop\n while not game.game_over:\n view.turn_started()\n\n # Get move to make from user and execute it\n move = input()\n print()\n \n execute_move(move, game, view)", "def main(self):\n _age = info.getInfo(self)\n _flag = game.check_age(self, _age)\n if _flag == False:\n exit()\n game.wants_to_play(0)", "def run():\n main()", "def run_game(self) -> None:\n\n self.setup_play()\n pg.mixer.music.load(path.join(sound_dir, 'theme_full.wav'))\n pg.mixer.music.set_volume(0.3)\n pg.mixer.music.play(-1, fade_ms=1000)\n\n while self.state == State.PLAY:\n\n # Using clock.tick each loop ensures framerate is limited to target FPS\n self.dt = self.clock.tick(FPS)\n\n self.events()\n\n if self.game_state == InGameState.READY:\n # A pause before the game starts.\n\n state_text = \"READY!\"\n\n if self.display_timer is None:\n self.display_timer = 1\n\n elif self.display_timer == 0:\n self.display_timer = None\n self.game_state = InGameState.RUNNING\n\n elif self.game_state == InGameState.COMPLETE:\n # Player survived the time limit and moves n to next level.\n\n state_text = \"You survived!\"\n\n if self.display_timer is None:\n self.display_timer = 1\n\n elif self.display_timer == 0:\n self.display_timer = None\n self.setup_play(reset=True)\n self.game_state = InGameState.READY\n\n else:\n # Regular update step\n\n self.update()\n\n if self.display_timer is None:\n state_text = None\n elif self.display_timer == self.timer:\n self.display_timer = None\n\n if self.player.death_timer == 0:\n if self.lives == 0:\n self.state = State.GAME_OVER\n else:\n self.setup_play(reset=True)\n self.game_state = InGameState.READY\n\n if self.kill_bonus is None:\n\n if self.no_kills() >= self.target_no_kills:\n\n self.display_timer = self.timer - 2\n\n half_time = TIME_LIMIT // 2\n if self.timer >= half_time:\n self.kill_bonus = (self.timer - half_time) // 10 * ENEMY_CLEARANCE_BONUS\n state_text = f\"Kill bonus: {self.kill_bonus}\"\n else:\n self.kill_bonus = 0\n state_text = f\"Too slow - No kill bonus\"\n\n if self.timer == 0:\n self.game_state = InGameState.COMPLETE\n\n self.draw(state_text=state_text)", "def main():\n initialize()\n inputs = InputsTemp()\n\n ui_font = pygame.font.SysFont(\"Comic Sans MS\", 50)\n\n assets_library = AssetsLibrary((Path(__file__).parent / \"Assets\"))\n\n # todo: create display class to wrap display from pygame\n window = setup_display(inputs.width_height)\n\n background_img = assets_library.assets.bg_black\n\n run = True\n FPS = 60\n lives = 5\n level = 1\n clock = pygame.time.Clock()\n\n ui_margin = {\n \"left\": 10,\n \"right\": 10,\n \"top\": 10,\n \"bottom\": 10,\n }\n\n def redraw_window():\n window.blit(background_img.get_image(inputs.width_height), (0, 0))\n\n lives_label = ui_font.render(f\"lives: {lives}\", 1, (255, 255, 255))\n level_label = ui_font.render(f\"level: {level}\", 1, (255, 255, 255))\n\n window.blit(lives_label, (ui_margin[\"left\"], ui_margin[\"top\"]))\n window.blit(level_label, (inputs.width_height[0] - level_label.get_width() - ui_margin[\"right\"], ui_margin[\"top\"]))\n pygame.display.update()\n\n while run:\n clock.tick(FPS)\n\n redraw_window()\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n run = False\n\n print(\"Game ended\")", "def main():\n run_it = tools.Control(prepare.ORIGINAL_CAPTION)\n state_dict = {\"SPLASH\" : splash.Splash(),\n \"MENU\" : menu.Menu(),\n \"DEMO\" : demo.Demo(),\n 
\"GAME\" : game.Game()}\n run_it.setup_states(state_dict, \"SPLASH\")\n run_it.main()", "def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # set agent to track\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.5) # reduce update_delay to speed up simulation\n sim.run(n_trials=100) # press Esc or close pygame window to quit", "def run(self):\n self.cmdloop()", "def run(self):\n # TODO: Clean this up to better facilitate running as client: There's \n # still too much being done in this function.\n #\n # Only start if we've been appropriately initialised\n # TODO: Are assertion checks stripped out in optimised builds? Is this\n # the wrong method for an important check?\n assert self.isInitialised, \"Detective must be initialise()d before \\\nrunning.\"\n #\n ## If not secretive, announce our cards ################################\n if not self.SECRETIVE:\n announcestr = \"Preparing for battle. I hold cards: \"\n for card in self.myCards:\n announcestr += game.CARDNAMES[card]+\", \"\n self.hook_notifydebug(announcestr[:-2],\"Velma.run\")\n #\n #\n # TODO: Move the following commented code stack to a test routine.\n # Miss Scarlet known to be culprit\n #ui.dbgstatus('tweak','Miss Scarlet known culprit')\n #for ixPlayer in range(1,self.nPlayers):\n # self.event_pass(character=4,room=8,weapon=19,player=ixPlayer)\n # Kitchen known to be scene\n #ui.dbgstatus('tweak','Kitchen known scene')\n #for ixPlayer in range(1,self.nPlayers):\n # self.event_pass(character=0,room=9,weapon=19,player=ixPlayer)\n # Unseen answer 1 Plum/Billiard/Wrench\n #ui.dbgstatus('tweak','Unseen answer from 1')\n #self.event_unseenresponse(character=1,room=12,weapon=20,shower=1,viewer=3)\n # 1 known to have Peacock\n #ui.dbgstatus('tweak','1 known has Peacock')\n #self.event_seenresponse(card=3,shower=1,viewer=0)\n # 1 known not to have candlestick\n #ui.dbgstatus('tweak','1 known without candlestick')\n #self.event_pass(character=0,room=8,weapon=16,player=1)\n # 2 known to have knife\n #ui.dbgstatus('tweak','2 known has knife')\n #self.event_seenresponse(card=15,shower=2,viewer=0)\n # 2 known to have either White or Lounge or Candlestick\n #ui.dbgstatus('tweak','Unseen answer from 2')\n #self.event_unseenresponse(character=5,room=7,weapon=16,shower=2,viewer=1)\n # 3 known has ballroom\n #ui.dbgstatus('tweak','3 known has ballroom')\n #self.event_seenresponse(card=10,shower=3,viewer=0)\n #\n #\n while not self.isGameOver:\n # Output everybody's identity and position on the board. 
This \n # information is not privileged, and should be helpful in ensuring\n # consistency between what Velma thinks is going on and the state\n # of the real-world board\n for ixPlayer in range(self.nPlayers):\n self.hook_notifydebug(\"Player \"+str(ixPlayer)+\" is \"+\n game.CARDNAMES[game.CHARS[self.playerCharIxs[ixPlayer]]]+\n \" at \"+\n str(self.charLocations[self.playerCharIxs[ixPlayer]]),\n \"Velma.run\")\n #\n # Remind our conversant of any pre-set scenario\n if self.DBGSCENARIOREMINDER:\n self.hook_notifydebug('Reminder: \\n' + self.DBGSCENARIOREMINDER,\n \"Velma.run\")\n #\n # If we're not competing with our conversant, plot our knowledge\n if not self.SECRETIVE:\n self.hook_displaysuspicions()\n #\n if self.ixHotSeat == 0:\n self.move()\n else:\n self.hook_observemove()\n #\n # The hot seat increments, and skips over any players previously\n # knocked out\n self.ixTurn += 1\n self.ixHotSeat = (self.ixHotSeat + 1) % self.nPlayers\n while self.playersOusted[self.ixHotSeat]:\n self.ixHotSeat = (self.ixHotSeat + 1) % self.nPlayers", "def main(argv):\n config_options = parse_config(CONFIG_FILE_NAME)\n arguments_options = parse_args(argv, **config_options)\n playgame.main(arguments_options)", "def main():\n\n # Init pygame\n pygame.init()\n screen = pygame.display.set_mode((500, 310))\n pygame.display.set_caption(\"Black Jack by Hackiflette\")\n\n # Load background image\n bgd_tile = load_image(\"background_menu.png\")\n background = pygame.Surface((500, 310))\n background.blit(bgd_tile, (0, 0))\n\n # Prepare text\n title_font = pygame.font.Font(None, 36)\n text = title_font.render(\"Black Jack Project\", 2, (255, 255, 255))\n\n # Display on windows\n screen.blit(background, (0, 0))\n screen.blit(text, (80, 30))\n pygame.display.flip()\n\n # Init sprites\n all_sprites = pygame.sprite.RenderUpdates()\n clock = pygame.time.Clock()\n\n play = True\n while play:\n\n # Clear all the sprites\n all_sprites.clear(screen, bgd_tile)\n all_sprites.update()\n\n # Check for events\n for event in pygame.event.get():\n if event.type == QUIT:\n play = False\n\n # Update the scene\n dirty = all_sprites.draw(screen)\n pygame.display.update(dirty)\n\n clock.tick(40)\n\n pygame.quit()", "def main():\n character1 = generate_random_character(\"Dr. Bones\", 100, 60, 15, 5)\n character2 = generate_random_character(\"Mr. 
Meeseeks\", 100, 60,\n 15, 5)\n battle = BattleSimulator(character1, character2)\n battle.simulate()", "def main():\n field = Field(10, 10)\n snake = Snake((0, 0))\n game = Game(field, snake)\n game.start()", "def run_game(self):\n while True:\n self._check_event()\n self._update_screen()", "def run(self):\r\n\r\n # If any of the test constructors update the settings, reflect\r\n # those changes on the GUI before running\r\n if GUIEnabled:\r\n self.gui_table.updateGUI(self.settings)\r\n self.clock = pygame.time.Clock()\r\n self.screen.fill((0, 0, 0))\r\n\r\n # Run the simulation loop\r\n self.SimulationLoop([0, 0, 0])\r\n\r\n if GUIEnabled and self.settings.drawMenu:\r\n self.gui_app.paint(self.screen)\r\n\r\n pygame.display.flip()\r\n self.clock.tick(self.settings.hz)\r\n self.fps = self.clock.get_fps()", "def run(self):\n for move in self.model.moves:\n pygame.event.get()\n self.model.update_matrix(self.model.board.cars[move[0]], move[1])\n self.setup()\n time.sleep(0.5)\n \n time.sleep(5)\n pygame.quit", "def Run():\r\n pass", "def run_episode(self):\n self.pygame_clock = pygame.time.Clock()\n while True:\n pygame.event.pump()\n is_human_agent = isinstance(self.agents[self.env.turn], HumanAgent)\n\n # handle exit event\n self.handle_input_event()\n\n # pick the next action\n if is_human_agent:\n self.handle_input_event()\n else:\n self.ai_event()\n self.place_a_disk()\n self.render()\n\n if self.event == Event.END_GAME:\n pygame.time.wait(self.END_GAME_DELAY)\n\n if self.event == Event.END_GAME_VIEW:\n pygame.time.wait(self.END_GAME_VIEW_DELAY)\n break", "def run_game(self):\n while True:\n self._check_events()\n self.update_screen()", "def main():\r\n\r\n pygame.init()\r\n pygame.display.init()\r\n\r\n # Set the pygame clock\r\n clock = pygame.time.Clock()\r\n\r\n pygame.display.set_caption(\"Blackbox game\")\r\n current_game = BlackBoxGame()\r\n clock = pygame.time.Clock()\r\n\r\n while True:\r\n current_game.check_events()\r\n clock.tick(60)\r\n current_game.update_screen()\r\n\r\n pygame.quit()", "def run_graphics():\n # Initialize Pygame window, fill it with solid color.\n pygame.init()\n screen = pygame.display.set_mode(SCREEN_RESOLUTION)\n pygame.display.set_caption(\"Nyarmageddon's Hexmaps\")\n screen.fill(color=BG_COLOR)\n\n my_map = _draw_map(screen)\n\n # Main loop.\n while True:\n # React to user actions, such as key presses.\n _handle_events(screen, my_map)\n\n # Update the screen.\n pygame.display.flip()", "def GAME_LOOP():\n pass", "def execute(self):\n\n # Set the value for the game loop to True.\n self.__running = True\n\n # Determines if the application delete the game instance or not.\n # When user loose and go back to the main menu, we can remove\n # the game resource.\n delete_on_leaving = False\n\n while self.__running:\n pygame.event.pump()\n event = pygame.event.wait()\n\n # Send events to the handler.\n self.on_event(event)\n\n # Perform checks about walls and items.\n self.on_loop()\n\n # Render graphics.\n self.render()\n\n if self.guardian.alive():\n # MacGyver's in front of the guardian.\n if self.macgyver.rect in self.guardian.adjacent_tiles:\n self.mixer.play_sound('wilhelm_scream')\n\n # Calculates whether MacGyver\n # will die or put the guardian to sleep.\n self.__running = self.guardian.is_beatable(self.macgyver)\n\n if not self.macgyver.alive():\n # MacGyver is dead, display the defeat screen.\n next_action = defeat_screen(self.screen, self.mixer)\n\n delete_on_leaving = \\\n self.handle_next_action(next_action)\n\n if self.macgyver.coordinates == 
self.finish_point:\n # MacGyver win, display the victory screen.\n next_action = victory_screen(self.screen, self.mixer)\n\n delete_on_leaving = self.handle_next_action(next_action)\n\n return delete_on_leaving", "def run(self):\n self.initialise()\n self.setup_disks()\n self.solve_puzzle()\n input('Finished. Press ENTER to exit.')", "def main(cls):\n parser = optparse.OptionParser()\n parser.add_option('-c', '--columns', type=\"int\", default=16)\n parser.add_option('-r', '--rows', type=\"int\", default=16)\n parser.add_option('-m', '--mines-density', type=\"float\", default=0.2,\n help=\"percent of mines: 0.15 is trivial, 0.2 good [default], 0.25 hard\")\n (options, args) = parser.parse_args()\n if args:\n parser.error(\"unexpected arguments: \" + \" \".join(args))\n \n game = cls(options.columns, options.rows, options.mines_density)\n game.window.mainloop()", "def run(self):\n if self.next_state == \"initialize_rexarm\":\n self.initialize_rexarm()\n\n if self.next_state == \"idle\":\n self.idle()\n\n if self.next_state == \"estop\":\n self.estop()\n\n if self.next_state == \"execute_tp\":\n self.execute_tp()\n\n if self.next_state == \"execute\":\n self.execute()\n\n if self.next_state == \"calibrate\":\n self.calibrate()\n\n if self.next_state == \"manual\":\n self.manual()\n\n if self.next_state == \"learn\":\n self.learn()\n\n if self.next_state == \"remember\":\n self.remember()\n\n if self.next_state == \"write\":\n self.write()\n\n if self.next_state == \"get_color\":\n self.get_color()\n\n if self.next_state == \"find_blocks\":\n self.find_blocks()\n\n # if self.next_state == \"dance\":\n # self.execute_dance()", "def main():\n\n # Create logging file, rotate if filesize exceeds 1MB\n logger.add(\"logs/{time}.log\", rotation=\"1 MB\")\n\n GameContainer()\n logger.info(\"Started the game launcher. Make sure to support pygame!\")", "def run(self):\n\n print 'Starting Event Loop'\n\n running = True\n # run until something tells us to stop\n while running:\n\n # tick pygame clock\n # you can limit the fps by passing the desired frames per seccond to tick()\n self.clock.tick(60)\n\n # handle pygame events -- if user closes game, stop running\n running = self.handleEvents()\n\n # update the title bar with our frames per second\n pygame.display.set_caption('Pygame Tutorial 4 - Breakout %d fps' % self.clock.get_fps())\n\n # render blocks\n self.blocks.clear(self.window, self.background)\n dirty = self.blocks.draw(self.window)\n\n # render everything else\n self.sprites.clear(self.window, self.background)\n dirty += self.sprites.draw(self.window)\n\n # draw a grid on our background\n self.drawGrid()\n\n # blit the dirty areas of the screen\n pygame.display.update(dirty) # updates just the 'dirty' areas\n\n print 'Quitting. 
Thanks for playing'", "def main():\n\n # This is for text mode.\n\n if len(sys.argv) == 2 and sys.argv[1] == '-t':\n model.main()\n sys.exit(0)\n\n # Do initialization.\n\n pygame.init()\n screen = pygame.display.set_mode(DISPLAY_MODE)\n pygame.display.set_caption(TITLE)\n clock = pygame.time.Clock()\n background = pygame.Surface(screen.get_size()).convert()\n background.fill(BACKGROUND)\n pygame.display.flip()\n\n game_model = model.Game()\n board_view = view.Board(game_model)\n score_board = view.ScoreBoard(game_model)\n rendering_groups = [board_view, score_board]\n\n while True:\n\n clock.tick(FRAMES_PER_SEC)\n scheduler.tick()\n\n # Handle user input.\n\n for event in pygame.event.get():\n if event.type == KEYDOWN:\n if event.key in (K_ESCAPE, K_q) or event.type == QUIT:\n sys.exit(0)\n elif event.key == K_h:\n url = \"file://\" + os.path.abspath(data.find(\"help.html\"))\n webbrowser.open(url, new=True)\n elif event.key == K_r:\n game_model.reset()\n elif event.type == MOUSEBUTTONDOWN:\n for square_view in board_view:\n if square_view.rect.collidepoint(*pygame.mouse.get_pos()):\n xyz = square_view.square_model.xyz\n try:\n game_model.move(xyz)\n except ValueError:\n pass\n break\n\n # Provide the simulation and render it.\n\n for i in rendering_groups:\n i.update()\n i.clear(screen, background)\n pygame.display.update(i.draw(screen))", "def main():\n args = get_parser().parse_args()\n players = prepare_game(\n decks_count=args.decks,\n auto_mode=args.auto_mode,\n player_one_name=args.name_player,\n players_count=args.players,\n )\n game(players=players)", "def run(self):\n pygame.init()\n pygame.display.set_caption(\"Genetic Game\")\n self.screen = pygame.display.set_mode((self.SCREEN_W, self.SCREEN_H), 0, 32)\n\n self.ominus_sprites = [OminusSprite(self.screen, o, PLAYERS_COLORS[o.id]) for o in self.model.get_players()]\n for o in self.ominus_sprites:\n self.agent_group.add(o)\n\n self.wall_sprites = [WallSprite(self.screen, w) for w in self.model.get_walls()]\n for w in self.wall_sprites:\n self.terrain_group.add(w)", "def run(self):\n while True:\n if self.game_over: \n return \n\n self.handle_events() \n if self.paused:\n continue\n\n self.update_generation()\n self.draw_grid()\n\n self.cap_frame_rate()", "def emulator(filename):\n\n while True:\n log(\"dbg>\", end=\"\")\n if scripting_enabled():\n cmd = from_script()\n if len(cmd) == 0:\n continue\n else:\n cmd = input()\n if logging_enabled():\n log(cmd, echo_cmd = False)\n if cmd:\n cmd = cmd.strip()\n if cmd[0] == \"q\":\n break\n \n args = cmd.split()\n if args[0] in cmds:\n cmds[args[0]](cmd, len(args), args)\n elif args[0][0] == \"r\":\n cmds[\"r\"](cmd, len(args), args)\n else:\n log(\"Unsupported command. 
Try one of \" + \n str(list(cmds.keys())))\n log(\"Exiting\")\n return \"\"", "def main():\n # parse arguments from the command line (argparse validates arguments)\n args = _get_args()\n # build the environment with the given ID\n env = gym.make(args.env)\n # play the environment with the given mode\n if args.mode == 'human':\n play_human(env)\n else:\n play_random(env, args.steps)", "def run():\n\n args = parse_arguments()\n app = rummage_app.RummageApp(args)\n app.MainLoop()\n\n return 0", "def Main():\n EnigmaSim = simulation() #Creates the simulation object\n EnigmaSim.Run() #Runs the simulation", "def main():\n app = RunSnakeRunApp(0)\n app.MainLoop()", "def main():\n testlife = CellularAutomation()\n testlife.printParams()\n testlife.printLifeformsDir()\n testlife.printRuleset()\n testlife.printDisplay()\n \n params = getConfig()\n XRES = params[0]\n YRES = params[1]\n BLOCK_SIZE = params[2]\n DELTA_T = params[3]\n FCOLOR = params[4]\n BCOLOR = params[5]\n XMAX = XRES / BLOCK_SIZE\n YMAX = YRES / BLOCK_SIZE\n\n options, args = getOptions() #IGNORE:W0612\n if options.directory:\n printLifeformsDir()\n sys.exit(0)\n \n \n if options.ruleset == None:\n ruleset_string = 'B3/S23' # Conway's Life\n else:\n ruleset_string = options.ruleset\n ruleset_string = ruleset_string.upper()\n ruleset = createRuleset(ruleset_string)\n\n pygame.display.init()\n fullname = os.path.join('data', 'glider32x32.bmp')\n seticon(fullname)\n os.environ['SDL_VIDEO_WINDOW_POS'] = 'center'\n screen = pygame.display.set_mode((XRES, YRES))\n pygame.display.set_caption('My So-Called Life')\n pygame.mouse.set_visible(1)\n \n pygame.time.set_timer(USEREVENT, DELTA_T) # change state\n\n #Create The Backgound\n background = pygame.Surface(screen.get_size())\n background = background.convert()\n background.fill(BCOLOR)\n \n #Display The Background\n screen.blit(background, (0, 0))\n pygame.display.flip()\n \n #Prepare Game Objects\n white_block = pygame.Surface((BLOCK_SIZE - 2, BLOCK_SIZE - 2))\n white_block.fill(FCOLOR)\n\n if options.random or \\\n (not options.random and options.filename == None) or \\\n options.filename == 'random.lif':\n blocks, ca_matrix = randomStart(XMAX, YMAX, BLOCK_SIZE)\n else:\n try:\n filename = options.filename\n startx = options.startx\n starty = options.starty\n blocks, ca_matrix = loadStart(filename, startx, starty, XMAX, YMAX, BLOCK_SIZE)\n #print \n except Usage, err:\n print >>sys.stderr, err.msg\n print >>sys.stderr, \"for help use --help\"\n return 2\n\n for block in blocks:\n screen.blit(white_block,block)\n pygame.display.flip() \n\n print 'Ruleset: ', ruleset_string, rulesetName(ruleset_string)\n\n generation = 0\n while 1:\n if options.sstep:\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.display.quit()\n return\n elif event.type == KEYDOWN and event.key == K_SPACE:\n ca_matrix = updateCA_MATRIX(ca_matrix, XMAX, YMAX, ruleset)\n blocks = updateDisplay(ca_matrix, XMAX, YMAX, BLOCK_SIZE) \n screen.blit(background, (0, 0))\n for block in blocks:\n screen.blit(white_block,block)\n pygame.display.flip()\n generation += 1\n print 'Generation: ', generation\n elif event.type == KEYDOWN and event.key == K_ESCAPE:\n pygame.display.quit()\n return\n elif event.type == KEYDOWN and event.key == K_f:\n pygame.display.toggle_fullscreen()\n elif event.type == KEYDOWN and event.key == K_d:\n printLifeformsDir()\n elif event.type == KEYDOWN and event.key == K_s:\n options.sstep = False\n elif event.type == KEYDOWN and event.key == K_w:\n # open a file for output\n try:\n 
fullname = os.path.join('lifeforms', 'snapshot.cells')\n fout = open(fullname, \"w\")\n except IOError:\n print \"Error! Cannot open file\"\n print\n sys.exit(1)\n \n # get program version number\n id_list = __version__.split()\n version_num = id_list[2]\n fout.write('! Output from mylife.py version' + \\\n version_num + '\\n')\n fout.write('! XMAX: ' + str(XMAX) + '\\n')\n fout.write('! YMAX: ' + str(YMAX) + '\\n')\n line_list = []\n for row in range(YMAX):\n for col in range(XMAX):\n if ca_matrix[row][col] == 1:\n line_list.append('0')\n else:\n line_list.append('.')\n line_list.append('\\n')\n line = \"\".join(line_list)\n fout.write(line)\n line_list = []\n \n fout.close()\n print 'Screen written to file \"snapshot.cells\"'\n else:\n #Handle Input Events\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.display.quit()\n print generation, 'generations run.'\n return\n elif event.type == USEREVENT:\n ca_matrix = updateCA_MATRIX(ca_matrix, XMAX, YMAX, ruleset)\n blocks = updateDisplay(ca_matrix, XMAX, YMAX, BLOCK_SIZE) \n for block in blocks:\n screen.blit(white_block,block)\n elif event.type == KEYDOWN and event.key == K_ESCAPE:\n pygame.display.quit()\n print generation, 'generations run.'\n return\n elif event.type == KEYDOWN and event.key == K_d:\n printLifeformsDir()\n elif event.type == KEYDOWN and event.key == K_s:\n options.sstep = True\n\n #Draw Everything\n generation += 1\n screen.blit(background, (0, 0))\n for block in blocks:\n screen.blit(white_block,block)\n pygame.display.flip()", "def run(self):\n while True:\n if self.is_game_over():\n break\n self.run_turn()", "def run(self):\n self.run()", "def main(args):\n global numBirds, numToads\n if len(args) > 2:\n numBirds = int(args[1])\n numToads = int(args[2])\n gameLoop(GameManager(MapSize, numBirds, numToads))", "async def game(self):\n pass", "def start_game(self):\n\n\t\tpass", "def start_game(self):\n print(\"hi there, game started!\")\n self.draw()", "def main():\n\n window = ArcadeButWithStuff(screen_h=920, screen_w=1080)\n\n window.setup()\n arcade.run()", "def game_tick_run(self):\n pass", "def main(win):\n\tbird = Bird()\n\tpipes = []\n\n\tclock = pygame.time.Clock()\n\tlost = False\n\n\trun = True\n\twhile run:\n\t\tclock.tick(30)\n\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.QUIT:\n\t\t\t\trun = False\n\t\t\t\tbreak\n\n\t\t\tif event.type == pygame.MOUSEBUTTONDOWN:\n\t\t\t\tif event.key == pygame.K_SPACE:\n\t\t\t\t\tbird.jump()\n\t\t\t\t\tprint(\"jump\")\n\n\t\tbird.move()\n\n\t\tfor pipe in pipes:\n\t\t\tif pipe.collide(bird.x, bird.y):\n\t\t\t\tprint(\"bird hit pipe\")\n\t\t\t\tlost = True\t\n\n\t\tif lost:\n\t\t\tbreak\n\n\tbird.die()\n\tend_screen()", "def main(self,Surf):\n while True:\n if self.state == \"GAME\":\n self.event_loop()\n self.update(Surf)\n elif self.state == \"QUIT\":\n break\n pg.display.update()\n self.Clock.tick(65)" ]
[ "0.6874502", "0.6857758", "0.68047696", "0.6561532", "0.6534774", "0.64771336", "0.64398795", "0.63985527", "0.638074", "0.63475657", "0.6324274", "0.6295392", "0.6273534", "0.62623614", "0.6261345", "0.62363064", "0.6226019", "0.621045", "0.61986893", "0.6194582", "0.6194038", "0.6159195", "0.61590964", "0.6148583", "0.6139057", "0.6138463", "0.6130612", "0.6123007", "0.6114873", "0.61127114", "0.6111481", "0.61106676", "0.6093362", "0.60886526", "0.6080751", "0.6072416", "0.60651124", "0.6064644", "0.60598487", "0.6053626", "0.604685", "0.6036916", "0.6032295", "0.6018166", "0.6017024", "0.60113704", "0.599788", "0.5994185", "0.5988572", "0.5984766", "0.5977409", "0.5972793", "0.59628624", "0.595757", "0.5956754", "0.59566116", "0.5955825", "0.5955468", "0.5954678", "0.5951971", "0.5949825", "0.5949274", "0.5947864", "0.5946933", "0.59446985", "0.59399086", "0.59392065", "0.5939151", "0.59276974", "0.5924333", "0.5912324", "0.5907902", "0.59066963", "0.5905608", "0.5902494", "0.58979726", "0.58966297", "0.5889551", "0.58772177", "0.5876275", "0.5874447", "0.58709544", "0.58630955", "0.5859996", "0.5859406", "0.58451355", "0.5841975", "0.58412766", "0.58383083", "0.5826552", "0.5821601", "0.5819079", "0.58147866", "0.5812831", "0.58045846", "0.5802321", "0.5785588", "0.57759446", "0.5775591", "0.577215", "0.5769058" ]
0.0
-1
Quits the Mazescape game
def quit_game(self):
    pg.quit()
    sys.exit()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _quit(self) -> None:\n self._show_bag(True)\n print(\"Thanks for playing!\")\n exit()", "def qpressed(): #QUITTNG FUNCTION\n #print(\"q pressed\")\n sys.exit()", "def __quit(self):\n self.clear_screen()\n self.__print_logo()\n print('\\n'*3)\n self.cool_print(\"THANKS FOR PLAYING!\")\n sleep(2)\n self.stop_game = True", "def quit_game(self):\n self.done = True", "def quit():\n #quits from python turtle graphics screen\n bye()", "def command_quit(self, arg):\n self.write('221 Bye', self.finish)", "def cmd_quit(args):", "def use(self):\n print(\"Type 'back' to go back.\")\n while True:\n item_choice = player_choice(\"\")\n if item_choice == 'back':\n break\n elif item_choice in inventory:\n if item_choice == \"filled kettle\":\n print(\"You turn off the fire and find a burnt note with \"\n \"letters 'gjkh'. It looks like a password of some kind.\")\n break\n else:\n print(\"That is the wrong item!\")\n else:\n print(\"You have not found the item yet.\")", "def do_quit(self, arg):\n\n print('Good Bye!')\n exit()", "def quit_everything():\r\n \r\n pygame.quit()\r\n sys.exit()", "def bye(event=None):\r\n s_message.set(\"{quit}\")\r\n send()", "def quit():\n return 'quit'", "def do_q(self, arg):\n self.do_exit(arg)", "def Quit():\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n return True", "def test_quit_game(run):\n out, _ = run(dork.cli.quit_game)\n assert \"Thank you\" in out", "def call_quit(self, _):\n return MENU_QUIT", "def call_quit(self, _):\n return MENU_QUIT", "def call_quit(self, _):\n return MENU_QUIT", "def quit(self):\n pass", "def quit(self):\n self.running = False\n pygame.quit()", "def quit():\r\n autoquit()", "def quit(self):\n self._pygame.quit()", "def quit(self):\n\t\tpass", "def on_actionQuit_triggered(self):\n\t\texit()", "def do_quit(self, arg):\n exit()", "def quitting(self):\n pass", "def do_quit(self, args):\n print('Good Bye!')\n exit()", "def stop(self): \r\n print(\"Thank you for playing the game, hope you had fun!!!\")\r\n self.reset()", "def do_quit(self, arg):\n return True", "def do_quit(self, arg):\n return True", "def do_quit(self, arg):\n return True", "def do_quit(self, arg):\n return True", "def quit():\n\tsys.exit()", "def command_quit(arguments):\n global quitting\n quitting = True\n return 'Now quitting'", "def clearing():\n choices = {\"Go to the \\033[1;32mfarmhouse\\033[0m\" : [\"farm\", \"house\", \"farmhouse\"],\n \"Go to the \\033[1;32mshed\\033[0m\" : [\"shed\"], \"Enter the \\033[1;32mforest\\033[0m\" : [\"forest\"]}\n print_pause(\"\\nYou are standing in a pleasant field on the outskirts\"\n \" of a forest.\")\n if \"clean up bottles\" not in actions:\n print_pause(\"The beauty of nature has been somewhat\"\n \" spoiled by the fact that someone left a bunch of\"\n \" bottles lying around the place.\")\n choices.update({\"Clean up \\033[1;32mbottles\\033[0m\" : [\"clean up\", \"bottles\"]})\n print_pause(\"To the right you see a large, old farmhouse.\")\n print_pause(\"Directly ahead is a small garden shed.\")\n print_pause(\"To your left stands a dark, forbodeing forest. 
\")\n if \"note\" not in inventory:\n print_pause(\"You notice what appears to be a note on the ground.\")\n choices.update({\"Pick up the \\033[1;32mnote\\033[0m\" : [\"note\"]})\n action = valid_input(choices)\n print(action)\n if action == \"Go to the farmhouse\":\n farmhouse()\n elif action == \"Go to the shed\":\n shed()\n elif action == \"Enter the forest\":\n forest(monster)\n elif action == \"Pick up the note\":\n print_pause(\"You pick up the hand-scribbled note and read it.\")\n print_pause(\"The note says:\\n\")\n print_pause(\"Come on up to the farm house\")\n print_pause(\" - Emily\")\n print_pause(\"p.s. - the magic word is XYZZY\\n\")\n inventory.append(\"note\")\n clearing()\n elif action == \"Clean up bottles\":\n print_pause(\"You pick up your discarded empties. Make sure you dispose\"\n \" of them in a recycling bin.\")\n actions.append(\"clean up bottles\")\n inventory.append(\"empty bottles\")\n clearing()", "def quit():\n raise EmbeddedConsoleExit", "def call_q(self, _):\n return MENU_GO_BACK", "def call_q(self, _):\n return MENU_GO_BACK", "def call_q(self, _):\n return MENU_GO_BACK", "def endGame(self):\n pass", "def OnQuit(self, e):\n\t\tself.EndRun()", "def do_quit(self, arg):\n cprint(('Thankyou for Using this todo Application!'), 'yellow')\n exit()", "def handler(state, _):\n if state[0] == 'u':\n player.quit()\n playlist.clear()\n print('Quitting manually.')", "def quit():\n while True:\n try:\n choice = input('press q to quit \\n r to restart')\n choice = choice.lower() # sanitize inputs before comparision\n\n except TypeError:\n print('Please enter q to quit or r to restart')\n if choice not in ('q', 'r'):\n continue\n else:\n break\n if choice == 'q':\n return True\n elif choice == 'r':\n return False", "def quit(self):\n return pygame.event.Event(pygame.QUIT)", "def end_game():\n pygame.quit()\n exit()", "def doQuit(self):\n self.protocol.sendPacket(networkpackets.PacketPokerTableQuit(**self._serial_and_game_id))", "def clickQuit(self, event):\n self.quitFlag = True", "def on_action_Quit_triggered(self):\n quit()", "def quit(self):\n self.quit = True", "def shutdown(self):\n ev3.Sound.speak(\"Goodbye\")", "def do_quit(self, arg):\n self.do_exit(arg)", "def bye_bye():\n\n os.system(\"clear\")\n print(\"Your opponent was too strong...\")\n time.sleep(1)\n print(\"You've should be better prepared...\")\n time.sleep(2)\n print()\n print()\n for i in range(20):\n os.system(\"clear\")\n print(\"\"\"\n * ) ( \n ( ( ( ` ( /( )\\ ) \n )\\ ) )\\ )\\))( ( )\\())( ( ( (()/( \n (()/( ((((_)( ((_)()\\ )\\ ((_)\\ )\\ )\\ )\\ /(_)) \n /(_))_)\\ _ )\\(_()((_|(_) ((_|(_)((_|(_)(_)) \n (_)) __(_)_\\(_) \\/ | __| / _ \\ \\ / /| __| _ \\ \n | (_ |/ _ \\ | |\\/| | _| | (_) \\ V / | _|| / \n \\___/_/ \\_\\|_| |_|___| \\___/ \\_/ |___|_|_\\ \"\"\")\n time.sleep(0.05)\n os.system(\"clear\")\n print(\"\"\"\n * ) ( \n ( ( ( ` ( /( )\\ ) \n )\\ ) )\\ )\\))( ( )\\())( ( ( (()/( \n (()/( ((((_)( ((_)()\\ )\\ ((_)\\ )\\ )\\ )\\ /(_)) \n /(_))_)\\ _ )\\(_()((_|(_) ((_|(_)((_|(_)(_)) \n (_)) __(_)_\\(_) \\/ | __| / _ \\ \\ / /| __| _ \\ \n | (_ |/ _ \\ | |\\/| | _| | (_) \\ V / | _|| / \n \\___/_/ \\_\\|_| |_|___| \\___/ \\_/ |___|_|_\\ \"\"\")\n time.sleep(0.05)\n os.system(\"clear\")\n print(\"\"\"\n * ) ( \n ( ( ( ` ( /( )\\ ) \n )\\ ) )\\ )\\))( ( )\\())( ( ( (()/( \n (()/( ((((_)( ((_)()\\ )\\ ((_)\\ )\\ )\\ )\\ /(_)) \n /(_))_)\\ _ )\\(_()((_|(_) ((_|(_)((_|(_)(_)) \n (_)) __(_)_\\(_) \\/ | __| / _ \\ \\ / /| __| _ \\ \n | (_ |/ _ \\ | |\\/| | _| | (_) \\ V / | _|| / \n \\___/_/ 
\\_\\|_| |_|___| \\___/ \\_/ |___|_|_\\ \"\"\")\n os.system(\"clear\")\n time.sleep(0.05)\n print(\"\"\"\n * ) ( \n ( ( ( ` ( /( )\\ ) \n )\\ ) )\\ )\\))( ( )\\())( ( ( (()/( \n (()/( ((((_)( ((_)()\\ )\\ ((_)\\ )\\ )\\ )\\ /(_)) \n /(_))_)\\ _ )\\(_()((_|(_) ((_|(_)((_|(_)(_)) \n (_)) __(_)_\\(_) \\/ | __| / _ \\ \\ / /| __| _ \\ \n | (_ |/ _ \\ | |\\/| | _| | (_) \\ V / | _|| / \n \\___/_/ \\_\\|_| |_|___| \\___/ \\_/ |___|_|_\\ \"\"\")\n os.system(\"clear\")\n time.sleep(0.05)\n print(\"\"\"\n * ) ( \n ( ( ( ` ( /( )\\ ) \n )\\ ) )\\ )\\))( ( )\\())( ( ( (()/( \n (()/( ((((_)( ((_)()\\ )\\ ((_)\\ )\\ )\\ )\\ /(_)) \n /(_))_)\\ _ )\\(_()((_|(_) ((_|(_)((_|(_)(_)) \n (_)) __(_)_\\(_) \\/ | __| / _ \\ \\ / /| __| _ \\ \n | (_ |/ _ \\ | |\\/| | _| | (_) \\ V / | _|| / \n \\___/_/ \\_\\|_| |_|___| \\___/ \\_/ |___|_|_\\ \"\"\")\n time.sleep(0.05)", "def do_quit(self, args):\n quit()", "def end_pygame(self):\n pygame.quit()", "def try_again(self):\n font = pygame.font.Font(None, CASE_SIZE)\n text = font.render('Press (y) to play again or' \\\n , True,(255, 255, 255), (0, 0, 0))\n self.screen.blit(text,(CASE_SIZE * 4, CASE_SIZE * 8))\n font = pygame.font.Font(None, CASE_SIZE)\n text = font.render('press (n) to quit' \\\n , True,(255, 255, 255), (0, 0, 0))\n self.screen.blit(text,(CASE_SIZE * 5, CASE_SIZE * 9))", "def on_click(self):\n arcade.play_sound(button, volume=constants.MUSIC_VOLUME / 40)\n\n global success\n global fails\n if success or fails == 20:\n reset_global_variables()\n self.minigame.window.show_view(self.minigame.main_view)\n else:\n self.minigame.window.show_view(self.minigame.main_view)\n print(f\"Exit Button.\")", "def play_DQN_game(self):\n self.reset()\n while(not self._exit):\n pg.event.pump()\n self.clock.tick(self.actions_per_second)\n self.check_for_exit()\n self.handle_keyboard_input()\n self.perform_DQN_actions()\n self.check_for_end_game()\n self.render()\n\n self.cleanup()", "def use(self):\n while True:\n print(\"Type 'back' to go back.\")\n item_choice = player_choice(\"\")\n if item_choice == 'back':\n break\n elif item_choice in inventory:\n if item_choice == \"bronze key\":\n print(\"You open the door and step outside.\")\n jt = Outside('outside')\n jt.just_there()\n else:\n print(\"That is the wrong item!\")\n else:\n print(\"You have not found the item yet.\")", "def do_quit(self, args):\n return True", "def do_quit(self, args):\n return True", "def play():\n\n while True:\n print(\"Press any key to pick a piece (or q to quit): \")\n user_input = getch.getch()\n clear_screen()\n\n if user_input != 'q':\n current_piece = choice(list(pieces.keys())) \n print(current_piece, ': ', pieces[current_piece]['icon'], '\\n', sep=\"\")\n print(pieces[current_piece]['move'], '\\n')\n else:\n print(\"Thanks for trying No Stress Chess®\")\n break", "def play_game():\n pass", "def run(self):\n r = self.engine.run()\n while r != QUIT_FLAG:\n if r == SWITCH_FLAG:\n if self.engines.index(self.engine) < len(self.engines) - 1:\n self.engine = self.engines[self.engines.index(self.engine) + 1]\n print self.engines.index(self.engine)\n self.e_e.reset_input()\n else:\n self.engine = self.engines[0]\n r = self.engine.run()\n pygame.quit()\n raise SystemExit", "def quit(self, *args, **kwargs):\n pass", "def do_quit(self, arg):\n # clear terminal first.\n os.system('cls' if os.name == 'nt' else 'clear')\n print colored.red('Goodbye.', bold=12)\n time.sleep(3) # delays for 3 seconds\n # clear terminal first.\n os.system('cls' if os.name == 'nt' else 'clear')\n time.sleep(0.25) # delays 
for 0.3 seconds\n print colored.magenta(' oooo$$$$$$$$$$$$oooo', bold=12)\n time.sleep(0.25) # delays for 0.3 seconds\n print colored.magenta(' oo$$$$$$$$$$$$$$$$$$$$$$$$o', bold=12)\n time.sleep(0.25) # delays for 0.3 seconds\n print colored.magenta(' oo$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$o o$ $$ o$', bold=12)\n time.sleep(0.25) # delays for 0.3 seconds\n print colored.magenta(' o $ oo o$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$o $$ $$ $$o$', bold=12)\n time.sleep(0.25) # delays for 0.3 seconds\n print colored.magenta(' oo $ $ \"$ o$$$$$$$$$ $$$$$$$$$$$$$ $$$$$$$$$o $$$o$$o$', bold=12)\n time.sleep(0.25) # delays for 0.3 seconds\n print colored.magenta(' \"$$$$$$o$ o$$$$$$$$$ $$$$$$$$$$$ $$$$$$$$$$o $$$$$$$$', bold=12)\n time.sleep(0.25) # delays for 0.3 seconds\n print colored.magenta(' $$$$$$$ $$$$$$$$$$$ $$$$$$$$$$$ $$$$$$$$$$$$$$$$$$$$$$$', bold=12)\n time.sleep(0.25) # delays for 0.3 seconds\n print colored.magenta(' $$$$$$$$$$$$$$$$$$$$$$$ $$$$$$$$$$$$$ $$$$$$$$$$$$$$ \"\"\"$$$', bold=12)\n time.sleep(0.25) # delays for 0.3 seconds\n print colored.magenta(' \"$$$\"\"\"\"$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ \"$$$', bold=12)\n time.sleep(0.25) # delays for 0.3 seconds\n print colored.magenta(' $$$ o$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ \"$$$o', bold=12)\n time.sleep(0.25) # delays for 0.3 seconds\n print colored.magenta(' o$$\" $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ $$$o', bold=12)\n time.sleep(0.25) # delays for 0.3 seconds\n print colored.magenta(' $$$ $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$\" \"$$$$$$ooooo$$$$o', bold=12)\n time.sleep(0.25) # delays for 0.3 seconds\n print colored.magenta(' o$$$oooo$$$$$ $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ o$$$$$$$$$$$$$$$$$', bold=12)\n time.sleep(0.25) # delays for 0.3 seconds\n print colored.magenta(' $$$$$$$$\"$$$$ $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ $$$$\"\"\"\"\"\"\"\"', bold=12)\n time.sleep(0.3) # delays for 0.3 seconds\n print colored.magenta(' \"\"\"\" $$$$ \"$$$$$$$$$$$$$$$$$$$$$$$$$$$$\" o$$$', bold=12)\n time.sleep(0.3) # delays for 0.3 seconds\n print colored.magenta(' \"$$$o \"\"\"$$$$$$$$$$$$$$$$$$\"$$\" $$$', bold=12)\n time.sleep(0.3) # delays for 0.3 seconds\n print colored.magenta(' $$$o \"$$\"\"$$$$$$\"\"\"\" o$$$', bold=12)\n time.sleep(0.3) # delays for 0.3 seconds\n print colored.magenta(' $$$$o oo o$$$\"', bold=12)\n time.sleep(0.3) # delays for 0.3 seconds\n print colored.magenta(' \"$$$$o o$$$$$$o\"$$$$o o$$$$', bold=12)\n time.sleep(0.3) # delays for 0.3 seconds\n print colored.magenta(' \"$$$$$oo \"\"$$$$o$$$$$o o$$$$\"\"', bold=12)\n time.sleep(0.3) # delays for 0.3 seconds\n print colored.magenta(' \"\"$$$$$oooo \"$$$o$$$$$$$$$\"\"\"', bold=12)\n time.sleep(0.3) # delays for 0.3 seconds\n print colored.magenta(' \"\"$$$$$$$oo $$$$$$$$$$', bold=12)\n time.sleep(0.3) # delays for 0.3 seconds\n print colored.magenta(' \"\"\"\"$$$$$$$$$$$', bold=12)\n time.sleep(0.3) # delays for 0.3 seconds\n print colored.magenta(' $$$$$$$$$$$$', bold=12)\n time.sleep(0.3) # delays for 0.3 seconds\n print colored.magenta(' $$$$$$$$$$\"', bold=12)\n time.sleep(0.3) # delays for 0.3 seconds\n print colored.magenta(' \"$$$\"\"\"\"', bold=12)\n time.sleep(0.3) # delays for 0.3 seconds\n print colored.cyan(' ___ ___', bold=12)\n time.sleep(0.3) # delays for 0.3 seconds\n print colored.cyan(' /\\ \\ ___ /\\ \\\\', bold=12)\n time.sleep(0.3) # delays for 0.3 seconds\n print colored.cyan(' \\:\\ \\ /\\ \\ /::\\ \\\\', bold=12)\n time.sleep(0.3) # delays for 0.3 seconds\n print colored.cyan(' \\:\\ \\ \\:\\ \\ 
/:/\\:\\ \\\\', bold=12)\n time.sleep(0.3) # delays for 0.3 seconds\n print colored.cyan(' /::\\ \\ /::\\__\\ /::\\~\\:\\ \\\\', bold=12)\n time.sleep(0.3) # delays for 0.3 seconds\n print colored.cyan(' /:/\\:\\__\\ __/:/\\/__/ /:/\\:\\ \\:\\__\\\\', bold=12)\n time.sleep(0.3) # delays for 0.3 seconds\n print colored.cyan(' /:/ \\/__/ /\\/:/ / \\/__\\:\\/:/ /', bold=12)\n time.sleep(0.3) # delays for 0.3 seconds\n print colored.cyan(' /:/ / \\::/__/ \\::/ /', bold=12)\n time.sleep(0.3) # delays for 0.3 seconds\n print colored.cyan(' \\/__/ \\:\\__\\ /:/ /', bold=12)\n time.sleep(0.3) # delays for 0.3 seconds\n print colored.cyan(' \\/__/ /:/ /', bold=12)\n time.sleep(0.3) # delays for 0.3 seconds\n print colored.cyan(' \\/__/', bold=12)\n time.sleep(0.3) # delays for 0.3 seconds\n exit()", "def call_quit(self, _):\n return True", "def call_quit(self, _):\n return True", "def call_quit(self, _):\n return True", "def bandit_camp_travel():\n\n os.system(\"clear\")\n print(\"You are about to sail to the Bandit Camp.\")\n time.sleep(2)\n print(\"After starting this event you will have to face very powerful enemies, without ability to escape.\")\n time.sleep(2)\n player_choice = input(\"Are you sure you want to go to Bandit Camp? y/n \")\n if player_choice == y:\n bandit_camp_bagin()", "def terminate():\r\n pygame.quit()\r\n os._exit(1)", "def rock():\n typer.echo(\"🤖🤘\")", "def quit(phenny, input):\n # Can only be done in privmsg by the owner\n if input.sender.startswith('#'): return\n if input.owner: \n phenny.write(['QUIT'])\n __import__('sys').exit(0)", "def terminate():\n pygame.quit()\n sys.exit(0)", "def character(game):\n\n while True:\n game.window.clear()\n\n game.window.addstr('{} the level {} adventurer'.format(game.player.name,\n game.player.level()))\n game.window.addstr('\\n\\nWielding a {} in the right hand'.format(\n game.player.get_slot('right hand').name))\n game.window.addstr('\\n\\n{} hp'.format(game.player.health))\n\n key = game.window.getkey()\n\n if key == 'q':\n break", "def quit():\n print(\"Thank you. Have a nice day:\")\n sys.exit()", "def main():\n player = Player(LivingRoom())\n escaping = True\n\n print('Alright kid, it\\'s you and me on a grand adventure. We\\'re '\n 'currently in the {}, and I can see {} possible exits. You can '\n 'search the room or try exploring, if you like.'\n .format(player.location.name, player.location.exits))\n\n while escaping:\n # need to replace hard list with extract from player.actions\n action = input('\\nWhat now?\\n\\n1. Search\\t2. Grab\\t3. Gurgle\\n>')\n\n if action in player.actions.keys():\n player.actions[action]()", "def next_action():\n while True:\n next = input('Enter Q to quit programme. M to return to main menu \\n')\n if next.lower() == 'q':\n logout()\n elif next.lower() == 'm':\n hr_main()\n is_invalid()", "def game(self):\n sender = self.sender()\n if(sender.text() == \" \"):\n sender.setText(\"x\" if self.firstPlayer else \"0\")\n self.firstPlayer = not(self.firstPlayer)\n res = self.checkForResult()\n if(res[0] == True):\n self.endGame(res[1])", "def Quit(self, event):\n pass", "def _exit_exam(self):\n self.finger.back()\n self._goto(\"exit_exam\")\n self.finger.back()", "def my_quit_function():\n pygame.quit()\n sys.exit()", "def vampire():\n\tprint \"\"\"\nThe vampire is sleeping. You walk up to it and can hear it breathe. 
You can\npoke it, or try and suck its blood.\n\"\"\"\n\t\t\n\twhile True:\n\t\tvampire_action = raw_input(\"> \")\n\t\t\n\t\tif vampire_action == \"poke\":\n\t\t\tvampire_awakens()\n\t\t\n\t\telif vampire_action == \"suck\":\n\t\t\tprint \"\"\"\nYou realize that's not how vampirism works.\n\"\"\"\n\t\t\tvampire()\n\t\t\t\n\t\telif vampire_action == \"blood\":\n\t\t\tprint \"\"\"You realize that's not how vampirism works.\n\"\"\"\n\t\t\tvampire()\n\t\t\n\t\telif vampire_action == \"back\":\n\t\t\tgreat_hall_return()\n\t\t\n\t\telse:\n\t\t\tprint \"I don't understand that.\"", "def shed():\n choices = {\"\\033[1;32mLeave\\033[0m the \\033[1;32mshed\\033[0m\" : [\"leave\", \"shed\"]}\n print_pause(\"\\nThis is a decrepit, ramshackle garden shed.\")\n print_pause(\"To the left you see an old wheelbarrow, piled high with\"\n \" rusted and useless tools.\")\n print_pause(\"To the right is a grimy window, through which you can see the\"\n \" farmhouse.\")\n if \"keys\" not in inventory:\n print_pause(\"A rather large set of keys is hanging on a hook by the\"\n \" door on a rusty nail.\")\n print_pause(\"It's a good thing your tetanus shot is up to date.\")\n choices.update({\"Pick up the \\033[1;32mkeys\\033[0m\" : [\"keys\", \"pick up keys\"]})\n if \"sword\" not in inventory:\n print_pause(\"An unusual glint among the rusty junk catches your eye.\")\n print_pause(\"Someone has jammed a sword in a barrel in the corner.\")\n choices.update({\"Pick up the \\033[1;32msword\\033[0m\" : [\"sword\", \"pick up sword\"]})\n action = valid_input(choices)\n if action == \"Leave the shed\":\n clearing()\n elif action == \"Pick up the keys\":\n print_pause(\"You carefully pick up the set of keys.\")\n inventory.append(\"keys\")\n shed()\n elif action == \"Pick up the sword\":\n print_pause(\"You pick up the sword.\")\n print_pause(\"Unlike everything else in here it's not covered\"\n \" in rust, and it seems pretty sharp.\")\n inventory.append(\"sword\")\n shed()", "def use(self):\n print_items()\n while True:\n print(\"Type 'back' to go back.\")\n item_choice = player_choice(\"\")\n if item_choice == 'back':\n break\n elif item_choice in inventory:\n if item_choice == \"little key\":\n print(\"You open the cabinet door.\")\n print(\"In it, there is a golden key.\")\n gk = GoldenKey('golden key')\n gk.take()\n break\n else:\n print(\"That is the wrong item!\")\n else:\n print(\"You have not found the item yet.\")", "def stand(self):\n self.endgame()", "def shut_down():\n #return_to_center()\n stop_move()\n qpt.close()", "async def play(self, ctx, *args, **kwargs) -> bool:\n \n if not hasattr(self, \"question\"):\n await self.generate_question()\n \n alphabet = list(\"ABCD\")\n embed = ctx.bot.Embed(ctx, title=\"Geography Quiz!\", description=self.question + \"\\n\" + \"\\n\".join(\n [f\"{alphabet[choice]}. **{self.choices[choice]}**\" for choice in range(4)]\n ))\n response = await embed.send()\n message = Message(state=embed.ctx._state, channel=ctx.channel, data=response)\n del embed, response\n \n WaitFor = ctx.bot.WaitForMessage(ctx, check=(lambda x: x.channel == ctx.channel and x.author == ctx.author and len(x.content) == 1 and (x.content.upper() in alphabet)))\n _input = await WaitFor.get_message()\n del WaitFor\n \n if not message:\n await message.edit(embed=Embed(title=f'Quiz ended. No response from {ctx.author.display_name}.', color=Color.red()))\n return\n \n if alphabet.index(_input.content.upper()) == self.correct_order:\n await message.edit(embed=Embed(title=f'Congratulations! 
{ctx.author.display_name} is correct!', color=Color.green()))\n return True\n await message.edit(embed=Embed(title=f'Sorry, {ctx.author.display_name}! The answer is {alphabet[self.correct_order]}. {self.choices[self.correct_order]}', color=Color.red()))\n return False", "def menu_quit():\n return \"Quit\"", "def win(self):\n self.die()", "def quit(self, reason=\"\", *args, **kwargs):\n pass", "def Quit(self):\n loop.quit()", "def endGame(self):\n #self.active = False\n self.inGame = False\n self.hand = []\n self.position = None", "def do_quit(self,line):\n self.quit()", "def clearQuitFlag():\n simuConfig[\"FLAG.QUIT\"] = False", "def endgame(winner):", "def play_QLEARN_game(self):\n self.reset()\n table = qTable(self.grid.length, self.grid.height, 0.9, 0.9)\n self.num = 0\n while(not self._exit):\n pg.event.pump()\n self.clock.tick(self.actions_per_second)\n self.check_for_exit()\n self.handle_keyboard_input()\n # performs the Q learning for the snake\n self.perform_QLearn_actions(table)\n self.check_for_end_game()\n self.render()\n\n self.cleanup()", "def do_quit(self, line):\r\n print(\"Quitting......\")\r\n return True", "def use(self):\n while True:\n print(\"Type 'back' to go back.\")\n item_choice = player_choice(\"\")\n if item_choice == 'back':\n break\n elif item_choice in inventory:\n if item_choice == \"golden key\":\n print(\"You open the door.\")\n rm = Rooms(\"lilian's office\")\n rm.room_wall()\n Lo = LiOffice(\"lilian's office\")\n Lo.choose_wall()\n else:\n print(\"That is the wrong item!\")\n else:\n print(\"You have not found the item yet.\")", "def quit_program():\n print(\"Thank you for your time. Goodbye.\")\n exit()" ]
[ "0.6897181", "0.6780871", "0.6693137", "0.65562737", "0.65255046", "0.6351009", "0.6307511", "0.6168882", "0.61455756", "0.61273867", "0.61242676", "0.61096215", "0.60773265", "0.60688776", "0.6056135", "0.6048833", "0.6048833", "0.6048833", "0.6034417", "0.6029101", "0.60159624", "0.6003893", "0.60031277", "0.59841174", "0.5980228", "0.59669524", "0.59608483", "0.59502816", "0.5948639", "0.5948639", "0.5948639", "0.5948639", "0.59480983", "0.5943426", "0.5938676", "0.5918651", "0.59120685", "0.59120685", "0.59120685", "0.5893351", "0.58932525", "0.5891505", "0.58905727", "0.5886378", "0.58743805", "0.5859335", "0.5850415", "0.58426386", "0.5841971", "0.5838983", "0.58103395", "0.58092624", "0.57801396", "0.5776708", "0.577376", "0.57733625", "0.5772543", "0.5766754", "0.575507", "0.5745543", "0.5745543", "0.57413673", "0.57391894", "0.57338464", "0.57197267", "0.5718517", "0.5718267", "0.5718267", "0.5718267", "0.5708302", "0.5699559", "0.56964856", "0.5694851", "0.569434", "0.5693886", "0.56784225", "0.5675025", "0.5670513", "0.56626743", "0.56452453", "0.5643603", "0.5641273", "0.5637594", "0.5631919", "0.5630464", "0.56285", "0.56254345", "0.5616696", "0.5602945", "0.55883986", "0.558819", "0.5579305", "0.5572352", "0.55709726", "0.55658954", "0.556435", "0.5561648", "0.5560369", "0.55594605", "0.5557952" ]
0.62268347
7
Catches all game-related events
def events(self):
    # catch all events here
    for event in pg.event.get():
        if event.type == pg.QUIT:
            self.quit_game()
        if event.type == pg.KEYDOWN:
            if event.key == pg.K_ESCAPE:
                menu.paused = True
                menu.pause_menu()  # code gets stuck in this call until a button is pressed in the pause menu
                self.clock = pg.time.Clock()
            if event.key == pg.K_h:
                self.draw_debug = not self.draw_debug
            if event.key == pg.K_o:
                if self.flashlight.on:  # turning off flashlight
                    self.darkness.on = True
                    self.battery.duration -= pg.time.get_ticks() - self.battery.last_update
                    self.flashlight.on = False
                else:  # turning on flashlight
                    self.darkness.on = False
                    self.battery.last_update = pg.time.get_ticks()
                    self.flashlight.on = True
    # darkness condition
    if self.transition:
        self.darkness_transition(self.player)
        self.kidnap(self.player)
    # win condition
    if pg.sprite.spritecollide(self.player, self.win, False, collide_hit_rect):
        menu.win_menu()
    # got hit condition
    hit = pg.sprite.spritecollide(self.player, self.threat, False, collide_hit2_rect)
    if hit:
        self.hit(self.player, hit[0])
    # mirror
    self.portal(self.player)
    self.portal(self.monster)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __handle_events(self):\r\n for event in pygame.event.get():\r\n self.controller.handle_event(event)", "def process_events(self):\n gameevents = copy.copy(self.gameevents)\n del self.gameevents[:]\n while len(gameevents) > 0:\n currentevent = gameevents.pop(0)\n ticks = currentevent.ticks\n time = currentevent.time\n eid = currentevent.eid\n game = currentevent.game\n command = currentevent.command\n obj = currentevent.obj\n target = currentevent.target\n type = currentevent.type\n if self.config['Logging']['logging'] and currentevent.log:\n self.log.write(\"%s\\t%f\\t%d\\t%d\\t%d\\t%s\\t%s\\t%s\\n\" % (type, time, ticks, game, eid, command, obj, target))\n if command == \"press\":\n if obj == \"pause\":\n self.gametimer.pause()\n self.state = self.STATE_PAUSED\n elif obj == \"unpause\":\n self.state = self.STATE_PLAY\n self.gametimer.unpause()\n elif obj == \"quit\":\n self.lc.stop()\n elif obj == \"left\":\n self.ship.turn_left_flag = True\n elif obj == \"right\":\n self.ship.turn_right_flag = True\n elif obj == \"thrust\":\n self.ship.thrust_flag = True\n elif obj == \"fire\":\n self.ship.fire()\n elif obj == \"iff\":\n #print len(self.mine_list)\n #don't do anything if there's no mine on the screen\n if len(self.mine_list) == 0:\n pass\n elif self.mine_list[0].tagged == \"fail\":\n self.gameevents.add(\"tag\", \"already_failed\")\n elif self.mine_list[0].tagged == \"disable\":\n self.gameevents.add(\"tag\", \"already_disabled\")\n elif self.mine_list[0].tagged == \"tagged\":\n self.gameevents.add(\"tag\", \"already_tagged\")\n #if the mine is untagged and this is the first tap\n elif self.mine_list[0].tagged == \"untagged\" and self.mine_list.iff_flag == False:\n if self.score.iff in self.mine_list.foe_letters:\n self.gameevents.add(\"first_tag\", \"foe\")\n else:\n self.gameevents.add(\"first_tag\", \"friend_fail\")\n #if the mine is a foe, untagged, and this is the second tap, check timer, set intrvl\n elif self.mine_list[0].tagged == \"untagged\" and self.mine_list.iff_flag:\n self.score.intrvl = self.mine_list.iff_timer.elapsed()\n if (self.mine_list.iff_timer.elapsed() > self.config['Mine']['intrvl_min']) and (self.mine_list.iff_timer.elapsed() < self.config['Mine']['intrvl_max']):\n self.gameevents.add(\"second_tag\", \"foe\")\n else:\n self.gameevents.add(\"second_tag\", \"out_of_bounds\")\n elif obj == \"shots\":\n if not self.bonus_captured:\n self.bonus_captured = True\n if self.config['General']['bonus_system'] == \"standard\":\n #if current symbol is bonus but previous wasn't, set flag to deny bonus if next symbol happens to be the bonus symbol\n if (self.bonus.current_symbol == self.bonus.bonus_symbol) and (self.bonus.prior_symbol != self.bonus.bonus_symbol):\n self.bonus.flag = True\n self.gameevents.add(\"flagged_for_first_bonus\")\n if (self.bonus.current_symbol == self.bonus.bonus_symbol) and (self.bonus.prior_symbol == self.bonus.bonus_symbol):\n #bonus available, check flag to award or deny bonus\n if self.bonus.flag:\n self.gameevents.add(\"attempt_to_capture_flagged_bonus\")\n else:\n self.capturedBonuses += 1\n self.gameevents.add(\"shots_bonus_capture\")\n self.gameevents.add(\"score+\", \"shots\", self.config['Score']['bonus_missiles'])\n self.gameevents.add(\"score+\", \"bonus\", self.config['Score']['bonus_points'] / 2)\n self.bonus.flag = True\n else: #AX-CPT\n if self.bonus.axcpt_flag == True and (self.bonus.state == \"iti\" or self.bonus.state == \"target\") and self.bonus.current_pair == \"ax\":\n self.snd_bonus_success.play()\n self.capturedBonuses 
+= 1\n self.gameevents.add(\"shots_bonus_capture\")\n self.gameevents.add(\"score+\", \"shots\", self.config['Score']['bonus_missiles'])\n if self.config['General']['next_gen']:\n self.gameevents.add(\"score+\", \"pnts\", self.config['Score']['bonus_points'] / 2)\n else:\n self.gameevents.add(\"score+\", \"bonus\", self.config['Score']['bonus_points'] / 2)\n elif self.bonus.axcpt_flag:\n self.bonus.axcpt_flag = False\n self.snd_bonus_fail.play()\n self.gameevents.add(\"shots_bonus_failure\")\n if self.config['General']['next_gen']:\n self.gameevents.add(\"score-\", \"pnts\", self.config['Score']['bonus_points'] / 2)\n else:\n self.gameevents.add(\"score-\", \"bonus\", self.config['Score']['bonus_points'] / 2)\n elif obj == \"pnts\":\n if not self.bonus_captured:\n self.bonus_captured = True\n if self.config['General']['bonus_system'] == \"standard\":\n #if current symbol is bonus but previous wasn't, set flag to deny bonus if next symbol happens to be the bonus symbol\n if (self.bonus.current_symbol == self.bonus.bonus_symbol) and (self.bonus.prior_symbol != self.bonus.bonus_symbol):\n self.bonus.flag = True\n self.gameevents.add(\"flagged_for_first_bonus\")\n if (self.bonus.current_symbol == self.bonus.bonus_symbol) and (self.bonus.prior_symbol == self.bonus.bonus_symbol):\n #bonus available, check flag to award or deny bonus\n if self.bonus.flag:\n self.gameevents.add(\"attempt_to_capture_flagged_bonus\")\n else:\n self.capturedBonuses += 1\n self.gameevents.add(\"pnts_pnts_capture\")\n self.gameevents.add(\"score+\", \"bonus\", self.config['Score']['bonus_points'])\n self.gameevents.add(\"score+\", \"pnts\", self.config['Score']['bonus_points'])\n self.bonus.flag = True\n else: #AX-CPT\n if self.bonus.axcpt_flag == True and (self.bonus.state == \"iti\" or self.bonus.state == \"target\") and self.bonus.current_pair == \"ax\":\n self.snd_bonus_success.play()\n self.capturedBonuses += 1\n self.gameevents.add(\"pnts_bonus_capture\")\n if self.config['General']['next_gen']:\n self.gameevents.add(\"score+\", \"pnts\", self.config['Score']['bonus_points'])\n else:\n self.gameevents.add(\"score+\", \"pnts\", self.config['Score']['bonus_points'])\n self.gameevents.add(\"score+\", \"bonus\", self.config['Score']['bonus_points'])\n elif self.bonus.axcpt_flag:\n self.bonus.axcpt_flag = False\n self.snd_bonus_fail.play()\n self.gameevents.add(\"pnts_bonus_failure\")\n if self.config['General']['next_gen']:\n self.gameevents.add(\"score-\", \"pnts\", self.config['Score']['bonus_points'] / 2)\n else:\n self.gameevents.add(\"score-\", \"bonus\", self.config['Score']['bonus_points'] / 2)\n elif command == \"destroyed\":\n if obj == \"ship\":\n self.deaths += 1\n self.reset_position()\n self.reset_mines()\n elif command == \"bonus_available\":\n self.totalBonuses += 1\n elif command == \"first_tag\":\n if obj == \"foe\":\n self.mine_list.iff_flag = True\n self.mine_list.iff_timer.reset()\n elif len(self.mine_list) > 0:\n self.mine_list[0].tagged = \"fail\"\n elif command == \"second_tag\":\n self.mine_list.iff_flag = False\n if obj == \"foe\" and len(self.mine_list) > 0:\n self.mine_list[0].tagged = \"tagged\"\n elif command == \"release\":\n if obj == \"left\":\n self.ship.turn_left_flag = False\n elif obj == \"right\":\n self.ship.turn_right_flag = False\n elif obj == \"thrust\":\n self.ship.thrust_flag = False\n elif command == \"warp\":\n self.gameevents.add(\"score-\", \"pnts\", self.config['Score']['warp_penalty'])\n self.gameevents.add(\"score-\", \"flight\", 
self.config['Score']['warp_penalty'])\n elif command == \"activate\":\n if obj == \"bonus\":\n self.bonus.visible = True\n self.bonus.timer.reset()\n self.bonus.get_new_symbol()\n self.gameevents.add(\"new_bonus\", self.bonus.current_symbol, self.bonus.prior_symbol)\n if self.bonus.current_symbol == self.bonus.prior_symbol == self.bonus.bonus_symbol:\n self.gameevents.add(\"bonus_available\")\n #\"reset\" the bonus flag (which prevents premature capture) if symbol is not bonus\n if self.bonus.current_symbol != self.bonus.bonus_symbol:\n self.bonus.flag = False\n elif command == \"deactivate\":\n if obj == \"bonus\":\n self.bonus.visible = False\n self.bonus.timer.reset()\n elif command == \"spawn\":\n self.totalMines += 1\n self.mine_list.flag = True\n self.mine_list.timer.reset()\n self.mine_list.add()\n if self.mine_list[0].iff in self.mine_list.foe_letters:\n self.gameevents.add(\"new_mine\", \"foe\")\n else:\n self.gameevents.add(\"new_mine\", \"friend\")\n elif command == \"timeout\":\n self.mine_list.flag = False\n self.mine_list.iff_flag = False\n self.mine_list.timer.reset()\n if len(self.mine_list) > 0:\n del self.mine_list[0]\n self.score.iff = ''\n self.score.intrvl = 0\n self.gameevents.add(\"score-\", \"mines\", self.config['Score']['mine_timeout_penalty'])\n elif command == \"score++\":\n if obj == \"bonus_points\":\n self.gameevents.add(\"score+\", \"pnts\", int(target))\n elif command == \"score+\":\n self.score.__setattr__(obj, self.score.__getattribute__(obj) + float(target))\n if self.score.shots > self.config['Missile']['missile_max']:\n self.score.shots = self.config['Missile']['missile_max']\n elif command == \"score-\":\n self.score.__setattr__(obj, self.score.__getattribute__(obj) - float(target))\n elif command == \"collide\":\n self.process_collision(obj, target)\n elif command == \"joyaxismotion\":\n if obj == 0:\n self.ship.joy_turn = target\n elif obj == 1:\n self.ship.joy_thrust = target", "def _gather_events(self, newframe_event):\n if not self.closed:\n for pg_event in pg.event.get():\n event = self._pygame_event_to_event(pg_event)\n if event is not None:\n self.event_hub.raise_event(event)\n self._add_animation_events()", "def events(self):", "def events(self) -> None:\n\n for event in pg.event.get():\n if event.type == TIMER:\n if self.game_state == InGameState.RUNNING:\n self.timer -= 1\n else:\n self.display_timer -= 1\n\n if event.type == pg.QUIT:\n self.quit()\n if event.type == pg.KEYDOWN:\n if event.key == pg.K_ESCAPE:\n self.quit()", "def handleEvents(self, events):\n pass", "def event_loop(self):\n for event in pg.event.get():\n self.keys = pg.key.get_pressed()\n if event.type == pg.QUIT or self.keys[pg.K_ESCAPE]:\n self.done = True\n self.cannon.get_event(event, self.objects)", "def get_pygame_events(self):\n for event in pygame.event.get():\n if event.type in self.registered_pygame_handlers:\n for handler in self.registered_pygame_handlers[event.type]:\n\n if (event.type == pygame.KEYDOWN or\n event.type == pygame.KEYUP):\n handler(event.key, event.mod)\n else:\n handler()", "def visit_event(self, event):", "def event_loop(self):\n for event in pygame.event.get():\n self.scene.get_event(event)", "def slurp_events(self):\n while self.has_event():\n self.get_event()", "def _add_event_detect(self):\n for gpio_channel in self.registered_gpio:\n self.gpio_add_event_detect(gpio_channel, bouncetime=300)", "def check_events(rocket):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n 
check_keydown_events(event, rocket)\n elif event.type == pygame.KEYUP:\n check_keyup_events(event, rocket)", "def do_input_events(self):\r\n for event in EventStream.allNext(self.streams):\r\n if self.handler.event(event) and self.unhandledHandler:\r\n self.unhandledHandler(event)", "def on_event(self, event):\n pass", "def on_event(self, event):\r\n pass", "def events(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.running = False\n if event.type == pygame.MOUSEBUTTONDOWN:\n self.set_selected(self.mouse_on_grid())\n if self.get_selected() is not None and event.type == pygame.KEYDOWN:\n self.event_seletect_moved(event)\n self.event_cell_update(event)", "def InitOtherEvents(self):\n\n pass", "def __handle_ingame_events(self, events: List[pygame.event.Event]) -> None:\n for event in events:\n if event.type == HIGHSCORE_NEW_PONIT:\n self.highscore += 0.5\n if event.type == PIPE_SPAWN:\n self.__spawn_pipe()", "def _check_events(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n self._check_keydown_events(event)", "def process_event(self, event):\r\n pass", "def _check_events(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n self._check_keydown_events(event)\n elif event.type == pygame.KEYUP:\n self._check_keyup_events(event)", "def _check_events(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n self._check_keydown_events(event)\n elif event.type == pygame.KEYUP:\n self._check_keyup_events(event)", "def event(self,events):\n for event in events:\n if event.type == KEYDOWN:\n if event.key == K_RETURN:#starts the game\n self.game.gotoMain()\n #print \"r\"\n if event.key == K_ESCAPE:#quits the game\n sys.exit(0)", "def _check_events(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit() \n if event.type == pygame.KEYDOWN:\n self._check_keydown_events(event)", "def process_events(self):\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n return True\n if event.type == pygame.MOUSEBUTTONDOWN:\n if self.game_over:\n self.__init__()\n\n return False", "def _check_events(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n # if the exit button on screen is clicked close the program\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n self._check_keydown_events(event)\n elif event.type == pygame.KEYUP:\n self._check_keyup_events(event)", "def event_receive(self,event):\n\n pass", "def handle_events(self):\n keys = pygame.key.get_pressed()\n if self.game_manager.game_state == GameState.Running:\n if self.arcade:\n self.game_manager.control_players_arcade(self.joysticks) \n else:\n self.game_manager.control_players(keys)\n elif self.arcade:\n self.ui.arcade_control(self.joysticks[1])\n\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.game_manager.game_state = GameState.Quit \n if self.game_manager.game_state == GameState.Finished or\\\n self.game_manager.game_state == GameState.Menu :\n if event.type == pygame.KEYDOWN and not self.arcade:\n self.ui.control(event.key)\n #self.start_new_game(GameMode.EatToSurvive)", "def _check_events(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n self._check_keydown_event(event)\n elif event.type == pygame.KEYUP:\n 
self._check_keyup_event(event)", "def handle_events(self):\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n if self.state == FIRST_ENTER:\n if event.type == pygame.KEYDOWN and event.key == pygame.K_KP_ENTER:\n self.state = GAME\n self.player.event = event\n elif self.state == GAME:\n self.player.event = event\n elif self.state in [END, WIN]:\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_KP_ENTER:\n self.state = GAME\n elif event.key == pygame.K_n:\n self.running = False", "def handleEvent(self, event):\n pass", "def on_event(self, event):", "def collect_new_events(self) -> list:\n self.logger.debug('Collecting new events...')\n events = self.build_events()\n if not events:\n self.logger.debug('No new events.')\n for event in events:\n self.logger.info('A new event has been detected: {}'.format(event))\n self._buffer_buisy_mutex.acquire()\n self._events_buffer.append(event)\n self._buffer_buisy_mutex.release()", "def collect_events(self, inputs, ew):\n input_module.collect_events(self, inputs, ew)", "def _check_events(self):\t\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.QUIT:\n\t\t\t\tsys.exit()\n\n\t\t\telif event.type == pygame.KEYDOWN:\n\t\t\t\tself._check_keydown_events(event)\n\n\t\t\telif event.type == pygame.KEYUP:\n\t\t\t\tself._check_keyup_events(event)\n\n\t\t\telif event.type == pygame.MOUSEBUTTONDOWN:\n\t\t\t\tself._check_retry_button(pygame.mouse.get_pos())", "def handle_events(self):\n for event in pymlgame.get_events():\n if event.type == pymlgame.E_NEWCTLR:\n print('new ctlr with uid:', event.uid)\n elif event.type == pymlgame.E_KEYDOWN:\n if event.button == pymlgame.CTLR_UP:\n if self.snake.direction != DOWN:\n self.snake.direction = UP\n elif event.button == pymlgame.CTLR_DOWN:\n if self.snake.direction != UP:\n self.snake.direction = DOWN\n elif event.button == pymlgame.CTLR_LEFT:\n if self.snake.direction != RIGHT:\n self.snake.direction = LEFT\n elif event.button == pymlgame.CTLR_RIGHT:\n if self.snake.direction != LEFT:\n self.snake.direction = RIGHT\n elif event.type == pymlgame.E_PING:\n print('ping from', event.uid)", "def _check_events(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n # Modify game response when player presses a key\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_RIGHT:\n self.ship.moving_right = True\n elif event.key == pygame.K_LEFT:\n self.ship.moving_left = True\n elif event.key == pygame.K_q:\n sys.exit()\n elif event.key == pygame.K_SPACE:\n self.bomb_status = False\n self.settings.debomb()\n self._fire_bullet()\n mixer.music.load('E:\\Sky-Fall\\SOUNDS\\shots.ogg')\n mixer.music.play()\n elif event.key == pygame.K_b:\n self.bomb_status = True\n self.settings.bomb()\n self._fire_bullet() \n elif event.type == pygame.KEYUP:\n if event.key == pygame.K_RIGHT:\n self.ship.moving_right = False\n elif event.key == pygame.K_LEFT:\n self.ship.moving_left = False\n elif event.type == pygame.MOUSEBUTTONDOWN:\n mouse_pos = pygame.mouse.get_pos()\n self._check_play_button(mouse_pos)", "def _play_new_event(self, event: Event, event_types_listeners):\n for leaf in event_types_listeners[event.type]:\n if self._should_ignore_events_on_leaf(leaf, event_types_listeners):\n continue\n self.__try_register_freezer(event, leaf)\n leaf.handle_event(event)", "def handle_event(self, event):\n pass", "def notify_all(self, event: GameEvent):\n for listener in self._listeners:\n listener.notify(event)", "def handle_events(self):\n for_removal 
= []\n\n for event in self.time_events:\n if event.next < datetime.datetime.now():\n if event.type == \"periodic\":\n event.next += event.interval\n else:\n for_removal.append(event)\n try:\n event.action()\n except:\n self.log.exception(\"Error happened in a timed event\")\n\n for item in for_removal:\n self.time_events.remove(item)", "def _handleEvents(self):\n\n pygame.event.pump()\n keyboardState = pygame.key.get_pressed()\n for key in Game.BoundControls:\n Game.ControlState[Game.Controls[key]] = keyboardState[key]\n if Game.ControlState[Game.Quit]:\n self._quit()", "def logevents(self, events, request = None):\n for event in events:\n self.logevent(event, request)", "def check_events(self):\r\n for event in pg.event.get():\r\n if event.type == pg.QUIT:\r\n self.ai_game.quit()\r\n elif event.type == pg.KEYDOWN:\r\n self._check_keydown_events(event)\r\n elif event.type == pg.KEYUP:\r\n self._check_keyup_events(event)\r\n elif event.type == pg.MOUSEBUTTONDOWN:\r\n mouse_pos = pg.mouse.get_pos()\r\n self._check_button(mouse_pos)", "async def events(self) -> Iterable[Event]:", "def _processEvent(self):\n\t\ttry:\n\t\t\t# Run CUSUM+ to detect changes in level\n\t\t\tself.__FitEvent()\n\t\texcept:\n\t\t\traise", "def handle_event(self, event):", "def _check_events(self):\n for event in pygame.event.get():\n # quit stuff\n if event.type == pygame.QUIT:\n sys.exit()\n # mouse click for 'PLAY' button\n elif event.type == pygame.MOUSEBUTTONDOWN:\n mouse_pos = pygame.mouse.get_pos()\n self._check_play_button(mouse_pos)\n\n # checks for key down/up events and sends it to appropriate method\n elif event.type == pygame.KEYDOWN:\n self._check_keydown_events(event)\n elif event.type == pygame.KEYUP:\n self._check_keyup_events(event)", "def process(self, event):\n pass", "def hook_events(self):\n wxMediator.hook_events(self)\n EVT_MINE(self, wxEVT_SOCKET_DATA, self.on_data)\n EVT_MINE(self, wxEVT_NEW_LISTEN_CONN, self.new_listen_conn)\n EVT_MINE(self, wxEVT_NEW_TALK_CONN, self.new_talk_conn)", "def handle_events(self) -> None:\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n elif event.type == MOUSEMOTION:\n self.mouse_pos = event.pos\n elif event.type == MOUSEBUTTONDOWN:\n self.mouse_pos = event.pos\n self.mouse_clicked = True\n elif self._focused_button is not None and event.type == KEYDOWN:\n self._handle_key_press(event)", "def resetHandlers(self):\n def stop(e):\n raise StopIteration\n self._eventHandlers = {QUIT: stop}\n pygame.event.set_allowed(None) # this should block all event types\n self.addHandlers({}) # then add them back in selectively", "def loop(self):\n keys.mode = 'main'\n for line in client.readlines('/event'):\n if not self.alive:\n break\n self.dispatch(*line.split(' ', 1))\n self.alive = False", "def notify(self, event):\n\n if isinstance(event, TickEvent):\n # Called for each game tick. 
We check our keyboard presses here.\n\n # check for quit operation\n for event in pygame.event.get():\n keys = pygame.key.get_pressed()\n if keys[pygame.K_ESCAPE] or event.type == pygame.QUIT:\n self.eventManager.Post(QuitEvent())\n\n\n if isinstance(event, ParachutistReachedSeaLevelEvent):\n self.handleParachutistLanding(event)\n # position = self.model.getPosition()\n # if position[0] == event.xPosition:\n # self.eventManager.UnregisterListener(self)", "def events(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.window.open = False\n if event.type == KEYDOWN:\n if event.key == K_ESCAPE:\n self.window.open = False\n if event.key == K_SPACE:\n self.restart()\n if event.key == K_f:\n self.window.switch(self.default_size)\n if event.type == VIDEORESIZE:\n self.window.screen = pygame.display.set_mode(\n (event.w, event.h), RESIZABLE)\n if event.type == MOUSEMOTION:\n pass\n if event.type == MOUSEBUTTONDOWN and event.button == 1:\n self.click(event.pos)", "def handle_events(self, events):\n for event in events:\n event_type = event['type']\n if event_type == types.SO_CHANGE:\n for key in event['data']:\n self.data[key] = event['data'][key]\n self.on_change(key)\n\n elif event_type == types.SO_REMOVE:\n key = event['data']\n assert key in self.data, (key, self.data.keys())\n del self.data[key]\n self.on_delete(key)\n\n elif event_type == types.SO_SEND_MESSAGE:\n self.on_message(event['data'])\n else:\n assert False, event", "def EventFrame (self):\n pass", "def process_events(self, events):\n for game_event in events:\n if game_event:\n game_event = self._send_event(game_event)\n if game_event:\n yield game_event", "def doEvent(self, source):\n pass", "def main(self):\n while 1:\n events = get_gamepad()\n for event in events:\n\n if(event.ev_type == \"Absolute\" ):\n\n if event.code in self.map[GAMEPAD].keys():\n self.absolute_switch[ self.map[GAMEPAD][event.code] ](event.state)\n\n\n if(event.ev_type == \"Key\" ):\n\n if event.code in self.map[GAMEPAD].keys():\n self.btn_switch[ self.map[GAMEPAD][event.code] ](self.map[GAMEPAD][event.code], event.state)\n \n\n\n\n #print(event.ev_type, event.code, event.state)", "def event_loop(self):\n for event in pg.event.get():\n if event.type == pg.QUIT:\n self.done = True\n elif event.type == pg.KEYDOWN:\n self.keys = pg.key.get_pressed()\n self.toggle_show_fps(event.key)\n elif event.type == pg.KEYUP:\n self.keys = pg.key.get_pressed()\n self.toggle_fullscreen(event.key)\n self._scene.get_event(event)", "def event_handler(self):\r\n for event in pygame.event.get():\r\n if event.type == QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n elif event.type == MOUSEBUTTONDOWN and event.button == LEFT_CLICK:\r\n self.left_mouse_down_handler(event)\r\n elif event.type == MOUSEBUTTONUP and event.button == LEFT_CLICK:\r\n self.left_mouse_up_handler(event)\r\n elif event.type == MOUSEBUTTONDOWN and event.button == RIGHT_CLICK:\r\n self.right_mouse_down_handler(event)\r\n elif event.type == MOUSEBUTTONUP and event.button == RIGHT_CLICK:\r\n self.right_mouse_up_handler(event)\r\n elif event.type == MOUSEMOTION:\r\n self.mouse_motion_handler(event)\r\n elif event.type == MOUSEBUTTONUP and event.button in [2, 4, 5]:\r\n self.shortcut_click(event)", "def handle_event(context: GameContext, event: pygame.event.Event) -> None:\n handle_win(context, event)\n handle_bullet_hit(context, event)\n handle_bullets_fired(context, event)", "def parse_events(self, clock: pygame.time.Clock):\n events = pygame.event.get()\n key_pressed = 
pygame.key.get_pressed()\n for event in events:\n if event.type == pygame.QUIT or key_pressed[K_q] or key_pressed[K_ESCAPE]:\n return False, VehicleControl()\n if event.type == pygame.JOYHATMOTION:\n hori, vert = self.joystick.get_hat(0)\n if vert > 0:\n self.max_throttle = np.clip(self.max_throttle + self.gear_throttle_step, 0, 1)\n elif vert < 0:\n self.max_throttle = np.clip(self.max_throttle - self.gear_throttle_step, 0, 1)\n\n if hori > 0:\n self.steering_offset = np.clip(self.steering_offset + self.gear_steering_step, -1, 1)\n elif hori < 0:\n self.steering_offset = np.clip(self.steering_offset - self.gear_steering_step, -1, 1)\n\n if self.use_joystick:\n self.throttle, self.steering = self._parse_joystick()\n else:\n self.throttle, self.steering = self._parse_vehicle_keys(key_pressed)\n\n return True, VehicleControl(throttle=np.clip(self.throttle, -self.max_throttle, self.max_throttle),\n steering=np.clip(self.steering, -self.max_steering, self.max_steering))", "def event_pattern(self):\n pass # pragma: no cover", "def check_events(ai, var, screen, ship, shots, enemies, charges, shields, blasters, hub):\r\n\tfor event in pygame.event.get():\r\n\t\tif event.type == pygame.QUIT:\r\n\t\t\tsys.exit()\r\n\t\telif event.type == pygame.KEYDOWN:\r\n\t\t\tkey_down(event, ai, var, screen, ship, shots, enemies, charges, shields, hub)\r\n\t\telif event.type == pygame.KEYUP:\r\n\t\t\tkey_up(event, ai, var, screen, ship, charges, shields, blasters, hub)", "def hook_events(self):\n EVT_MINE(self, wxEVT_CORRECT_UTTERANCE, self.on_correct_utterance)\n EVT_MINE(self, wxEVT_CORRECT_RECENT, self.on_correct_recent)\n EVT_MINE(self, wxEVT_REFORMAT_RECENT, self.on_reformat_recent)", "def _read_all_events(self):\n try:\n while True:\n data = self._f.read(struct.calcsize(JS_EVENT_FMT))\n jsdata = struct.unpack(JS_EVENT_FMT, data)\n self.__updatestate(jsdata)\n except IOError as e:\n if e.errno != 11:\n logger.info(str(e))\n self._f.close()\n self._f = None\n raise IOError(\"Device has been disconnected\")\n except TypeError:\n pass\n except ValueError:\n # This will happen if I/O operations are done on a closed device,\n # which is the case when you first close and then open the device\n # while switching device. 
But, in order for SDL2 to work on Linux\n # (for debugging) the device needs to be closed before it's opened.\n # This is the workaround to make both cases work.\n pass", "def events(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.running = False\n return\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n self.running = False\n return\n elif event.key == pygame.K_LEFT:\n if (self.xv, self.yv) != (1, 0):\n self.xv, self.yv = -1, 0\n return\n elif event.key == pygame.K_RIGHT:\n if (self.xv, self.yv) != (-1, 0):\n self.xv, self.yv = 1, 0\n return\n elif event.key == pygame.K_UP:\n if (self.xv, self.yv) != (0, 1):\n self.xv, self.yv = 0, -1\n return\n elif event.key == pygame.K_DOWN:\n if (self.xv, self.yv) != (0, -1):\n self.xv, self.yv = 0, 1\n return", "def _on_walk(self):\n pass", "def _handle_events(screen, hexmap: HexMap):\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN:\n # Exit on Escape or Q press.\n if event.key == pygame.K_ESCAPE or event.key == pygame.K_q:\n exit()\n # Redraw the map on R press.\n elif event.key == pygame.K_r:\n _draw_map(screen)\n # Print hex's coordinates on click.\n elif event.type == pygame.MOUSEBUTTONDOWN:\n if event.button == 1: # LMB press\n tile = hexmap.pixel2hex(*event.pos)\n if tile:\n x, y = tile.doubled\n print(f\"Clicked tile at {(x, y)}. Neighbors:\")\n neighbors = hexmap.find_neighbors(tile)\n for neighbor in neighbors:\n x, y = neighbor.doubled\n print((x, y))\n print()", "def _event(self, level=None, message=None):\n for i in eventhandlers:\n if level == 'write':\n i.write( object_definition=self, message=message )\n else:\n i.debug( object_definition=self, message=message )", "def _check_events(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n self._check_keyDown_events(event)\n elif event.type == pygame.KEYUP:\n self._check_keyUP_events(event)\n elif event.type == pygame.MOUSEBUTTONDOWN:\n mouse_pos = pygame.mouse.get_pos()\n self._check_play_button(mouse_pos)", "def unknown_event(self, event):\r\n pass", "def events(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.context.open=False\n if event.type == KEYDOWN:\n if event.key == K_ESCAPE:\n self.context.open=False\n if event.key == K_SPACE:\n self.setMode((self.mode+1)%3)\n #if event.key == K_f:\n # pygame.display.toggle_fullscreen()\n if event.type == pygame.MOUSEBUTTONDOWN:\n if event.button == 4: self.context.draw.plane.zoom([1.1,1.1])\n if event.button == 5: self.context.draw.plane.zoom([0.9,0.9])", "def _check_for_events(self):\n\n for event in pygame.event.get():\n # if user exits out of window during pause menu\n if event.type == pygame.QUIT:\n sys.exit()\n\n # if user presses escape to unpause\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n self.__pauseMenuActive = False\n\n # if exit game button is pressed\n if self.__exitGameButton.isButton_pressed_event_handler(event) == True:\n sys.exit()\n\n # if resume button is pressed\n if self.__resumeButton.isButton_pressed_event_handler(event) == True:\n self.__pauseMenuActive = False\n\n # if main menu button is pressed\n if self.__mainMenuButton.isButton_pressed_event_handler(event) == True:\n self.__pauseMenuActive = False\n self.__toMainMenu = True", "def catchGameEvents(self, is_player, fpsclock, screen):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.exit()\n return True\n if event.type == 
pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n return self.pauseMenu(fpsclock, screen)\n if is_player:\n self.playEvents(event)\n return False", "def event11512060():\n header(11512060, 1)\n chr.disable(CHR.CapriciousThrall)\n end_if_this_event_on()\n end_if_event_flag_on(EVENT.CapriciousThrallDead)\n\n if_event_flag_on(0, EVENT.CapriciousThrallActive)\n chr.disable(CHR.SilverKnightArcherNearThrall)\n\n if_event_flag_on(1, EVENT.CapriciousThrallActive)\n if_host(1)\n if_player_inside_region(1, REGION.CapriciousThrallTrigger)\n if_condition_true(0, 1)\n\n # Ambush.\n flag.enable(EVENT.ThrallAmbushOngoing) # Ambush is ongoing. Note this MUST be enabled before the flag below.\n flag.enable(11512060) # One-off ambush is done.\n flag.enable(11502003) # Thrall won't appear in Sen's.\n flag.enable(11502004) # Thrall won't appear in Sen's.\n obj.enable(1511974)\n sfx.create_map_sfx(1511975)\n obj.enable(1511976)\n sfx.create_map_sfx(1511977)\n obj.enable(1511978)\n sfx.create_map_sfx(1511979)\n chr.enable(CHR.CapriciousThrall)\n anim.force_animation(CHR.CapriciousThrall, ANIM.ThrallAmbushAttack)\n wait(0.5)\n sound.enable_map_sound(1513804)\n boss.enable_boss_health_bar(CHR.CapriciousThrall, TEXT.CapriciousThrallName)\n wait(100.0) # Battle timer.\n end_if_event_flag_on(11512061) # Already dead and handled.\n boss.disable_boss_health_bar(CHR.CapriciousThrall, TEXT.CapriciousThrallName)\n sound.play_sound_effect(CHR.CapriciousThrall, SoundType.s_sfx, 777777777) # For effect.\n wait(3.0) # so sound effect can build up and slightly mask the abrupt music stop\n sound.disable_map_sound(1513804)\n anim.force_animation(CHR.CapriciousThrall, ANIM.ThrallRetreat)\n wait(1.4)\n chr.disable(CHR.CapriciousThrall)\n obj.disable(1511974)\n sfx.delete_map_sfx(1511975)\n obj.disable(1511976)\n sfx.delete_map_sfx(1511977)\n obj.disable(1511978)\n sfx.delete_map_sfx(1511979)\n message.status_explanation(TEXT.ThrallHasFled)\n flag.enable(11512008) # Message won't appear when you come back.", "def connect_default_events(self):\n self.connect_event('motion_notify_event', self.onmove)\n self.connect_event('button_press_event', self.press)\n self.connect_event('button_release_event', self.release)\n self.connect_event('draw_event', self.update_background)\n self.connect_event('key_press_event', self.on_key_press)\n self.connect_event('key_release_event', self.on_key_release)\n self.connect_event('scroll_event', self.on_scroll)", "def test_gameHandleEvents(self):\n # this kinda gonna be reiterating the other tests??\n # the tests of all the individual methods below make this test work\n pass", "def handle_event(self, event):\n self.give_sub_event.handle_event(event)", "def processEvents(self):\n self.framelist = sorted(self.framelist, key=lambda event: event.timestamp, reverse=True)\n self.framequeue = sorted(self.framequeue, key=lambda event: event.timestamp, reverse=True)\n self.packetqueue = sorted(self.packetqueue, key=lambda event: event.timestamp, reverse=True)\n \n print len(self.framequeue)\n print len(self.packetqueue)\n \n while len(self.framequeue) > 0 or len(self.packetqueue) > 0:\n self.getNextEvent().processEvent(self, self.decisionAlg)", "def check_events(rk_settings, screen, rock, bullets):\r\n\tfor event in pygame.event.get():\r\n\t\tif event.type == pygame.QUIT:\r\n\t\t\tsys.exit()\r\n\t\t\r\n\t\telif event.type == pygame.KEYDOWN:\r\n\t\t\tcheck_keydown_events(event, rk_settings, screen, rock, bullets)\r\n\t\t\t\t\r\n\t\telif event.type == pygame.KEYUP:\r\n\t\t\tcheck_keyup_events(event, rock)", 
"def __show_all_events(self):\n for event in self.events_list:\n self.__print_events_info(event)\n print()", "def handle_new_events(self, events):\n for event in events:\n self.events.append(\n self.create_event_object(\n event[0],\n event[1],\n int(event[2])))", "def on_event(self, events):\n raise NotImplemented(\"on_event method should be implemented.\")", "def _notify_handlers(self):\n\n # Notify all handlers \n for handler_callback in self._registered_handlers:\n try:\n handler_callback(self._balloon_position)\n except Exception as e:\n # A receiver failed, catch and move on\n pass", "def _check_events(self):\n\t\t# Watch for keyboard and mouse events.\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.QUIT:\n\t\t\t\tsys.exit()\n\t\t\telif event.type == pygame.KEYDOWN:\n\t\t\t\tself._check_keydown_events(event)\n\t\t\telif event.type == pygame.KEYUP:\n\t\t\t\tself._check_keyup_events(event)\n\t\t\telif event.type == pygame.MOUSEBUTTONDOWN:\n\t\t\t\tmouse_pos = pygame.mouse.get_pos()\n\t\t\t\tself._check_play_button(mouse_pos)", "def event0():\n header(0, 0)\n\n if DEBUG.GET_CHTHONIC_SPARK:\n item.award_item_to_host_only(1600)\n if DEBUG.GET_DARKMOON_SEANCE_RING:\n item.award_item_to_host_only(1600310)\n if DEBUG.GWYNDOLIN_DEAD:\n flag.enable(EVENT.GwyndolinDead)\n item.award_item_to_host_only(2600)\n if DEBUG.ORNSTEIN_AND_SMOUGH_DEAD:\n flag.enable(EVENT.OrnsteinAndSmoughDead)\n if DEBUG.DARK_ANOR_LONDO:\n flag.enable(EVENT.OrnsteinAndSmoughDead)\n flag.enable(EVENT.GwyndolinDead)\n flag.enable(EVENT.DarkAnorLondo)\n if DEBUG.GET_LAUTREC_BLACK_EYE_ORB:\n item.award_item_to_host_only(2034)\n if DEBUG.CAPRICIOUS_THRALL_ACTIVE:\n flag.enable(EVENT.CapriciousThrallActive)\n if DEBUG.GET_BUTTERFLY_SOUL:\n item.award_item_to_host_only(2530)\n item.award_item_to_host_only(0)\n if DEBUG.DISABLE_FOG_ARCHER:\n chr.disable(CHR.SilverKnightArcherNearBossFog)\n if DEBUG.JAREEL_DEAD:\n flag.enable(EVENT.JareelDead)\n\n skip_if_event_flag_off(1, EVENT.OrnsteinAndSmoughDead)\n map.register_bonfire(11510920, 1511950)\n for bonfire_flag, bonfire_id, kindle_level in zip((11510992, 11510984, 11510976), (1511960, 1511961, 1511962),\n (10, 0, 0)):\n map.register_bonfire(bonfire_flag, bonfire_id, initial_kindle_level=kindle_level)\n map.register_ladder(11510010, 11510011, 1511140)\n map.register_ladder(11510012, 11510013, 1511141)\n\n # Make elevator work immediately (and skip cutscene).\n flag.enable(11510305)\n\n flag.disable(11510304)\n skip_if_client(2)\n obj.disable(1511994)\n sfx.delete_map_sfx(1511995, False)\n obj.disable(1511310)\n for hitbox_id in (1513301, 1513302, 1513303):\n hitbox.disable_hitbox(hitbox_id)\n skip_if_event_flag_off(1, 11510300)\n skip_if_event_flag_off(6, 11510303)\n flag.disable(11510301)\n flag.disable(11510302)\n flag.enable(11510303)\n anim.end_animation(1511300, 53)\n hitbox.enable_hitbox(1513303)\n skip(13)\n skip_if_event_flag_off(6, 11510302)\n flag.disable(11510301)\n flag.enable(11510302)\n flag.disable(11510303)\n anim.end_animation(1511300, 50)\n hitbox.enable_hitbox(1513302)\n skip(6)\n skip_if_event_flag_off(5, 11510301)\n flag.enable(11510301)\n flag.disable(11510302)\n flag.disable(11510303)\n anim.end_animation(1511300, 51)\n hitbox.enable_hitbox(1513301)\n\n obj.disable(1511450)\n flag.disable(11510460)\n run_event_with_slot(11510090, 0, (1511700, 1511701, 1512600, 1512601))\n run_event_with_slot(11510090, 1, (1511702, 1511703, 1512602, 1512603))\n\n for event_id in (11515040, 11515041, 11515042):\n run_event(event_id)\n\n run_event(11510200) 
# Rotating lever to open palace.\n run_event(11510205) # (New) Rotating lever to open palace in Dark Anor Londo (Jareel must be dead).\n run_event(11510201) # Palace locked from the outside.\n run_event(11510100) # Break chandelier.\n run_event(11510210) # Open one-way gate to blacksmith.\n run_event(11510211) # Blacksmith gate is locked.\n run_event(11510220) # First activation of gondola. (Now pre-enabled.)\n run_event(11510300) # Main gondola activation.\n run_event(11510319) # Gondola flags.\n run_event(11510340) # Gondola navimesh.\n run_event(11510350) # Gondola sync.\n run_event(11510310) # Gondola lever can't be pushed.\n run_event(11515250) # Painting Guardian ambush.\n run_event(11515251) # Provoke a Silver Knight.\n run_event(11510110) # Open door to Sun Chamber. (Now requires key.)\n run_event(11510111) # (New) Sun Chamber is locked.\n run_event(11510400) # Trigger Dark Anor Londo.\n run_event(11510401) # Disable Darkmoon Tomb statue.\n run_event(11510230) # Enter Painted World if you have the Painted Doll.\n run_event(11510240) # Return to Sen's Fortress.\n run_event(11515050) # Offend Pale Demon and cut off Fortress return.\n run_event(11510120) # Enable special effect 4501 in Darkmoon Tomb.\n run_event(11510130) # (Updated) Control Dark Anor Londo enemies.\n # (Gone) Player always respawns at 'Anor Londo' bonfire in Dark Anor Londo.\n run_event(11510460) # Kneel to Darkmoon Covenant.\n run_event(11510462) # Two-frame sync for above.\n run_event(11510461) # Kneel to Darkmoon Covenant, simple version.\n run_event(11510140) # Move your bloodstain out of endless Gwyndolin corridor when you win.\n run_event(11510150) # Trigger flag for quivering Black Eye Orb.\n run_event(11512008) # (New) Message that Thrall has fled higher again.\n\n run_event(11512043) # (NEW) Monitor resting at Sun Chamber bonfire for warping (11512045).\n run_event(11512044) # (NEW) Monitor resting at Gwyn's Altar bonfire for warping (11512046).\n\n run_event(151)\n run_event(11510215)\n\n # Sentinel shield parts.\n for slot, sentinel_id in zip(range(14), range(1510400, 1510414)):\n run_event_with_slot(11515060, slot, (sentinel_id,))\n\n # Gargoyle tails removed.\n\n # One-way shortcut doors.\n run_event_with_slot(11510260, 0, (11510251, 1512251, 1512250), 'iii')\n run_event_with_slot(11510260, 1, (11510257, 1512253, 1512252), 'iii')\n run_event_with_slot(11510260, 2, (11510258, 1512255, 1512254), 'iii')\n\n # ORNSTEIN AND SMOUGH / GWYN, LORD OF LIGHT\n\n sound.disable_map_sound(1513800) # Ornstein and Smough.\n sound.disable_map_sound(1513805) # Gwyn.\n\n # GWYN:\n run_event(11512200) # Gwyn trigger.\n run_event(11512201) # Gwyn death.\n skip_if_event_flag_on(22, EVENT.AnorLondoGwynWarp) # Skip O&S events (light and dark). Keep an eye on length.\n\n skip_if_event_flag_off(10, EVENT.OrnsteinAndSmoughDead)\n # Already dead:\n anim.force_animation(1511401, 0, loop=True) # Start elevators\n anim.force_animation(1511402, 0, loop=True)\n run_event(11515392)\n for fog_wall, fog_sfx in zip((1511990, 1511992, 1511988), (1511991, 1511993, 1511989)):\n obj.disable(fog_wall)\n sfx.delete_map_sfx(fog_sfx, False)\n skip(11)\n\n # Alive:\n for relative_id in (5390, 5391, 5393, 5392, 1, 5394, 5395, 5396, 5397, 5398, 5399):\n run_event(BASE_FLAG + relative_id)\n\n # FORSAKEN KNIGHT ORNSTEIN & SUN-EATER SMOUGH\n\n run_event(11515492) # Trigger. 
Handles all other events within.\n run_event(11512001) # Die.\n\n # DARK SUN GWYNDOLIN\n\n sound.disable_map_sound(1513802)\n skip_if_event_flag_off(6, EVENT.GwyndolinDead)\n # Already dead:\n run_event(11515382)\n obj.disable(1511890)\n sfx.delete_map_sfx(1511891, False)\n obj.disable(1511892)\n sfx.delete_map_sfx(1511893, False)\n skip(13)\n # Alive:\n # Disable Jareel fog (otherwise visible in boss start cutscene).\n obj.disable(1511970)\n sfx.delete_map_sfx(1511971, False)\n obj.disable(1511972)\n sfx.delete_map_sfx(1511973, False)\n for relative_id in (5380, 5381, 5383, 5382, 900, 5384, 5385, 5386, 450):\n run_event(BASE_FLAG + relative_id)\n\n # NEW: Abyssal King Jareel.\n sound.disable_map_sound(1513803)\n skip_if_event_flag_off(6, EVENT.JareelDead)\n # Already dead:\n run_event(11515372)\n obj.disable(1511970)\n sfx.delete_map_sfx(1511971, False)\n obj.disable(1511972)\n sfx.delete_map_sfx(1511973, False)\n skip(7)\n # Alive:\n run_event(11515370)\n run_event(11515371)\n run_event(11515373)\n run_event(11515372)\n run_event(11515374)\n run_event(11515375)\n run_event(11510901)\n\n # Open three doors for enemies (I think).\n for relative_door_id, base_slot in zip((251, 257, 258), (0, 20, 40)):\n run_event_with_slot(11510710, base_slot, (BASE_FLAG + relative_door_id, 6750,\n 1512000 + relative_door_id, 1512000 + relative_door_id - 1))\n for i, relative_enemy_id in enumerate((300, 301, 302, 305, 320, 321, 322, # Silver Knights\n 323, 324, 325, 326, 327, 328, 329, 500,\n 177, 178, 179, 180, 181, 181, 182, 183, # Darkwraiths\n 184, 185, 186, 187, 188, 189, 190)):\n run_event_with_slot(\n 11510710, base_slot + i + 1, (BASE_FLAG + relative_door_id, 1510000 + relative_enemy_id,\n 1512000 + relative_door_id, 1512000 + relative_door_id - 1))\n\n # Mimic triggers.\n for slot, relative_mimic_id in enumerate(range(4)):\n run_event_with_slot(11515200, slot, (1510200 + relative_mimic_id,))\n run_event_with_slot(11515210, slot, (1510200 + relative_mimic_id,))\n run_event_with_slot(11515220, slot, (1510200 + relative_mimic_id,))\n run_event_with_slot(11515230, slot, (1510200 + relative_mimic_id,))\n run_event_with_slot(11515240, slot, (1510200 + relative_mimic_id, 1512010 + relative_mimic_id))\n run_event_with_slot(11510850, slot, (1510200 + relative_mimic_id,))\n run_event_with_slot(11515190, slot, (1510200 + relative_mimic_id,))\n\n # Treasure chests.\n for i in range(1, 21):\n if i == 12 or i == 19:\n continue\n run_event_with_slot(11510600, i, (1511650 + i, 11510600 + i))\n anim.end_animation(1511662, 0) # Gwyn's chest already looted\n # Only activate chapel chest before Dark Anor Londo (replaced by Mimic).\n skip_if_event_flag_on(1, EVENT.DarkAnorLondo)\n run_event_with_slot(11510600, 19, (1511669, 11510619))\n\n # Non-respawning enemies.\n run_event_with_slot(11510860, 0, (1510250, 0)) # Haunting Semblance\n run_event_with_slot(11510860, 3, (6640, 0)) # Dark Anor Londo Knight 1\n run_event_with_slot(11510860, 4, (6650, 0)) # Dark Anor Londo Knight 2\n run_event_with_slot(11510870, 0, (CHR.DarkmoonGuardian,))\n\n # NEW: Allied Silver Knights and Sentinels stop respawning in Dark Anor Londo if killed (unless Jareel is dead).\n for slot, enemy_id in enumerate(DarkAnorLondoAllies):\n run_event_with_slot(11512050, slot, (enemy_id,))\n run_event_with_slot(11512150, slot, (enemy_id,)) # They also turn hostile again if attacked in Dark AL.\n\n # NEW: Darkwraiths stop respawning in Dark Anor Londo if killed (unless Jareel is alive).\n for slot, enemy_id in enumerate(Darkwraiths):\n 
run_event_with_slot(11512100, slot, (enemy_id,))\n\n # NEW: Scripted rampart battle between archers and Darkwraiths. Also disables gravity\n # for the high archer.\n run_event(11512040)\n\n # NEW: Scripted battle between Darkwraith and Pale Demons.\n run_event(11512041)\n\n # NEW: Angry Giant Blacksmith in Dark Anor Londo.\n run_event(11512042)\n\n # NEW: Capricious Thrall one-off attack on the rooftop.\n sound.disable_map_sound(1513804)\n obj.disable(1511974)\n sfx.delete_map_sfx(1511975, False)\n obj.disable(1511976)\n sfx.delete_map_sfx(1511977, False)\n obj.disable(1511978)\n sfx.delete_map_sfx(1511979, False)\n run_event(11512060) # Trigger and timer.\n run_event(11512061) # Death.", "def event0():\n header(0, 0)\n end_if_client()\n\n if DEBUG.GET_MASTER_KEY:\n flag.disable(50004066)\n item.award_item_to_host_only(4073)\n if DEBUG.HAS_RUSTBONE:\n flag.enable(EVENT.HasBonerust)\n if DEBUG.SPEED_UP_PLAYER:\n chr.set_special_effect(CHR.Player, 2370)\n if DEBUG.GET_CHTHONIC_SPARK:\n flag.disable(50001510) # Thrall Spark drop flag.\n item.award_item_to_host_only(ITEMLOT.ThrallReward)\n\n for flag_id in (760, 762, 765):\n flag.disable(flag_id)\n\n # Display a message after an event flag is enabled (with optional delay).\n run_event_with_slot(260, 0, args=(11810000, 10010600, 0), arg_types='iif') # Arrival in Lordran.\n run_event_with_slot(260, 1, args=(257, 10010610, 0), arg_types='iif') # Rite of Kindling.\n run_event_with_slot(260, 2, args=(EVENT.ObtainedChthonicSpark, 10010620, 0), arg_types='iif') # Chthonic Spark.\n run_event_with_slot(260, 3, args=(11412053, 10010621, 0), arg_types='iif') # Chthonic Spark stolen.\n run_event_with_slot(260, 4, args=(EVENT.LordvesselReceived, TEXT.LordvesselWarpUnlocked, 0), arg_types='iif')\n\n # Assorted events (see documentation). Mostly monitoring states. 710 monitors warping ability.\n for event_id in (761, 763, 290, 701, 702, 717, 718,\n 706, 740, 750, 752, 757, 758, 759,\n 754, 770, 772, 730, 731, 766, 710):\n run_event(event_id)\n\n # Monitor Lord Souls/Shard possession. 
Doesn't include Dark Remnant.\n run_event_with_slot(711, 0, args=(2500, 711)) # Gravelord Nito\n run_event_with_slot(711, 1, args=(2501, 712)) # Bed of Chaos\n run_event_with_slot(711, 2, args=(2502, 713)) # Four Kings\n run_event_with_slot(711, 3, args=(2503, 714)) # Seath the Scaleless\n\n run_event(715) # Player has Gwyn's Soul.\n run_event(716) # Player has Sunlight Spear.\n run_event(11512000) # (New) Player has been given Lordvessel.\n\n # Monitor Estus upgrade level.\n for slot, args in enumerate(zip(range(202, 215, 2), range(203, 216, 2))):\n run_event_with_slot(8131, slot, args)\n\n run_event(819) # Monitor repair box sync.\n\n run_event(2540) # (New) Ring of the Embraced punishes you if removed.\n run_event(2541) # (New) Ring of Temptation activates after 15 seconds.\n run_event(2542) # (New) Ring of Temptation takes your souls and breaks if you die.\n run_event(2543) # (New) Ring of the Evil Eye kill reward.\n run_event(2544) # (New) Twilight Ring effect starts and ends.\n run_event(2545) # (New) Twilight Ring effect waxes and wanes.\n run_event(2546) # (New) Bond to Beyond has a 5% chance of giving one soft humanity.\n run_event(2547) # (New) Contract and heal Bonerust (11302050)\n run_event(2548) # (New) Kills heal with Nahr Alma pact.\n run_event(2549) # (New) Ring of Condemnation recharges.\n run_event(11502020) # (New) Lithic Witness event.\n run_event(11502023) # (New) Beyond Witness event.\n\n # (New) Toggles availability of full bonfire menu based on Spark possession.\n run_event(11512005)\n\n # BOSS DROPS\n\n for slot, args in enumerate((\n # boss_dead_flag, immediate_item_lot, delayed_item_lot_1, delayed_item_lot_2\n (2, ITEMLOT.AriamisReward, 9020, 9030),\n (11010901, ITEMLOT.TaurusDemonReward, 9000, 9030),\n (11010904, ITEMLOT.ProfaneImageReward, 0, 0),\n (3, ITEMLOT.BellGargoylesReward, 9020, 0),\n (4, ITEMLOT.CrossbreedPriscillaReward, 9020, 0),\n (11200900, ITEMLOT.MoonlightButterflyReward, 9000, 0),\n (11200901, ITEMLOT.GravestalkersReward, 9030, 0),\n (5, ITEMLOT.AbyssArtoriasReward, 9000, 0),\n (6, ITEMLOT.PinwheelReward, 9000, 9030),\n (7, ITEMLOT.NitoReward, 9000, 9030),\n (9, ITEMLOT.QuelaagReward, 9020, 0),\n (11410902, ITEMLOT.CeaselessDischargeReward, 9000, 9030),\n (11412055, ITEMLOT.JeremiahReward, 9000, 0),\n (11410901, ITEMLOT.CentipedeDemonReward, 9000, 9030),\n (10, ITEMLOT.BedOfChaosReward, 9000, 9030),\n (11, ITEMLOT.SensGolemReward, 9000, 0),\n (11510900, ITEMLOT.GwyndolinReward, 0, 0),\n (11510901, ITEMLOT.JareelReward, 0, 0),\n (11510902, ITEMLOT.OrnsteinReward, 9000, 0),\n (11510903, ITEMLOT.SmoughReward, 9000, 0),\n (11012012, ITEMLOT.ThrallReward, 0, 0),\n (13, ITEMLOT.FourKingsReward, 9010, 0),\n (14, ITEMLOT.SeathReward, 9000, 0),\n (11800001, ITEMLOT.GwynCinderReward, 0, 0),\n (16, ITEMLOT.AsylumDemonReward, 9000, 0),\n (11810901, ITEMLOT.StrayDemonReward, 9000, 9030),\n (11810902, ITEMLOT.AsylumTyrantReward, 9000, 9030),\n (11210000, ITEMLOT.SanctuaryGuardianReward, 9000, 0),\n (11210001, ITEMLOT.ArtoriasReward, 0, 0),\n (11212006, ITEMLOT.ManusReward, 9040, 0),\n (11210004, ITEMLOT.KalameetReward, 0, 0),\n (11212008, ITEMLOT.TwilightVagrantReward, 0, 0),\n (11512201, ITEMLOT.GwynLightReward, 0, 0),\n )):\n run_event_with_slot(1950, slot, args)\n\n # (New) Monitor Velka's pact. 
(1910 is enabled in Firelink Shrine.)\n run_event(1915) # Monitor pact breaking.\n run_event(1916) # Monitor Seath punishment.\n run_event(1917) # Monitor Nito punishment.\n run_event(1918) # Monitor Jeremiah punishment.\n\n # (New) Monitor challenge pacts.\n run_event(1900) # Kremmel.\n run_event(1901) # Zandroe.\n run_event(1902) # Caitha.\n run_event(1903) # Nahr Alma.\n run_event(1904) # Quella permanent Abyss warp.\n run_event(1905) # Monitor Etched Ring removal and curse player (non-Quella).\n run_event(1906) # Quella ring removal.\n\n run_event(1920) # (New) Return Xanthous Crown on next load when dropped. Uses 1921.\n run_event(1922) # (New) Warp to special Painted World event when Soul of Ariamis is consumed.\n run_event(1923) # (New) Award Chaos Fire Whip when Soul of the Exile is consumed.\n run_event(1924) # (New) Skeletons in Tomb go back to rest when you load a map other than Tomb or Catacombs.\n run_event(1925) # (New) Manages Dark Ember damage boost stacks.\n run_event(11025400) # (New) Manages Ruinous Hand kill charge-up.\n run_event(1926) # (New) Trigger Ruinous Hand explosion at full charge.\n run_event(1927) # (New) HP penalty for being hollow (25%).\n\n run_event(2510) # (New) Sable Rune control.\n run_event(2511) # (New) Lustrous Rune control.\n run_event(2512) # (New) Wraith Rune control.\n run_event(2513) # (New) Scintilla Rune control.\n run_event(2514) # (New) Omphalic Rune control.\n run_event(2515) # (New) Omphalic Rune kill counter and death trigger.\n run_event(2516) # (New) Pale White Rune control.\n run_event(2517) # (New) Reaper's Rune trigger.\n run_event(2518) # (New) Reaper's Rune kill counter.\n run_event(2519) # (New) Rhythm Rune triggers.\n run_event(2520) # (New) Ransackers Rune trigger.\n # (New) Ransackers Rune item map checks. (2521-2530) (No Kiln, no Asylum.)\n for slot, (block, area) in enumerate(((10, 0), (10, 1), (10, 2), (11, 0), (12, 0), (12, 1),\n (13, 0), (13, 1), (13, 2), (14, 0), (14, 1), (15, 0),\n (15, 1), (16, 0), (17, 0))):\n args = tuple([block, area] + [50000 + 100 * slot + 10 * i for i in range(0, 10)])\n run_event_with_slot(2521, slot, args=args, arg_types='BBiiiiiiiiii')\n \n # Activate Runes.\n for slot, rune in enumerate(range(9)):\n run_event_with_slot(2600, slot, args=(90 + rune, 11025350 + rune))\n\n # Monitor availability of bonfire options\n for slot, args in enumerate(zip(range(2600, 2610), range(250, 260))):\n run_event_with_slot(250, slot, args)\n\n # Remove Embers from inventory when given to blacksmiths. These are removed aggressively and repeatedly!\n for slot_args in zip((0, 1, 2, 6, 7, 8, 9, 10, 12),\n zip((350, 351, 352, 356, 357, 358, 359, 360, 362),\n (800, 801, 802, 806, 807, 808, 809, 810, 812))):\n run_event_with_slot(350, slot_args[0], slot_args[1])\n\n # (NEW) Chthonic Spark version of the above event, which also requires Vamos to be alive.\n run_event_with_slot(363, 0, args=(363, 813))\n\n # Monitor reinforcement material possession.\n for slot, args in enumerate(zip(range(1000, 1131, 10), range(780, 794))):\n run_event_with_slot(780, slot, args)\n\n # Monitor covenant membership.\n for slot, args in enumerate(zip(range(0, 10), range(850, 860))):\n run_event_with_slot(870, slot, args)\n\n # Covenant joining events. 
(args = trigger_flag, player_animation, rotation_target, looping_animation)\n for slot, args in enumerate(zip(range(840, 850), (7905, 7905, 7905, 7905, 7898, 7905, 7905, 7913, 7905, 7905),\n (6370, 6072, 6080, 6001, 10000, 6340, 6341, 10000, 6380, 1400700),\n (-1, -1, -1, -1, 7896, -1, -1, 7911, -1, -1))):\n run_event_with_slot(840, slot, args)\n\n # Monitor NG+ level. Uses flags 690 (NG) to 705 (NG+15).\n run_event_with_slot(690, 0, args=(600, 4, 16, 1175))\n\n run_event(719) # Monitor possession of any spell.\n run_event(720) # Monitor possession of any pyromancy.\n\n # Monitor whether shops are sold out.\n # NOTE: This all suggests that shopkeeper flags are in the 7000 range for their area. Avoid!\n run_event(721) # Big Hat Logan in Duke's Archives.\n run_event(722) # Quelana of Izalith.\n run_event(723) # Griggs at Firelink Shrine.\n run_event(724) # Male Undead Merchant. (I don't think this does anything.)\n run_event(725) # Checks if you've bought 2+ items from Logan in Duke's Archives.\n run_event(726) # Checks if you've bought 2+ items from Ingward in New Londo Ruins.\n run_event(727) # Checks flags in Ash Lake / Great Hollow. Not sure who this is.\n\n run_event(745) # Cut Shiva questline I think.\n run_event(818) # Black Eye Orb quivers in Anor Londo.\n run_event(810) # Monitor possession of Lautrec's Black Eye Orb.\n # Lautrec frees himself from New Londo if both item flags below are enabled.\n run_event_with_slot(812, 0, args=(51400150,)) # Monitor possession of Blighttown Fire Keeper Soul (moved).\n run_event_with_slot(812, 1, args=(51010050,)) # Monitor possession of Undead Parish Humanity (still on altar).\n run_event(822) # Disable flag 830 half a second after leaving the Kiln. (Frampt pickup.)\n run_event(823) # Disable flag 831 half a second after leaving the Kiln. (Kaathe pickup.)\n\n # (New) Monitor dead NPCs for Twilight Vagrant. 
Counts friendly or hollow death, unless noted otherwise.\n for slot, npc_dead_flag in enumerate((\n 1073, # 2051: Oscar (friendly) (must be enabled in tutorial)\n 1097, # 2052: Big Hat Logan\n 1115, # 2053: Griggs\n 1005, # 2054: Solaire (note this won't trigger if he is killed when Hollow, unlike other NPCs)\n 1254, # 2055: Laurentius\n 1462, # 2056: Crestfallen Warrior\n 1575, # 2057: Lautrec\n 1604, # 2058: Shiva\n 1628, # 2059: Patches\n 1899, # 2060: Havel\n 1864, # 2061: Ciaran (in Oolacile and/or with Nito)\n 1823, # 2062: Hawkeye Gough\n 5, # 2063: Artorias (in Darkroot)\n )):\n run_event_with_slot(11212050, slot + 1, args=(npc_dead_flag,))\n\n # (New) Monitor Tomb of the Giants presence to send Giant Skeletons back to sleep.\n run_event(11310201)\n\n # (New) Monitor picking up Chthonic Spark for the first time to display message.\n run_event(11512004)\n\n # EVENT REWARDS (covenants, storylines)\n\n run_event_with_slot(910, 0, args=(11400591, 1280)) # Joining Chaos Servants.\n run_event_with_slot(911, 0, args=(11010591, 1000, 1), arg_types='iiB')\n run_event_with_slot(911, 1, args=(11510590, 1010, 1), arg_types='iiB')\n run_event_with_slot(911, 2, args=(11700591, 1020, 1), arg_types='iiB')\n run_event_with_slot(911, 3, args=(11000591, 1030, 1), arg_types='iiB')\n run_event_with_slot(911, 4, args=(11400590, 1040, 1), arg_types='iiB')\n run_event_with_slot(911, 5, args=(11410594, 1050, 1), arg_types='iiB')\n run_event_with_slot(911, 6, args=(11020594, 1060, 1), arg_types='iiB')\n run_event_with_slot(911, 7, args=(11020595, 1070, 1), arg_types='iiB')\n run_event_with_slot(911, 8, args=(11810590, 1082, 1), arg_types='iiB')\n run_event_with_slot(911, 9, args=(11810591, 1080, 1), arg_types='iiB')\n run_event_with_slot(911, 10, args=(11510592, 1090, 1), arg_types='iiB')\n run_event_with_slot(911, 11, args=(11600592, 1100, 1), arg_types='iiB')\n run_event_with_slot(911, 12, args=(11020602, 1110, 1), arg_types='iiB')\n run_event_with_slot(911, 13, args=(11010594, 1120, 1), arg_types='iiB')\n run_event_with_slot(911, 14, args=(11010595, 1130, 1), arg_types='iiB')\n run_event_with_slot(911, 15, args=(11020599, 1140, 1), arg_types='iiB')\n run_event_with_slot(911, 16, args=(11020607, 1150, 1), arg_types='iiB')\n run_event_with_slot(911, 17, args=(11200592, 1160, 1), arg_types='iiB')\n run_event_with_slot(911, 18, args=(11200593, 1170, 1), arg_types='iiB')\n run_event_with_slot(911, 19, args=(11200594, 1180, 1), arg_types='iiB')\n run_event_with_slot(911, 20, args=(11300590, 1190, 1), arg_types='iiB')\n run_event_with_slot(911, 21, args=(11300591, 1200, 1), arg_types='iiB')\n run_event_with_slot(911, 22, args=(11310590, 1210, 1), arg_types='iiB')\n run_event_with_slot(911, 23, args=(11310592, 1220, 1), arg_types='iiB')\n run_event_with_slot(911, 24, args=(11310593, 1230, 1), arg_types='iiB')\n run_event_with_slot(911, 25, args=(11310594, 1240, 1), arg_types='iiB')\n run_event_with_slot(911, 26, args=(11320590, 1250, 1), arg_types='iiB')\n run_event_with_slot(911, 27, args=(11320581, 1260, 1), arg_types='iiB')\n run_event_with_slot(911, 28, args=(11320593, 1270, 1), arg_types='iiB')\n run_event_with_slot(911, 29, args=(11400592, 1290, 1), arg_types='iiB')\n run_event_with_slot(911, 30, args=(11400594, 1300, 1), arg_types='iiB')\n run_event_with_slot(911, 31, args=(11400596, 1310, 1), arg_types='iiB')\n run_event_with_slot(911, 32, args=(11400597, 1320, 1), arg_types='iiB')\n run_event_with_slot(911, 33, args=(11400598, 1330, 1), arg_types='iiB')\n run_event_with_slot(911, 34, args=(11400599, 
1340, 1), arg_types='iiB')\n run_event_with_slot(911, 35, args=(11510595, 1350, 1), arg_types='iiB')\n run_event_with_slot(911, 36, args=(11510596, 1360, 1), arg_types='iiB')\n run_event_with_slot(911, 37, args=(11510597, 1370, 1), arg_types='iiB')\n run_event_with_slot(911, 38, args=(11600594, 1380, 1), arg_types='iiB')\n run_event_with_slot(911, 39, args=(11600595, 1390, 1), arg_types='iiB')\n run_event_with_slot(911, 40, args=(11600596, 1400, 1), arg_types='iiB')\n run_event_with_slot(911, 41, args=(11010598, 1410, 0), arg_types='iiB')\n run_event_with_slot(911, 42, args=(11210590, 1500, 1), arg_types='iiB')\n run_event_with_slot(911, 43, args=(11210593, 1510, 1), arg_types='iiB')\n run_event_with_slot(911, 44, args=(11210594, 1520, 1), arg_types='iiB')\n run_event_with_slot(911, 45, args=(11600580, 1401, 1), arg_types='iiB')\n run_event_with_slot(911, 46, args=(11600581, 1402, 1), arg_types='iiB')\n run_event_with_slot(911, 47, args=(11600582, 1403, 1), arg_types='iiB')\n run_event_with_slot(911, 48, args=(11600583, 1404, 1), arg_types='iiB')\n run_event_with_slot(890, 0, args=(11310580, 1221, 1), arg_types='iiB') # 911 ran out of slots (up against 960).\n run_event_with_slot(890, 1, args=(11510580, 1361, 1), arg_types='iiB')\n run_event_with_slot(890, 2, args=(11510581, 1371, 1), arg_types='iiB')\n run_event_with_slot(890, 3, args=(11320592, 1261, 1), arg_types='iiB')\n\n # DIRECT NPC DEATH REWARDS (960-969)\n run_event_with_slot(960, 0, args=(1315, 6180, 1100)) # Ingward (Key to the Seal)\n run_event_with_slot(960, 1, args=(1402, 6230, 6230)) # Undead Merchant (Orange Soapstone)\n # run_event_with_slot(960, 2, args=(1198, 6080, 1140)) # Petrus (Lift Chamber Key) (dies before killing Rhea)\n # run_event_with_slot(960, 3, args=(1196, 6080, 1140)) # Petrus (Lift Chamber Key) (dies after killing Rhea)\n\n # NEW GAME PLUS: Bring covenant ranks up to date, and prevent gifts from being re-awarded.\n run_event_with_slot(8200, 0, args=(3, 5500, 50000120, 11010594))\n run_event_with_slot(8200, 1, args=(3, 5510, 50000130, 11010595))\n run_event_with_slot(8200, 2, args=(2, 103, 50000160, 11200592))\n run_event_with_slot(8200, 3, args=(3, 240, 50000170, 11200593))\n run_event_with_slot(8200, 4, args=(2, 124, 50000180, 11200594))\n run_event_with_slot(8200, 5, args=(0, 453000, 50000220, 11310592))\n run_event_with_slot(8200, 6, args=(3, 5100, 50000225, 11310580))\n run_event_with_slot(8200, 7, args=(3, 5110, 50000230, 11310593))\n run_event_with_slot(8200, 8, args=(3, 114, 50000265, 11320581))\n run_event_with_slot(8200, 9, args=(3, 377, 50000260, 11320592))\n run_event_with_slot(8200, 10, args=(3, 378, 50000270, 11320593))\n run_event_with_slot(8200, 11, args=(3, 4500, 50000310, 11400596))\n run_event_with_slot(8200, 12, args=(3, 4520, 50000320, 11400597))\n run_event_with_slot(8200, 13, args=(3, 4510, 50000330, 11400598))\n run_event_with_slot(8200, 14, args=(2, 130, 50000350, 11510595))\n run_event_with_slot(8200, 15, args=(3, 113, 50000360, 11510596))\n run_event_with_slot(8200, 16, args=(2, 102, 50000365, 11510580))\n run_event_with_slot(8200, 17, args=(3, 5910, 50000370, 11510597))\n run_event_with_slot(8200, 18, args=(0, 1366000, 50000375, 11510581))\n run_event_with_slot(8200, 19, args=(0, 904000, 50000380, 11600594))\n run_event_with_slot(8200, 20, args=(3, 102, 50000390, 11600595))\n run_event_with_slot(8200, 21, args=(0, 210000, 50000400, 11600596))\n run_event_with_slot(8200, 22, args=(1, 40000, 50000410, 11600580))\n run_event_with_slot(8200, 23, args=(1, 41000, 50000420, 11600581))\n 
run_event_with_slot(8200, 24, args=(1, 42000, 50000430, 11600582))\n run_event_with_slot(8200, 25, args=(1, 43000, 50000440, 11600583))\n\n # Same as above, but for other special rewards.\n run_event_with_slot(8300, 0, args=(ItemType.good, 100, 50000000)) # White Sign Soapstone\n run_event_with_slot(8300, 1, args=(ItemType.good, 101, 51100330)) # Red Sign Soapstone\n run_event_with_slot(8300, 2, args=(ItemType.good, 102, 50000390)) # Red Eye Orb\n run_event_with_slot(8300, 3, args=(ItemType.good, 106, 11017020)) # Orange Guidance Soapstone\n run_event_with_slot(8300, 4, args=(ItemType.good, 108, 11607020)) # Book of the Guilty\n run_event_with_slot(8300, 5, args=(ItemType.good, 112, 11407080)) # Servant Roster\n run_event_with_slot(8300, 6, args=(ItemType.good, 2508, 11007010)) # Unknown - seems unused.\n run_event_with_slot(8300, 7, args=(ItemType.good, 2508, 11007010)) # Unknown - seems unused.\n run_event_with_slot(8300, 8, args=(ItemType.good, 2508, 11007010)) # Unknown - seems unused.\n run_event_with_slot(8300, 9, args=(ItemType.good, 2508, 11007010)) # Unknown - seems unused.\n\n # NOTE: Flag 8310 onwards is used for NPC humanity registration.\n\n # Same as above for DLC items.\n run_event_with_slot(8090, 0, args=(ItemType.good, 510, 11217010))\n run_event_with_slot(8090, 1, args=(ItemType.good, 511, 11217020))\n run_event_with_slot(8090, 2, args=(ItemType.good, 512, 11217030))\n run_event_with_slot(8090, 3, args=(ItemType.good, 513, 11217040))\n run_event_with_slot(8090, 4, args=(ItemType.good, 514, 11217050))\n\n # (New) Same as above, but for Runes and other new items.\n run_event_with_slot(11022100, 0, args=(ItemType.good, 900, 51010020))\n run_event_with_slot(11022100, 1, args=(ItemType.good, 901, 51510690))\n run_event_with_slot(11022100, 2, args=(ItemType.good, 902, 51200120))\n run_event_with_slot(11022100, 3, args=(ItemType.good, 903, 51410030))\n run_event_with_slot(11022100, 4, args=(ItemType.good, 904, 51810080))\n run_event_with_slot(11022100, 5, args=(ItemType.good, 905, 51700020))\n run_event_with_slot(11022100, 6, args=(ItemType.good, 906, 51300220))\n run_event_with_slot(11022100, 7, args=(ItemType.good, 907, 51300221))\n run_event_with_slot(11022100, 8, args=(ItemType.good, 908, 51210290))\n run_event_with_slot(11022100, 9, args=(ItemType.ring, 133, 50000650)) # Velka gift (Ring of Condemnation)\n run_event_with_slot(11022100, 10, args=(ItemType.ring, 124, 50001780)) # Twilight Vagrant drop (Twilight Ring)\n run_event_with_slot(11022100, 11, args=(ItemType.ring, 105, 50004900)) # Lithic Bond\n run_event_with_slot(11022100, 12, args=(ItemType.ring, 107, 50004910)) # Serous Bond\n run_event_with_slot(11022100, 13, args=(ItemType.ring, 106, 50004920)) # Empyrean Bond\n run_event_with_slot(11022100, 14, args=(ItemType.ring, 108, 50004930)) # Bond to Beyond\n # Leaving slots 11022100-11022119 dedicated to this.\n\n # (NEW) Remove some additional new items in NG+.\n run_event_with_slot(11022120, 0, args=(ItemType.ring, 152)) # Ashen Ring\n run_event_with_slot(11022120, 1, args=(ItemType.ring, 151)) # Gwynevere's Ring\n run_event_with_slot(11022120, 2, args=(ItemType.good, 220)) # Silver Pendant\n run_event_with_slot(11022120, 3, args=(ItemType.armor, 294000)) # Xanthous Crown (true)\n run_event_with_slot(11022120, 4, args=(ItemType.ring, 149)) # Darkmoon Seance Ring", "def visit_group(self, group):\n for obj in self.event_json['events']:\n event_id = obj['id']\n event = self.world.events[event_id]\n group.add(event)", "def run():\n\n while True:\n\n # get event, blah\n 
event_name, event_data = revent.get_event(block=True, timeout=5)\n\n if event_name is not None:\n print 'received: %s' % event_name\n\n if event_name.endswith('_oembed_details'):\n handle_new_oembed_details(event_data)\n\n elif event_name == 'new_tweet':\n handle_new_tweet(event_data)\n\n # and we're done\n assert revent.verify_msg(event_name, event_data), \\\n \"Could not verify %s\" % event_name", "def get_events(self):\n self._events = []\n self.ircobj.process_once(timeout=0.1)\n return self._events", "def listener(self, event):\n print \"TB:@%s arrived event %s\" % (event.time, event) \n informFunction = self._informFunc\n informFunction((event.time, event.state))\n return []", "def _handle_event(event):\n if event.device.id_string != self._event.device.id_string:\n return\n\n self.apply_event(event)", "def process_input(self):\n for event in pygame.event.get():\n\n if self.joystick and self.state == self.STATE_PLAY:\n\n if event.type == pygame.JOYAXISMOTION:\n self.gameevents.add(\"joyaxismotion\", event.axis, event.value, type='EVENT_USER')\n elif event.type == pygame.JOYBUTTONDOWN:\n if event.button == self.fire_button:\n self.gameevents.add(\"press\", \"fire\", type='EVENT_USER')\n elif event.button == self.IFF_button:\n self.gameevents.add(\"press\", \"iff\", type='EVENT_USER')\n elif event.button == self.shots_button:\n self.gameevents.add(\"press\", \"shots\", type='EVENT_USER')\n elif event.button == self.pnts_button:\n self.gameevents.add(\"press\", \"pnts\", type='EVENT_USER')\n elif event.type == pygame.JOYBUTTONUP:\n if event.button == self.fire_button:\n self.gameevents.add(\"release\", \"fire\", type='EVENT_USER')\n elif event.button == self.IFF_button:\n self.gameevents.add(\"release\", \"iff\", type='EVENT_USER')\n elif event.button == self.shots_button:\n self.gameevents.add(\"release\", \"shots\", type='EVENT_USER')\n elif event.button == self.pnts_button:\n self.gameevents.add(\"release\", \"pnts\", type='EVENT_USER')\n\n else:\n\n if event.type == pygame.KEYDOWN:\n\n if (pygame.key.get_mods() & self.modifier):\n if event.key == pygame.K_q:\n self.gameevents.add(\"press\", \"quit\", type='EVENT_USER')\n\n if event.key == pygame.K_RETURN:\n\n if self.state == self.STATE_INTRO:\n self.state = self.STATE_SETUP\n\n elif self.state == self.STATE_SETUP:\n self.state = self.STATE_GAMENO\n\n elif self.state == self.STATE_GAMENO:\n if self.mine_exists:\n self.state = self.STATE_SETUP_IFF\n else:\n self.state = self.STATE_PREPARE\n\n elif self.state == self.STATE_IFF:\n self.state = self.STATE_PREPARE\n\n elif self.state == self.STATE_SCORES:\n self.state = self.STATE_SETUP\n\n elif self.state == self.STATE_PLAY:\n\n if event.key == self.thrust_key:\n self.gameevents.add(\"press\", \"thrust\", type='EVENT_USER')\n elif event.key == self.left_turn_key:\n self.gameevents.add(\"press\", \"left\", type='EVENT_USER')\n elif event.key == self.right_turn_key:\n self.gameevents.add(\"press\", \"right\", type='EVENT_USER')\n elif event.key == self.fire_key:\n self.gameevents.add(\"press\", \"fire\", type='EVENT_USER')\n elif event.key == self.IFF_key:\n self.gameevents.add(\"press\", \"iff\", type='EVENT_USER')\n elif event.key == self.shots_key:\n self.gameevents.add(\"press\", \"shots\", type='EVENT_USER')\n elif event.key == self.pnts_key:\n self.gameevents.add(\"press\", \"pnts\", type='EVENT_USER')\n elif event.key == self.pause_key and self.config['General']['allow_pause']:\n self.gameevents.add(\"press\", \"pause\", type='EVENT_USER')\n else:\n self.gameevents.add(\"press\", 
event.key, \"user\", type='EVENT_SYSTEM')\n \n elif self.state == self.STATE_PAUSED and event.key == self.pause_key:\n self.gameevents.add(\"press\", \"unpause\", type='EVENT_USER')\n \n else:\n self.gameevents.add(\"press\", event.key, \"user\", type='EVENT_SYSTEM')\n\n elif event.type == pygame.KEYUP:\n\n if self.state == self.STATE_PLAY:\n\n if event.key == self.thrust_key:\n self.gameevents.add(\"release\", \"thrust\", type='EVENT_USER')\n elif event.key == self.left_turn_key:\n self.gameevents.add(\"release\", \"left\", type='EVENT_USER')\n elif event.key == self.right_turn_key:\n self.gameevents.add(\"release\", \"right\", type='EVENT_USER')\n elif event.key == self.fire_key:\n self.gameevents.add(\"release\", \"fire\", type='EVENT_USER')\n elif event.key == self.IFF_key:\n self.gameevents.add(\"release\", \"iff\", type='EVENT_USER')\n elif event.key == self.shots_key:\n self.gameevents.add(\"release\", \"shots\", type='EVENT_USER')\n elif event.key == self.pnts_key:\n self.gameevents.add(\"release\", \"pnts\", type='EVENT_USER')", "def get_events(self):\n #Returne the capture events\n raise NotImplementedError", "def get_events(self):\n #Returne the capture events\n raise NotImplementedError", "def run(self, event):\n pass" ]
[ "0.68227965", "0.6619201", "0.65891063", "0.64815664", "0.641884", "0.6384413", "0.616646", "0.6130458", "0.6088237", "0.6078368", "0.60664743", "0.60335886", "0.60190856", "0.59726304", "0.59557456", "0.59455985", "0.5929909", "0.59173644", "0.59143096", "0.5911435", "0.5911386", "0.5880077", "0.5880077", "0.585659", "0.58542097", "0.5830124", "0.5801894", "0.5764059", "0.5763933", "0.5761054", "0.5759978", "0.5756524", "0.5745947", "0.57308036", "0.5686797", "0.5662277", "0.56595623", "0.5649904", "0.5638823", "0.5617212", "0.5605928", "0.5568029", "0.55633014", "0.55611354", "0.5560937", "0.55573034", "0.5550538", "0.5547888", "0.5539855", "0.5527179", "0.55250907", "0.5524776", "0.5523774", "0.5514004", "0.5495019", "0.5490839", "0.5474638", "0.5472736", "0.5472001", "0.54564106", "0.54455525", "0.5444161", "0.544142", "0.5441257", "0.54266834", "0.54254854", "0.5410337", "0.5409423", "0.5406129", "0.5399633", "0.5384807", "0.5372638", "0.53701395", "0.53687143", "0.5359313", "0.5358416", "0.53569186", "0.5348136", "0.53405625", "0.53299606", "0.5320898", "0.5316986", "0.53157437", "0.5314255", "0.52916795", "0.52798796", "0.5278682", "0.52760094", "0.5271787", "0.5270266", "0.5259237", "0.5254354", "0.5254146", "0.52502143", "0.5249028", "0.523527", "0.522907", "0.522714", "0.522714", "0.5225372" ]
0.54172313
66
Updates the frame of the game
def update(self):
    self.moving_sprites.update()
    self.static_sprites.update()
    self.camera.update(self.player)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_display(self):\r\n\r\n # The display.update() Updates the screen, making the new frame replace the old one. \r\n pg.display.update()\r\n \r\n # clock.tick sets a framerate for the game.\r\n # This is to make the game run at a stable fps \r\n self.clock.tick(cng.FRAMERATE)", "def update(self):\r\n pygame.display.update()\r\n return", "def runFrame(self):\n self._drawFrame(self._advanceTime())", "def _update_screen(self):\n self.screen.fill((250,250,250))\n self.rocket.blitme()\n pygame.display.flip()", "def update(self):\n self.t = time()\n self.frame += 1\n self.loop(self)\n self.draw_bg()\n self.draw_C()\n if self.cursor:\n self.draw_rect(*self.pos, RED, 2)\n self.draw_grid()\n self.draw_T()\n self.show_info()\n for (surf, rect) in self.surf_list:\n self.screen.blit(surf, rect)\n pygame.display.update()\n self.clock.tick(self.fps)", "def update(self):\n self.board.update()", "def updateWorld(self):\n\t self.screen.clear()\n self.update()\n self.screen.refresh()", "def update(self):\r\n self.rect = pygame.Rect(self.x, self.y, self.width, self.height)", "def game_updated(self):\n\n # replace with your game updated logic\n self.update_board()", "def update(self):\n self._num_frames += 1", "def updateScreen(self) -> None:\n\n # fill game display black\n self.surface.fill(Colors.Black)\n\n # draw players and ball\n self.drawImageOnSurface(self.player_one)\n self.drawImageOnSurface(self.player_two)\n self.drawImageOnSurface(self.ball)\n\n # draw all the spacer images\n for image in self.spacers:\n self.drawImageOnSurface(image)\n\n # draw scores and format the scores in byte representation\n self.drawTextOnSurface(format(self._score[0], \"04b\"),\n (Configuration.windowWidth / 4, Configuration.windowHeight / 2), Colors.ByteGreen,\n font=self.font)\n self.drawTextOnSurface(format(self._score[1], \"04b\"),\n (3 * Configuration.windowWidth / 4, Configuration.windowHeight / 2), Colors.ByteGreen,\n font=self.font)\n\n super().updateScreen() # call the parent method to update the screen", "def update(self):\n self._pygame.event.get()", "def update_frame(self):\n if self.should_reset_camera:\n self.ren.ResetCamera()\n self.should_reset_camera = False\n self.interactor.Render()\n app.processEvents()", "def update_frame(self):\n if self.should_reset_camera:\n self.ren.ResetCamera()\n self.should_reset_camera = False\n self.interactor.Render()\n app.processEvents()", "def onFrameUpdated(self):\n pass", "def _refresh_render(self):\n current_frame = self.frame\n self.frame = int(1E6)\n self.frame = current_frame", "def update(self):\n frame = str(self.image_number//10)\n if self.image_number < 30: # Not yet on the tenth frame\n self.image_number += 1\n else: # Reset back to 0\n self.image_number = 0\n\n image_location = os.path.join(\"assets\", \"player\" + frame + \".png\") # Get image path\n self.image = pygame.image.load(image_location).convert_alpha() # Load image\n\n # Keyboard events\n keys_pressed = pygame.key.get_pressed()\n if keys_pressed[pygame.K_UP]:\n self.move(0, -5)\n if keys_pressed[pygame.K_LEFT]:\n self.move(-5, 0)\n if keys_pressed[pygame.K_RIGHT]:\n self.move(5, 0)\n if keys_pressed[pygame.K_DOWN]:\n self.move(0, 5)\n\n # Mouse events\n mouse_pos = pygame.mouse.get_pos() # Get position of mouse as a tuple representing the\n # (x, y) coordinate\n\n mouse_buttons = pygame.mouse.get_pressed()\n if mouse_buttons[0]: # If left mouse pressed\n self.teleport(mouse_pos[0], mouse_pos[1])\n if mouse_buttons[2]: # If right mouse pressed\n self.teleport(mouse_pos[0], mouse_pos[1])", 
"def update_window(self, window, frame):\n self.draw_eyes()\n self.show(window, frame)\n self.new_frame()", "def _update_screen(self):\n self.screen.fill(self.settings.bg_color)\n self.ship.blitme()\n for bullet in self.bullets.sprites():\n bullet.draw_bullet()\n self.aliens.draw(self.screen)\n\n # Draw score information\n self.scoreboard.show_score()\n\n if not self.stats.game_active:\n self.play_button.draw_button()", "def run(self):\n while not self.done:\n dt = self.clock.tick(self.fps)\n self.event_loop()\n self.update(dt)\n self.draw()\n pygame.display.flip()\n # pygame.display.update() # can be used to update only part of the screen", "def _update_screen(self):\n\t\tself.screen.fill(self.settings.bg_color)\n\t\tself.pigeon.blitme()\n\t\tfor dropping in self.droppings.sprites():\n\t\t\tdropping.draw_dropping()\n\t\tself.autos.draw(self.screen)\n\n\t\t# Draw the score information.\n\t\tself.sb.show_score()\n\n\t\t# Draw the play button if the game is inactive.\n\t\tif not self.stats.game_active:\n\t\t\tself.play_button.draw_button()\n\n\t\t# Make the most recently drawn screen visible.\n\t\tpygame.display.flip()", "def update_screen(self, ai_game):\r\n self.surface.fill(self.settings.bg_color)\r\n self.ship.blitme()\r\n for bullet in self.ship_bullets.sprites():\r\n bullet.draw_bullet()\r\n for bullet in self.alien_bullets.sprites():\r\n bullet.draw_bullet()\r\n self.aliens.draw(self.surface)\r\n self.explosions.draw(self.surface)\r\n\r\n # Draw the score information.\r\n self.sb.show_score()\r\n\r\n # Draw the difficulty buttons if the game is inactive.\r\n if not self.stats.game_active:\r\n for button in self.buttons:\r\n button.draw_button()\r\n\r\n # Draw the game over message if appropriate\r\n if self.stats.game_over:\r\n self.surface.blit(self.game_over_text, self.game_over_text_rect)\r\n\r\n # Make the most recently drawn screen visible.\r\n self.screen.blit(self.surface, (0, 0))\r\n pg.display.flip()", "def update_screen(self):\r\n\r\n # Redraw the screen during each pass through the loop.\r\n self._screen.fill(self._bb_settings.bg_color)\r\n\r\n # Redraw all markers around edge of board\r\n\r\n # Draw the play button if the game is inactive\r\n if self._stats.get_status() == \"Start_game\":\r\n for button in self._play_mode_button_list:\r\n button.draw_button()\r\n elif self._stats.get_status() == \"replay\":\r\n for button in self._replay_button_list:\r\n button.draw_button()\r\n else:\r\n self.blitme()\r\n shoot_markers = self.get_entry_exit()\r\n atom_markers = self.get_atom_guess()\r\n for marker in shoot_markers.values():\r\n marker[1].draw_marker()\r\n for atom in atom_markers.values():\r\n atom.draw_marker()\r\n # Make the most recently drawn screen visible.\r\n pygame.display.flip()", "def update(self, time_passed):\n pygame.sprite.Sprite.update(self, time_passed)\n self.rect.midbottom = self.toScreenCoordinate()", "def _update_screen(self):\n self.screen.fill(self.settings.bg_color)\n self.ship.blitme()\n # ignore unresolved reference below, we're using the method from Bullets, not Sprite. 
Pycharm...sigh.\n for bullet in self.bullets.sprites():\n bullet.draw_bullet()\n\n # drawing the aliens\n self.aliens.draw(self.screen)\n\n # drawing information about the score\n self.sb.show_score()\n\n # draws play-button on inactive game state\n if not self.stats.game_active:\n self.play_button.draw_button()\n\n pygame.display.flip()", "def update(self):\n\n if self.rect.y <= 0:\n logger(\"resetting to bottom edge\")\n self.rect.y = 0\n\n if self.rect.y > SCREEN_HEIGHT-self.height:\n logger(\"resetting to top edge\")\n self.rect.y = SCREEN_HEIGHT-self.height\n\n self.rect.x += self.change_x\n self.rect.y += self.change_y\n # logger(\"Player X Position: {0}\".format(self.rect.x), \"Player Y Position: {0}\".format(self.rect.y))", "def update(self):\n \n if games.keyboard.is_pressed(games.K_LEFT):\n #check if we reach the edge of the screen, so we do not pass border\n if self.x == 20 or self.x == games.screen.width-20:\n self.x = 40\n else:\n self.x -= 2\n \n if games.keyboard.is_pressed(games.K_RIGHT):\n #check if we reach the edge of the screen, so we do not pass border\n if self.x == 20 or self.x == games.screen.width-20:\n self.x = games.screen.width-40\n else:\n self.x += 2 \n \n if games.keyboard.keypress(games.K_z):\n self.fire_bullet()\n \n self.get_hit()\n \n if self.score.value == 0:\n self.end_game()", "def update(self):\n \n # Creates a surface on which 3 hearts can be blitted\n self.image = pygame.Surface((150, 50))\n \n # Blits each of the remaining Player lives' onto this surface\n for life in range(self.__num_lives): \n \n # Each heart image is 50x50 pixels, and so they are blitted 50 pixels apart\n self.image.blit(self.image_heart, (life*50, 0))\n \n self.image.set_colorkey((0, 0, 0))\n self.rect = self.image.get_rect()\n \n # The surface's rect is set in the top-right corner of the screen\n self.rect.x = 650\n self.rect.y = 0", "def update(self):\n self.screen.fill(blackColor)\n self.model.blocks.draw(self.screen)\n self.model.players.draw(self.screen)\n self.model.buttons.draw(self.screen)\n self.model.Coins.draw(self.screen)\n p1Score = myfont.render(\"Player 1 Score:\"+str(self.model.player1.score), 1, whiteColor)\n self.screen.blit(p1Score,(100,15))\n if self.model.playernum==2:\n p2Score = myfont.render(\"Player 2 Score:\"+str(self.model.player2.score), 1,whiteColor)\n self.screen.blit(p2Score,(1200,15))\n timerLimit = myfont.render(\"Time Limit:\" +str(self.model.time), 1, whiteColor)\n self.screen.blit(timerLimit, (700, 15)) \n pygame.display.update()", "def update(self):\n message = \"SCORE = %d\" % self.score\n self.image = self.__font.render(message, 1, (255, 255, 255))\n self.rect = self.image.get_rect()\n self.rect.centerx = 240\n self.rect.centery = 50", "def _update_screen(self):\n # Redraw the screen during each pass of the loop\n self.screen.fill(self.bg_color)\n self.ship.blitme()\n for bullet in self.bullets.sprites():\n bullet.draw_bullet()\n self.aliens.draw(self.screen)\n if not self.stats.game_active:\n self.play_button.draw_button()\n\n #Draw the scoreboard\n self.sb.show_score()\n\n # Make the most recently drawn screen visible\n pygame.display.flip()", "def update(self):\n Enemy.update(self)\n self.update_movement()\n self.update_firing()\n self.surf = self.animation.next_animation()", "def _update_screen(self):\n self.screen.fill(self.settings.bg_colour)\n # Draw ship on the screen\n self.ship.blitme()\n # Draw all bullets in the sprites group on the screen\n for bullet in self.bullets.sprites():\n bullet.draw_bullet()\n 
self.stars.draw(self.screen)\n self.sb.show_score()\n if not self.stats.game_active:\n self.play_button.draw_button()\n pygame.display.flip()", "def main(self):\n update = self.update\n draw = self.draw\n screen = self.screen\n flip = pg.display.update\n clock = time.time\n frame_length = (1. / self.fps)\n time_since_draw = 0\n last_update = clock()\n fps_timer = 0\n frames = 0\n\n while not self.done:\n clock_tick = clock() - last_update\n last_update = clock()\n time_since_draw += clock_tick\n update(clock_tick)\n if time_since_draw >= frame_length:\n time_since_draw -= frame_length\n draw(screen)\n flip()\n frames += 1\n\n fps_timer, frames = self.handle_fps(clock_tick, fps_timer, frames)\n time.sleep(.01)", "def draw_frame(self):\n self.render_surface.fill((135, 206, 235))\n # self.render_surface.fill((33, 38, 63))\n self.render_surface.blit(\n self.moon,\n (self.RENDER_SURFACE_WIDTH - 150, 80),\n special_flags=pygame.BLEND_ADD,\n )\n\n # draw background\n self.draw_background()\n\n self.render_surface.blit(\n self.assets.get_character_image(self.player),\n self.camera.translate(self.player.rect),\n )\n\n for enemy in self.enemies:\n pygame.draw.rect(\n self.render_surface, enemy.color, self.camera.translate(enemy.rect)\n )\n self.draw_enemy_health(enemy)\n\n # code to mask perticular block type.\n # for i in self.chunked_map.get_blocks():\n # if i.block_type == 4:\n # pygame.draw.rect(\n # self.render_surface, (255, 255, 255), self.camera.translate(i.rect)\n # )\n\n # draw tiles\n tiles = filter(\n lambda tile: not isinstance(tile, Reward) or tile.is_valid,\n self.chunked_map.get_blocks(),\n )\n tiles = map(self.get_tile_blit_seq, tiles)\n self.render_surface.blits(tiles)\n\n # draw particles\n for particle in self.particle_system.get_active_particles():\n pygame.draw.circle(\n self.render_surface,\n particle.color,\n self.camera.translate_xy(particle.center),\n particle.radius,\n )\n\n # self.draw_fps()\n # self.draw_score()\n self.draw_player_health()\n if self.player.attack_arc_end_deg != 300:\n self.draw_attack_arc(self.player)\n\n for enemy in filter(lambda e: e.attack_arc_end_deg != 300, self.enemies):\n self.draw_attack_arc(enemy)\n\n if not self.player.read_to_take_damage:\n red_s = pygame.Surface(\n (self.RENDER_SURFACE_WIDTH, self.RENDER_SURFACE_HEIGHT)\n )\n red_s.fill((100, 0, 0))\n self.render_surface.blit(red_s, (0, 0), special_flags=pygame.BLEND_ADD)", "def _update_frame(self):\n # check if continue\n if self._keep_updating:\n self.__frame = self._cam.get_display_frame()\n if self.__frame is not None:\n self._cvn_camera_viewfinder.create_image(0, 0, image=self.__frame, anchor=tk.NW)\n\n self._root.after(self._delay, self._update_frame)", "def run(self):\r\n \r\n if not self.gameOver:\r\n screen.fill(COLOR3)\r\n self.board.drawBoard()\r\n self.handleEvents()\r\n for piece in self.board.pieces.values():\r\n piece.update()\r\n else:\r\n self.resetGame()\r\n pygame.display.update()", "def update(self):\n message = \"LIFE = %d\" % self.life\n self.image = self.__font.render(message, 1, (255, 255, 255))\n self.rect = self.image.get_rect()\n self.rect.centerx = 750\n self.rect.centery = 50", "def update(self):\n self.rect = (self.x, self.y, self.width, self.height)", "def display_frame(self, screen):\n screen.fill(WHITE)\n\n if not self.game_over:\n self.all_sprites_list.draw(screen)\n\n pygame.display.flip()", "def play(self):\n frame_time = 0\n last_angle = 5\n while self.RENDER_FRAME:\n frame_time += self.clock.get_time()\n if frame_time > 15:\n frame_time = 0\n 
self.event_handler()\n self.update_entities()\n self.draw_frame()\n\n scaled_surface = pygame.transform.scale(\n self.render_surface, (self.DISPLAY_WIDTH, self.DISPLAY_HEIGHT)\n )\n self.display.blit(scaled_surface, (0, 0))\n\n pygame.display.update()\n self.clock.tick()", "def update(self):\r\n if self.left<0:\r\n self.left=0\r\n if self.right>games.screen.width:\r\n self.right=games.screen.width\r\n if games.keyboard.is_pressed(games.K_LEFT):\r\n self.x-=5\r\n if games.keyboard.is_pressed(games.K_RIGHT):\r\n self.x+=5\r\n self.checkball()\r\n self.points.right=games.screen.width-5", "def render(self):\r\n pygame.display.flip()\r\n self.screen.fill(self.bgColor)\r\n\r\n self.paddle_1.show_paddle(self.screen, self.fgColor)\r\n self.paddle_2.show_paddle(self.screen, self.fgColor)\r\n self.ball.show_ball(self.screen, self.fgColor)\r\n\r\n self.clock.tick(self.framerate)", "def update(self):\n tic = time.time()\n if self.playing:\n self.show_frame()\n add_delay = np.maximum(1, self.delay - int(1000 * (time.time() - tic)))\n self.window.after(ms=add_delay, func=self.update)", "def __draw_game(self) -> None:\n self.__draw_window()\n self.pipes.draw(self.win)\n self.player.draw(self.win)\n pygame.display.update()", "def render_screen(self):\n pygame.display.update(self.screen_rect)\n return", "def _update_screen(self):\n self.screen.fill(self.settings.bg_color)\n self.sideways_ship.blitme()\n for bullet in self.bullets.sprites():\n bullet.draw_bullet()\n self.aliens.draw(self.screen)\n pygame.display.flip()", "def update(self):\n\n screen_rect = self.screen.get_rect()\n if self.rect.right >= screen_rect.right:\n pygame.mixer.music.stop()\n self.dead = True\n\n if self.toggle_death:\n if self.cur_frame_score == 0.0:\n score_val = int(self.ai_settings.alien_points * 10.0)\n\n score_str = \"{:,}\".format(score_val)\n self.score_image = self.font.render(score_str, True, self.text_color)\n\n # Display the score at the top right of the screen.\n self.score_rect = self.score_image.get_rect()\n self.score_rect.centerx = self.rect.centerx\n self.score_rect.centery = self.rect.centery\n\n self.cur_frame_score += 1.0\n\n if self.cur_frame_score == self.max_frame_score:\n self.dead = True\n else:\n # Update the sprite animation.\n speed_increase = 2.0 + self.ai_settings.alien_speed_factor\n\n if speed_increase > 8.0:\n speed_increase = 8.0\n\n self.cur_frame += speed_increase\n\n self.x += speed_increase\n self.rect.x = self.x\n\n while self.cur_frame > self.max_frames:\n self.cur_frame -= self.max_frames\n\n if self.cur_frame < 40.0:\n self.image = self.image1\n elif self.cur_frame >= 40.0:\n self.image = self.image2", "def _update_screen(self):\n\t\tself.screen.fill((255, 255, 255))\n\n\t\tself._check_collisions()\n\t\tself._update_objects()\n\t\tself._blit_objects()\n\n\t\tpygame.display.flip()", "def game_draw(self):\n pass", "def update_state(self, clock, bg_image, allsprites):\n # ensure we don't get more than 60fps\n clock.tick(60)\n # update the background\n self.screen.blit(bg_image, (0, 0))\n # update the greeting\n self.draw_greeting()\n # update the sprite(s)\n allsprites.update()\n allsprites.draw(self.screen)\n # draw some snow\n for s in self.snow:\n s.draw(self.screen)", "def update_screen(self, tick: bool = True) -> None:\n self.handle_user_quit()\n self.redraw_screen()\n\n # Update the display and tick when needed\n pygame.display.update()\n if tick:\n self.fps_clock.tick(Game.TICK_RATE)", "def renderFrame(self):\n assert self.notify.debugStateCall(self, 'loginFSM', 'gameFSM')\n\n # Make 
sure any textures are preloaded before we render.\n gsg = base.win.getGsg()\n if gsg:\n render2d.prepareScene(gsg)\n\n base.graphicsEngine.renderFrame()", "def tick (self):\n\t\n\t\tself.display.clear ()\n\t\tself.draw ()\n\t\tfor sprite in self.sprites:\n\t\t\tsprite.drawToDisplay (self.display)\n\t\tself.display.showFrame ()", "def update(self):\r\n self.frame_cnt += 1\r\n\r\n if self.frame_cnt % CannonBall.FRAMES_PER_STEP == 0:\r\n self.x += self.d_x\r\n self.y += self.d_y\r\n self.rect = self.image.get_rect().move(self.x, self.y)\r\n self.steps_taken += 1\r\n\r\n if self.steps_taken >= CannonBall.STEPS_TO_DEST:\r\n self.kill()", "def display_frame(self, windowSurface):\r\n \r\n if self.game_over:\r\n # The user will click to restart the game\r\n GameOverScreen(windowSurface, \"game over.png\")\r\n basicFont = pygame.font.SysFont(\"Lunchtime Doubly So\", 150)\r\n if len(str(self.player.coincount)) > 1:\r\n drawText(str(self.player.coincount), basicFont, windowSurface, 445, 400, DARKGREEN)\r\n else:\r\n drawText(str(self.player.coincount), basicFont, windowSurface, 470, 400, DARKGREEN)\r\n\r\n elif self.game_complete:\r\n #game complete. User clicks to restart\r\n GameOverScreen(windowSurface, \"game complete.png\")\r\n basicFont = pygame.font.SysFont(\"Lunchtime Doubly So\", 100)\r\n drawText(str(self.player.coincount), basicFont, windowSurface, 530, 547, DARKGREEN)\r\n\r\n else:\r\n # draw the player and the platforms onto the surface\r\n self.all_sprites.draw(windowSurface)\r\n #Draw lives, level, and coins collected\r\n DrawLives(windowSurface, 850, 10, self.player.lives)\r\n CoinCount(windowSurface, self.player.coincount)\r\n NextLevel(windowSurface, self.player.level)\r\n\r\n #draw appropriate images for each level\r\n if self.player.level == 2:\r\n image = pygame.image.load(\"fireworks.png\")\r\n windowSurface.blit(image, [420, 5])\r\n windowSurface.blit(image, [570, 5])\r\n elif self.player.level == 3:\r\n image = pygame.image.load(\"enemy1.png\")\r\n windowSurface.blit(image, [400, 5])\r\n windowSurface.blit(image, [540, 5])\r\n \r\n # draw the window onto the screen\r\n pygame.display.update()", "def run_game(self):\n # redraw the screen during each pass thought the loop and Set the background color from settings.py.\n\n while True:\n self._check_events()\n self.ship.update(self.time_delta)\n self.bullets.update(self.time_delta)\n self._update_bullets()\n self._update_screen()", "def change_frame(self, frame):\r\n pass", "def update(self):\n events = pygame.event.get()\n self.plane_update()\n self.bullet_update(events)\n self.background_update()\n self.enemy_update(events)", "def update(self, time, frame, face_position = None):\n\t\tself.face_position = face_position or self.face_position", "def update(self):\r\n if self.count:\r\n if self.count>=games.screen.fps*1.5:\r\n self.dx=self.dx1\r\n self.dy=self.dy1\r\n self.count=None\r\n if self.left<0:\r\n self.sidebounce()\r\n if self.right>games.screen.width:\r\n self.sidebounce()\r\n if self.top<0:\r\n self.vertbounce()\r\n if self.bottom>games.screen.height:\r\n self.die()\r\n if self.count:\r\n self.count+=1", "def update_scoreboard(self):\n self.clear()\n self.goto(-(WIDTH//6), (HEIGHT//2-30))\n self.write(self.l_score, align = 'center', font = ('Courier', 20, 'normal'))\n self.goto((WIDTH//6), (HEIGHT//2-30))\n self.write(self.r_score, align = 'center', font = ('Courier', 20, 'normal'))", "def loop(self, frame):\n self.root = frame\n self.drawUI()\n cv2.imshow('Fotopasca', self.root)", "def update(self):\n \n self.rect.x += 
self.change_x\n self.rect.y += self.change_y\n \n if self.rect.x < 0:\n self.rect.x = 0\n if self.rect.x > screen_width - 60:\n self.rect.x = screen_width - 60\n if self.rect.y < 0:\n self.rect.y = 0 \n \n if self.rect.y > screen_height - 60:\n self.rect.y = screen_height - 60", "def update(self):\n\t\tif self.moving_right and self.rect.right < self.screen_rect.right:\n\t\t\tself.x += self.settings_sky.ava_speed\n\t\tif self.moving_left and self.rect.left > 0:\n\t\t\tself.x -= self.settings_sky.ava_speed\n\t\tif self.moving_up and self.rect.top > 0:\n\t\t\tself.y -= self.settings_sky.ava_speed\n\t\tif self.moving_down and self.rect.bottom < self.screen_rect.bottom:\n\t\t\tself.y += self.settings_sky.ava_speed\n\n\t\t#Update rect object from self.x and self.y\n\t\tself.rect.x = self.x\n\t\tself.rect.y = self.y", "def update(self) -> None:\n self._state = self._player.update_state()\n self.__update_title()", "def gameUpdate():\n score_txt = font.render('Score: ' + str(score), True, white)\n hscore_txt = font.render('High Score: ' + str(high_score), True, white)\n pg.draw.rect(game_disp, red, food_pos)\n game_disp.blit(score_txt, (0,0))\n game_disp.blit(hscore_txt, (canv_w - 150, 0))\n snakePrint()\n screen.blit(game_disp, (0,0))\n pg.display.update()", "def update_current_screen(self):\n\t\tself.current_screen.update()", "def advance_frame():\n # pylint: disable=global-statement\n global current_frame, current_loop\n current_frame = current_frame + 1\n if current_frame >= frame_count:\n current_frame = 0\n current_loop = current_loop + 1\n sprite_group[0][0] = current_frame", "def render(self, frame: Frame):\n\n cv2.imshow(winname=self.title, mat=frame)\n cv2.waitKey(delay=self.delay)\n\n if self.step:\n while cv2.waitKey(delay=0) != self.step_key:\n continue", "def update(self):\r\n # Update the decimal position of the kame.\r\n self.y -= self.speed_factor\r\n # Update the rect position.\r\n self.rect.y = self.y", "def draw_game(self) -> None:\n\n self.screen.fill(THECOLORS['royalblue4'])\n self.our_board.draw(self.screen)\n self.game_status.draw(self.screen)\n self.heading_bar.draw(self.screen)\n\n if self.our_game_state == STATE_PREPARING:\n self.start_game_button.draw(self.screen)\n elif not self.our_game_state == STATE_READY_TO_START:\n self.their_board.draw(self.screen)", "def draw(self):\n self.screen.blit(self.bg_img4, (0,0))\n self.all_sprites.draw(self.screen)\n self.final_score = \"{0}\".format(self.wall.score)\n msg = self.font.render(\"Lives: {0}\".format(self.paddle.lives),1,pygame.Color(\"white\"))\n self.screen.blit(msg,(15,15))\n score = self.font.render(\"Score:\" + self.final_score, 1,pygame.Color(\"white\"))\n self.screen.blit(score,(Constant.screen_width - 150 ,15))", "def update(self):\n\n # Track FPS count\n if self.fps_counter + 1 >= 60:\n self.fps_counter = 0\n\n self.fps_counter += 1\n\n # Update movement animation and position\n if self.moving_right:\n self.image = self.animated_right[self.fps_counter // 30]\n self.x += self.movement_speed\n\n if self.moving_left:\n self.image = self.animated_left[self.fps_counter // 30]\n self.x -= self.movement_speed\n\n if self.moving_up:\n self.image = self.animated_up[self.fps_counter // 30]\n self.y -= self.movement_speed\n\n if self.moving_down:\n self.image = self.animated_down[self.fps_counter // 30]\n self.y += self.movement_speed\n\n self.rect.x, self.rect.y = self.x, self.y", "def update_display(self):\n self._clear_screen()\n print('Your score is {}'.format(self._roboc.score))\n print(self._roboc.currentmaze)\n 
print(\"Your viewpoint:\")\n print(self._roboc.get_hidden_game(4))", "def loop(self):\n self.screen.fill((0, 0, 0))\n self.clock.tick(FU_FRAME_RATE)\n self.level.update_loop(self.screen, self.clock)\n self.handle_events()", "def draw(self):\r\n self.scr.fill(SCREEN_COLOR)\r\n self.label.draw()\r\n pygame.display.flip()", "def _drawFrame(self):\n\n self._clearScreen()\n \n for object in Object.Objects:\n self._drawObject(object)\n\n for entity in Entity.Entities:\n self._drawObject(entity)\n\n self._drawObject(Game.Player)", "def main(self,Surf):\n while True:\n if self.state == \"GAME\":\n self.event_loop()\n self.update(Surf)\n elif self.state == \"QUIT\":\n break\n pg.display.update()\n self.Clock.tick(65)", "def update(self):\n if self.value:\n self.counter += 1\n self.__dx = 0 \n if self.counter == 5:\n self.image = self.blow_up\n if self.counter == 15:\n self.image = self.blow_up_2\n if self.counter == 25:\n self.value = False\n self.image = self.normal\n self.counter = 0 \n self.__dx = 9 \n self.rect.centerx = 540\n \n if self.rect.left <= 0:\n self.rect.left = 0\n if self.rect.right >= self.screen.get_width():\n self.rect.right = self.screen.get_width()", "def update(self):\n pygame.event.pump()\n self.pos_x += 0\n if (pygame.key.get_pressed()[pygame.K_w]) and self.pos_y > 0:\n self.pos_y -= 1\n if (pygame.key.get_pressed()[pygame.K_a]) and self.pos_x > 0:\n self.pos_x -= 1\n if (pygame.key.get_pressed()[pygame.K_d]) and self.pos_x < 1080:\n self.pos_x += 1\n if (pygame.key.get_pressed()[pygame.K_s]) and self.pos_y < 360:\n self.pos_y += 1", "def redraw(self):\r\n self.c.update()", "def draw(self):\n\t\tself.screen.fill(pygame.Color('black'))\n\t\tfor column in self.model.blocks:\n\t\t\tfor block in column:\n\t\t\t\tr = pygame.Rect(block.left,\n\t\t\t\t\t\t\t\tblock.top,\n\t\t\t\t\t\t\t\tblock.size,\n\t\t\t\t\t\t\t\tblock.size)\n\t\t\t\tpygame.draw.rect(self.screen, block.color,r)\n\t\tpygame.display.update()", "def update(self):\n pygame.event.pump()\n self.pos_x -= 1.5", "def render(self):\n if self.main_menu.active:\n self.main_menu.draw()\n elif self.game_over.active:\n self.game_over.draw()\n else:\n self.screen.fill(BACKGROUND_COLOR)\n\n # Score\n score_surf = self.score.render(\n \"Score {} \".format(self.GAME_SCORE), True, (255, 255, 255)\n )\n self.screen.blit(score_surf, (self.screen_rect.width - 100, 5))\n\n self.timer.draw(self.screen)\n self.board.draw(self.screen)\n\n pg.display.update()", "def update(self) -> None:\n\n # Update player 2 information\n self.player2.update()\n self.player2_bullet.update()\n self.player2_bullet.draw(self.screen)\n self.player2.draw(self.screen)\n\n # Call the superclass update\n super().update()", "def update_frame(self, frame):\n\n t = datetime.now()\n delta_t = t - self.dpar.frame_timestamp[0]\n fps = self.dpar.update_fps(1./delta_t.total_seconds())\n\n self.dpar.frame_timestamp[0] = t\n\n if self.config.black_correct:\n cframe = self.ffc.black_correct(frame)\n else:\n cframe = frame\n\n self.dpar.latest_frame = np.copy(cframe)\n \n if self.dpar.cap_live_swap:\n pix, gray = self._get_pixmap(cframe[::4,::4], self.dpar.iwindow[0])\n self.cap_screen.cap_title = self._live_title(fps)\n self.cap_screen.setPixmap(pix)\n else: \n pix, gray = self._get_pixmap(cframe, self.dpar.iwindow[0])\n self.live_screen.live_title = self._live_title(fps)\n self.live_screen.setPixmap(pix)\n\n self.draw_histogram()\n\n\n if self.recording_sequence:\n\n # MRP ToDo update these tags properly.\n et = np.int(np.round(self.camera.actual_exposure_time_ms))\n ifi_ms 
= 1000. / self.camera.actual_frame_rate\n ts_ms = np.int(np.round(ifi_ms * self.seq_frame_num))\n\n self.ifd.update_tags((self.seq_frame_num, 0), et, 0, ts_ms, 99)\n\n cap_image = np.copy(self.dpar.latest_frame).astype(np.uint16)\n #cv2.imwrite(cfn, (cap_image << (16 - self.camera.pixel_bits)).astype(np.uint16))\n\n \"\"\"\n Perform the TIFF windowing and then rebinning (compress) according to config file options\n \"\"\"\n x0 = max(0, (cap_image.shape[1] - config.tiff_seq_x_window) // 2)\n x1 = cap_image.shape[1] - x0\n y0 = max(0, (cap_image.shape[0] - config.tiff_seq_y_window) // 2)\n y1 = cap_image.shape[0] - y0\n cap_image = cap_image[y0:y1, x0:x1]\n\n shift_bits = 16 - self.camera.pixel_bits\n if config.tiff_seq_rebin > 1: # not tested for r ne 2\n r = config.tiff_seq_rebin\n cap_image = cap_image.reshape((cap_image.shape[0] // r, r, cap_image.shape[1] // r, -1)).sum(axis=3).sum(axis=1)\n extra_bits = 2 * (r.bit_length() -1)\n shift_bits = max(0, shift_bits - extra_bits)\n\n\n #im = PIL.Image.fromarray(gray)\n im = PIL.Image.fromarray((cap_image << shift_bits).astype(np.uint16))\n\n im.save(self.tiff_out, tiffinfo=self.ifd, compression=TIFF_COMPRESSION)\n self.tiff_out.newFrame()\n self.seq_frame_num += 1\n self.seq_frame_label.setText(str(self.seq_frame_num))\n\n if self.recording_video:\n # cframe is int16\n #f8 = ((cframe >> (self.camera.pixel_bits - 8)) & 0xff).astype(np.uint8)\n #Style 1:\n #fc = np.stack((f8, f8, f8), axis=-1)\n #self.rv_vout.write(fc)\n #Style 2&3:\n self.rv_vout.write(gray)\n self.recorded_video_frame_number += 1\n #Style 4: (16-bit)\n #self.rv_vout.write(cframe)\n\n #if self.recorded_video_frame_number == 20:\n # self.record_video() # turn off", "def update(self, game):\n self.rect = pygame.Rect(self.x - self.r, self.y - self.r, 2 * self.r, 2 * self.r)\n self.x += self.vx * game.delta\n self.y += self.vy * game.delta\n\n \"\"\"Do not let Player get out of the Game window\"\"\"\n if self.x < self.r:\n if self.vx < 0:\n self.vx = -self.vx\n self.x = self.r\n if self.y < self.r:\n if self.vy < 0:\n self.vy = -self.vy\n self.y = self.r\n if self.x > game.width - self.r:\n if self.vx > 0:\n self.vx = -self.vx\n self.x = game.width - self.r\n if self.y > game.height - self.r:\n if self.vy > 0:\n self.vy = -self.vy\n self.y = game.height - self.r\n\n \"\"\"Bounce conditions for ball\"\"\"\n if pygame.sprite.collide_rect(self, game.main_platform):\n self.y -= 7\n self.vy = -self.vy\n self.vx += game.main_platform.vx\n\n \"\"\"Displacement of ball from striking with platform\"\"\"\n for z in game.platforms:\n if pygame.sprite.collide_rect(self, game.platforms[z]):\n self.y += 7\n self.vy = -self.vy\n game.to_remove.add(z)\n\n \"\"\"Losing after striking red platform\"\"\"\n for z in game.platformsx:\n if pygame.sprite.collide_rect(self, game.platformsx[z]):\n game.draw_lose_screen()", "def update(self, new_gameStateData):\r\n pass", "def update(self):\n gear_message = \"GEAR COLLECTED: %d\" % \\\n self.__num_gear_collected + \"/4\"\n self.image = self.__font.render(gear_message, 1, (255, 255, 255))\n self.rect = self.image.get_rect()\n \n # This message is positioned in the top-left corner of the screen\n self.rect.topleft = (10, 10)", "def update(self):\n\n # Update guess tracker\n for i in range(atoms):\n\n ident = 'tracker' + str(i + 1)\n\n if i < len(game.guesslist):\n color = scheme.red\n else:\n color = scheme.white\n\n self.ids[ident].color = color\n\n # Update score\n self.ids.score.text = str(game.score)\n\n # Check for end game conditions! 
Make button (in)visible.\n if len(game.guesslist) == atoms:\n self.ids.end_button.disabled = False\n self.ids.end_button.opacity = 1\n else:\n self.ids.end_button.disabled = True\n self.ids.end_button.opacity = 0", "def update_pygame():\n global elapsed\n sprite_group.update(False)\n handle_collisions()\n elapsed += pygame_clock.get_time()\n if elapsed >= 1000:\n set_caption()\n elapsed -= 1000", "def update_fps(self):\n self.fps.tick()\n\n\trange_str = \"\"\n gd = self.main_curve_dialog.curve.get_data()[1]\n\trange_str = \"Max: %s, Min: %s, Avg: %0.5s \" \\\n\t\t % (numpy.max(gd), numpy.min(gd), numpy.average(gd))\n\n\n fps_text = \"%s Update: %s FPS\" % (range_str, self.fps.rate())\n self.action_fps_display.setText(fps_text)", "def update(self):\n self.active = False\n self.top.update(self.rgb,self.cmyk,self.hsv)\n self.bot.update(self.rgb,self.cmyk,self.hsv)\n self.active = True", "def _update_screen(self):\n self.screen.fill(self.rain_settings.bg_color)\n self.rain.draw(self.screen)\n\n pygame.display.flip()", "def draw(self):\n self.screen.blit(self.background, (0, 0))\n self.screen.blit(self.border, (0, 0))\n self.screen.blit(self.border, (LEVEL_WIDTH - PLAYFIELD_PADDING[0], 0))\n for y in xrange(0, BLOCK_NUM_HEIGHT):\n for x in xrange(0, BLOCK_NUM_WIDTH):\n if self.blocks[y][x] == '0':\n pass\n else:\n self.screen.blit(self.block_types[self.blocks[y][x]],\n (PLAYFIELD_PADDING[0] + x * Block.WIDTH,\n PLAYFIELD_PADDING[1] + y * Block.HEIGHT))\n self.screen.blit(self.editor_cursor_block,\n self.position_grid_to_screen(self.editor_cursor_position))\n self.screen.blit(self.label_help_top, self.editor_help_top_padding)\n self.screen.blit(self.label_current_block_type, self.editor_info_padding)\n self.screen.blit(self.block_types[self.available_block_types[self.current_block_type]],\n (self.editor_info_padding[0] + 100, self.editor_info_padding[1]))\n # print str(self.editor_cursor_position) + \" \" +\n # str(self.position_grid_to_screen(self.editor_cursor_position))", "def _update_screen(self):\n self.screen.fill(self.bg_colour)\n\n if not self.waiting:\n self._check_cells()\n self._update_cells()\n for row in self.cells:\n for cell in row:\n cell.draw_cell()\n \n pygame.display.flip()", "def run(self):\n while self.mode is WorldMode.run:\n self.handle_events()\n self.update()\n self.draw()\n pygame.display.update()\n self.fps_clock.tick(FPS)", "def update(self):\n self.image = self.__font.render(str(self.__score), 1, (255, 255, 255))", "def update(self, dt):\n self.current_time = pg.time.get_ticks()\n if self._scene.quit:\n pg.mouse.set_visible(True)\n self.done = True\n elif self._scene.done:\n self.change_scene()\n self._scene.update(dt)\n self._scene.draw(self.screen)" ]
[ "0.75599945", "0.74793476", "0.7177903", "0.7115879", "0.70343995", "0.6902082", "0.6901963", "0.6897741", "0.6863001", "0.6841528", "0.681301", "0.68126184", "0.6780625", "0.6780625", "0.67711943", "0.6767823", "0.67476755", "0.6745845", "0.6714142", "0.6668295", "0.6652482", "0.66519153", "0.6634897", "0.6619838", "0.66177386", "0.6614684", "0.6614122", "0.6607326", "0.66037005", "0.6597193", "0.6574008", "0.6563084", "0.6553555", "0.6529659", "0.6521917", "0.65152836", "0.6503687", "0.6497894", "0.6492097", "0.64810205", "0.64762646", "0.64753574", "0.6462605", "0.64617753", "0.6456267", "0.6435455", "0.643364", "0.64305454", "0.6422447", "0.6407044", "0.640164", "0.64011717", "0.6367951", "0.63642806", "0.6353263", "0.63531595", "0.63516223", "0.632535", "0.6322208", "0.63131034", "0.63036406", "0.6283733", "0.62789214", "0.6274345", "0.62678474", "0.62592465", "0.625392", "0.62482405", "0.62449557", "0.62402564", "0.62391794", "0.6237962", "0.62099975", "0.6203797", "0.62034637", "0.6199717", "0.61986417", "0.6194079", "0.6185294", "0.6184826", "0.6184283", "0.61792684", "0.6178555", "0.6171781", "0.61685383", "0.61675763", "0.6166819", "0.6163394", "0.6154522", "0.6152191", "0.6152088", "0.61503357", "0.6149689", "0.61480916", "0.6137817", "0.6123618", "0.612023", "0.61184704", "0.6117715", "0.61171424" ]
0.638703
52
Draws a grid onto the map
def draw_grid(self):
    for x in range(0, WIDTH, TILESIZE):
        pg.draw.line(self.screen, LIGHTGREY, (x, 0), (x, HEIGHT))
    for y in range(0, HEIGHT, TILESIZE):
        pg.draw.line(self.screen, LIGHTGREY, (0, y), (WIDTH, y))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def drawGrid(self):\n\n if self.orientation == \"isometric\":\n for vline in range(0, self.map_array.shape[0]):\n line = self.canvas.create_line(iso(vline*self.cell_width, 0),\n iso(vline*self.cell_width, self.map_array.shape[0]*self.cell_height))\n self.canvas_objects.append(line)\n\n for hline in (range(0, self.map_array.shape[1])):\n line = self.canvas.create_line(iso(0, hline*self.cell_height),\n iso(self.map_array.shape[1]*self.cell_width, hline*self.cell_height))\n self.canvas_objects.append(line)\n self.canvas.bind(\"<Button-1>\", self.paintCells)\n self.canvas.bind(\"<Enter>\", self.drawFrame)\n self.canvas.bind(\"<Leave>\", self.killFrame)\n self.canvas.bind(\"<Motion>\", self.showFrame)", "def draw_grid(self):\n self.screen.draw_many_tiles(tile for tile in self.iter_grid_tiles())\n pass", "def drawGrid(self,gridLines=True):\n if not self.changed: self.edit()\n cGrid = Fmap.GRID\n cBorder = Fmap.BORDER\n if gridLines: #--Some fools don't want the grid!\n #--Grid\n for uv in range(-25,26,5):\n xy = 512/2 - 9*uv + 4\n self.drawRect(cGrid,0,xy,512,xy+1)\n self.drawRect(cGrid,xy,0,xy+1,512)\n #--Grid axes\n xy = 512/2 + 4\n self.drawRect(cBorder,0,xy,512,xy+1)\n self.drawRect(cBorder,xy,0,xy+1,512)\n #--Border\n self.drawBorder(cBorder,0,0,512,512,4)", "def draw_grid(self) -> None:\n for x in range(0, WIDTH, TILE_SIZE):\n pg.draw.line(self.screen, LIGHT_GREY, (x, INFO_HEIGHT), (x, HEIGHT))\n for y in range(INFO_HEIGHT, INFO_HEIGHT + HEIGHT, TILE_SIZE):\n pg.draw.line(self.screen, LIGHT_GREY, (0, y), (WIDTH, y))", "def draw_grid(self):\n pygame.draw.rect(self.screen, BLACK,\n (*grid_pos, WIDTH - 150, HEIGHT-150), 2)\n for x in range(9):\n pygame.draw.line(\n self.screen,\n BLACK,\n (grid_pos[0] + (x * cell_size), grid_pos[1]),\n (grid_pos[0] + (x * cell_size), grid_pos[1] + 450),\n 2 if x % 3 == 0 else 1\n )\n pygame.draw.line(\n self.screen,\n BLACK,\n (grid_pos[0], grid_pos[1] + (x * cell_size)),\n (grid_pos[0] + 450, grid_pos[1] + (x * cell_size)),\n 2 if x % 3 == 0 else 1\n )", "def draw_grid(self, verbosity=0):\n log.debug(\"Drawing grid\")\n (x0, y0) = self.origin\n color = (191, 191, 191)\n\n (w, h) = self.surface.get_size()\n\n i = x0\n while True:\n (x, ignore) = self.map_to_screen((i, 0))\n if x > w:\n break\n pygame.draw.line(self.surface, color, (x, 0), (x, h), 1)\n i += 10\n\n j = y0\n while True:\n (ignore, y) = self.map_to_screen((0, j))\n if y > h:\n break\n pygame.draw.line(self.surface, color, (0, y), (w, y), 1)\n j -= 10", "def display_map(grid):\n fig, ax = plt.subplots(figsize=(7, 7))\n\n major_ticks_x = np.arange(0, LENGTH_case + 1, 5)\n minor_ticks_x = np.arange(0, LENGTH_case + 1, 1)\n major_ticks_y = np.arange(0, WIDTH_case + 1, 5)\n minor_ticks_y = np.arange(0, WIDTH_case + 1, 1)\n ax.set_xticks(major_ticks_x)\n ax.set_xticks(minor_ticks_x, minor=True)\n ax.set_yticks(major_ticks_y)\n ax.set_yticks(minor_ticks_y, minor=True)\n ax.grid(which='minor', alpha=0.2)\n ax.grid(which='major', alpha=0.5)\n ax.set_ylim([0, WIDTH_case])\n ax.set_xlim([0, LENGTH_case])\n ax.grid(True)\n\n # Select the colors with which to display obstacles and free cells\n cmap = colors.ListedColormap(['white', 'red'])\n\n # Displaying the map\n ax.imshow(grid, cmap=cmap)\n plt.title(\"Map : free cells in white, occupied cells in red\");\n\n return fig, ax", "def draw_grid(self):\n if self.grid_center == True:\n (n, m) = (self.n, self.m)\n (dx, dy) = (self.dx // 2, self.dy // 2)\n else:\n (n, m) = (self.n + 1, self.m + 1)\n (dx, dy) = (0, 0)\n\n x0 = self.x0 + dx\n y0 = self.y0 + 
dy\n\n # vertical lines\n for j in range(m):\n p0 = (x0 + j * self.dx, y0)\n p1 = (x0 + j * self.dx, y0 + (n-1) * self.dy)\n pygame.draw.line(self.screen, self.grid_col, p0, p1, self.grid_d) \n # horizontal lines\n for i in range(n):\n p0 = (x0, y0 + i * self.dy)\n p1 = (x0 + (m-1) * self.dx, y0 + i * self.dy)\n pygame.draw.line(self.screen, self.grid_col, p0, p1, self.grid_d)", "def drawGrid(self):\n for div in range(NBCELL):\n sec = SSIZE*div\n self.can.create_line(0, sec, GSIZE, sec, width=3, fill=GFILL)\n self.can.create_line(sec, 0, sec, GSIZE, width=3, fill=GFILL)", "def draw_grid(self):\n\n # Draw horizontal lines\n for row in range(self.num_rows + 1):\n left = row_column_to_pixels(row, 0)\n right = row_column_to_pixels(row, self.num_cols)\n pygame.draw.line(self.screen, COLOR_MAP['gray'], left, right)\n\n # Draw vertical lines\n for col in range(self.num_cols + 1):\n top = row_column_to_pixels(0, col)\n bottom = row_column_to_pixels(self.num_rows, col)\n pygame.draw.line(self.screen, COLOR_MAP['gray'], top, bottom)", "def draw_grid(self):\n plt.imshow(py.array(\n map(lambda x: map(lambda y: mplc.colorConverter.to_rgb(colord[y]), x), self.create_grid(self.graph))),\n interpolation='nearest')\n plt.show()", "def draw_grid(self):\n for square in range(COLS+1):\n #vertical lines\n start_pos = (helpers.get_col_left_p(square),helpers.get_row_top_p(0))\n end_pos = (helpers.get_col_left_p(square),helpers.get_row_top_p(ROWS))\n pygame.draw.line(g.screen,WHITE,start_pos,end_pos)\n for square in range(ROWS+1):\n #horizontal lines\n start_pos = (helpers.get_col_left_p(0),helpers.get_row_top_p(square))\n end_pos = (helpers.get_col_left_p(COLS),helpers.get_row_top_p(square))\n pygame.draw.line(g.screen,WHITE,start_pos,end_pos)", "def draw_grid_map(img, grid_map, stride):\n image = img_from_array(img)\n draw = ImageDraw.Draw(image)\n counter = 0\n for grid in grid_map:\n draw.rectangle((\n grid[0] + stride // 2 - 2,\n grid[1] + stride // 2 - 2,\n grid[2] + stride // 2 + 2,\n grid[3] + stride // 2 + 2), fill=(255, 255, 255, 0))\n counter += 1\n plt.figure()\n plt.imshow(image)\n plt.show()", "def draw_map(self):\n self.vis.draw_map()", "def draw_grid(self):\n\n screen.fill(GREY)\n\n for row in self.grid:\n for cell in row:\n if cell.root:\n color = GREEN\n elif cell.goal:\n color = RED\n elif cell.value:\n color = DARK_BLUE\n elif cell.visited:\n color = LIGHT_BLUE\n elif cell.f:\n color = LIGHT_GREEN\n elif cell.wall:\n color = GRAY\n else:\n color = WHITE\n\n pygame.draw.rect(screen, color, cell.rect)\n\n x, y = cell.rect.x, cell.rect.y\n\n if cell.g:\n self.draw_score(x + 2, y + 2, cell.g)\n if cell.h:\n self.draw_score(x + 18, y + 2, cell.h)\n if cell.f:\n self.draw_score(x + 2, y + self.cell_size - 10, cell.f)", "def draw_grid(self, tile_img, tiles):\n #debug_print(\"drawing level\", data)\n img = Surface((self.xsize * SIZE, self.ysize * SIZE))\n for pos, char in self:\n rect = get_tile_rect(pos)\n img.blit(tile_img, rect, tiles[char])\n return img", "def draw_grid(self) -> None:\n grid = self.life.curr_generation\n for row in range(self.cell_height):\n for column in range(self.cell_width):\n if grid[row][column] == 1:\n color = \"green\"\n else:\n color = \"white\"\n pygame.draw.rect(\n self.screen,\n pygame.Color(color),\n (column * self.cell_size, row * self.cell_size, self.cell_size, self.cell_size),\n )", "def draw_grid(grid):\n \n # Tile size variables\n tile_width = STAGE_WIDTH / GRID_WIDTH\n tile_height = STAGE_HEIGHT / GRID_HEIGHT\n \n for i in range(GRID_WIDTH):\n for j in 
range(GRID_HEIGHT):\n elev = grid[i][j]\n rect_x = i * tile_width\n rect_y = j * tile_height\n pygame.draw.rect(STAGE, get_color(elev),\n (rect_x, rect_y, tile_width, tile_height))", "def draw(self, verbosity=0):\n\n # Calculate overall scale and position of the map\n self.update_bounds()\n # Draw the dungeon background (everything behind the grid)\n self.draw_background(verbosity)\n # Draw the grid\n self.draw_grid(verbosity)\n # Draw the dungeon foreground (everything in front of the grid)\n self.draw_foreground(verbosity)\n\n pygame.display.flip()", "def draw(self):\n for x in range(self.numRows):\n print self.grid[x]", "def draw_grid(self, surface):\n\n # put platform to the left\n (top, left) = get_surface_pos(self.flower_spawn_pos[0])\n surface.blit(self.platform, ((top-RADIUS, left-RADIUS), (0, 0)))\n\n unit_cell = [(.5 * RADIUS, 0),\n (1.5 * RADIUS, 0),\n (2 * RADIUS, SQRT3 / 2 * RADIUS),\n (1.5 * RADIUS, SQRT3 * RADIUS),\n (.5 * RADIUS, SQRT3 * RADIUS),\n (0, SQRT3 / 2 * RADIUS)]\n\n r = RADIUS*0.75\n unit_cell_inner = [(.5 * r, 0),\n (1.5 * r, 0),\n (2 * r, SQRT3 / 2 * r),\n (1.5 * r, SQRT3 * r),\n (.5 * r, SQRT3 * r),\n (0, SQRT3 / 2 * r)]\n\n # A point list describing a single cell, based on the radius of each hex\n for cell in self.cells:\n row, col = cell\n # Alternate the offset of the cells based on column\n offset = RADIUS * SQRT3 / 2 if col % 2 else 0\n # Calculate the offset of the cell\n top = offset + SQRT3 * row * RADIUS\n left = 1.5 * col * RADIUS\n # Create a point list containing the offset cell\n points = [(x + left, y + top) for (x, y) in unit_cell]\n points_inner = [(RADIUS/4 + x + left, RADIUS/4 + y + top) for (x, y) in unit_cell_inner]\n # Draw the polygon onto the surface\n\n if self.cell_state[cell]:\n pygame.draw.polygon(surface, (255, 204, 0), points, 0)\n pygame.draw.polygon(surface, (255, 255, 0), points_inner, 0)\n else:\n pygame.draw.polygon(surface, (125, 125, 0), points, 0)\n\n pygame.draw.polygon(surface, (0,0,0), points, 2)", "def drawGrid(self):\n\n # get attrs\n self.__setupGridLineOpacity()\n\n # setup GL\n GL.glPolygonMode(GL.GL_FRONT, GL.GL_LINE)\n GL.glLineWidth(1)\n GL.glColor4f(1, 1, 1, 1)\n\n # enable opacity\n GL.glEnable(GL.GL_BLEND)\n GL.glBlendFunc(GL.GL_SRC_ALPHA, GL.GL_ONE_MINUS_SRC_ALPHA)\n\n # draw grid\n display_type = self.getGridDisplayType()\n if display_type == 0:\n self.drawLines(*self.getAttributes())\n else:\n self.drawIntersection(*self.getAttributes(), display_type)", "def print_grid(self):\n\t\tclear_screen()\n\n\t\tprint('# DUNGEON MAP #\\n')\n\n\t\tfor r in self.grid_matrix:\n\t\t\tfor c in r:\n\t\t\t\tprint(c, end='')\n\t\t\tprint()\t\t\t\t\t\t# use print('\\n' to space out grid further)\n\n\t\tprint('\\n{} is located at X'.format(self.player.info['Name']))\n\n\t\tpress_enter()", "def fill_grid(self, gx, gy, color=Color['white']):\n area = [gx * self.px, gy * self.py, self.px, self.py]\n pygame.draw.rect(self.display, color, area)", "def draw( self ):\n\n if self.__drawnGrid == 0:\n draw_grid().draw()\n\n self.__drawnGrid = 1\n\n column = 0\n row = 0\n i = 0\n for mark in self.__grid:\n if row == 0:\n turtle.goto(-60+60*column, 60)\n elif row == 1:\n turtle.goto(-60+60*column, 0)\n elif row == 2:\n turtle.goto(-60+60*column, -60)\n\n if isinstance(mark, str):\n if mark.lower() == 'x': \n drawX(i)\n elif mark.lower() == 'o':\n drawO(i)\n\n column += 1\n\n if column == 3:\n column = 0\n row += 1\n\n i+=1\n\n turtle.goto(-60, 60)", "def grid(self, z, x, y, fields, layer):\n logger.debug(_(\"Render grid %s\") % ((z, x, 
y),))\n mercator = GlobalMercator(False,self.tilesize,[z])\n return self.render_grid(mercator.tile_bbox((z, x, y)), fields, layer)", "def _draw_map(screen):\n my_map = HexMap(80, 80, _hex_size=10)\n my_map.generate_with_random_walk(150, iterations=25)\n for tile in my_map:\n # print(tile)\n color = COLORS[tile.type]\n\n tile_color = _modify_color(color)\n pygame.draw.polygon(screen, tile_color, tile.corners)\n return my_map", "def draw_tiles(self):\n db = self.double_buffer\n if db is not None:\n span_x = self.width\n span_y = self.height\n tiles_x = int(ceil(span_x/256.0))\n tiles_y = int(ceil(span_y/256.0))\n\n cc = cairo.Context(db)\n tiles = self.tile_loader.load_area(self.longitude,self.latitude,self.zoom,tiles_x,tiles_y)\n tile_number=0\n line_number=0\n\n x_center = self.width/2# - 128\n y_center = self.height/2# - 128\n offset_x,offset_y = self.tile_loader.gmap_tile_xy_from_coord(self.longitude,self.latitude,self.zoom)\n\n\n xtiles = len(tiles[0])\n ytiles = len(tiles)\n #print len(tiles),len(tiles[0])\n for line in tiles:\n for tile in line:\n x = (tile_number - int(xtiles/2)) * 256 + x_center\n y = (line_number - int(ytiles/2)) * 256 + y_center\n finalx = x - offset_x #+128\n finaly = y - offset_y #+128\n cc.set_source_surface(tile, finalx+self.dx, finaly+self.dy)\n cc.paint()\n tile_number += 1\n tile_number = 0\n line_number += 1\n\n self.draw_cross(cc,x_center,y_center)\n self.draw_points(cc)\n\n db.flush()\n\n else:\n print('Invalid double buffer')", "def draw_grid(self):\n buf = self.__hbar\n for rInd in range(self.row):\n line = '\\t|'\n for cInd in range(self.col):\n this = ((rInd * self.col) + cInd)\n cell = self.get_cell(this)\n if not cell:\n line += '%s|' % ' '.center(5)\n else:\n if this == self.new_cell:\n tmp = green(str(cell).center(5))\n else:\n tmp = str(cell).center(5)\n line += '%s|' % tmp\n buf += line + '\\n' + self.__hbar\n print(buf)", "def drawGrid(w, rows, surface):\r\n sizeBtwn = w // rows\r\n\r\n x = 0\r\n y = 0\r\n for l in range(rows):\r\n x = x + sizeBtwn\r\n y = y + sizeBtwn\r\n\r\n #line color-white #start end\r\n # pygame.draw.line(surface, (255,255,255), (x,0), (x,w)) #vertical\r\n #pygame.draw.line(surface, (255,255,255), (0,y), (w,y)) #horizontal\r", "def draw(self):\n\n super().draw()\n \n self.dim = self.getdim()\n start_x, start_y, = self.x(), self.y()\n\n for y in range(self.r):\n for x in range(self.c):\n x_pos, y_pos = start_x + (self.dim * x), start_y + (self.dim * y)\n self.tiles[y][x].resize(x_pos, y_pos, self.dim, self.dim)", "def draw_world(grid, r, c, image):\n under = grid[r, c]\n grid[r, c] = AGENT\n image.set_data(grid)\n grid[r, c] = under", "def draw_grid():\r\n screen.fill((0,0,0))\r\n pygame.draw.line(screen, (255,255,255),(WIDTH/3,0),(WIDTH/3,HEIGHT))\r\n pygame.draw.line(screen, (255,255,255),(2*WIDTH/3,0),(2*WIDTH/3,HEIGHT))\r\n pygame.draw.line(screen, (255,255,255),(0,HEIGHT/3),(WIDTH,HEIGHT/3))\r\n pygame.draw.line(screen, (255,255,255),(0,2*HEIGHT/3),(WIDTH,2*HEIGHT/3))", "def draw_board(surface, board, player_number):\n grid_img = pygame.image.load('images/grid.png')\n surface.blit(grid_img, (0, 0))\n for row in range(len(board.get_grid())):\n for col in range(len(board.get_grid()[0])):\n x = col * TILE_SIZE\n y = row * TILE_SIZE\n tile = board.get_coord(row, col)\n if tile is not None:\n draw_tile(surface, tile, x, y, castle_color=('images/' + CASTLES[player_number]))", "def _render_grid(self, r, tile_size):\n\n assert r.width == self.width * tile_size\n assert r.height == self.height * tile_size\n\n # Total grid 
size at native scale\n widthPx = self.width * CELL_PIXELS\n heightPx = self.height * CELL_PIXELS\n\n r.push()\n\n # Internally, we draw at the \"large\" full-grid resolution, but we\n # use the renderer to scale back to the desired size\n r.scale(tile_size / CELL_PIXELS, tile_size / CELL_PIXELS)\n\n # Draw the background of the in-world cells black\n r.fillRect(\n 0,\n 0,\n widthPx,\n heightPx,\n 0, 0, 0\n )\n\n # Draw grid lines\n r.setLineColor(100, 100, 100)\n for rowIdx in range(0, self.height):\n y = CELL_PIXELS * rowIdx\n r.drawLine(0, y, widthPx, y)\n for colIdx in range(0, self.width):\n x = CELL_PIXELS * colIdx\n r.drawLine(x, 0, x, heightPx)\n\n # Render the grid\n\n grid = self.encode()\n\n for j in range(0, self.width):\n for i in range(0, self.height):\n cell = grid[i,j]\n if cell == 0:\n continue\n\n r.push()\n r.translate(j * CELL_PIXELS, i * CELL_PIXELS)\n if cell == 1:\n self._render_wall(r)\n elif cell == 10 or cell == 12:\n self._render_goal(r, discovered=True)\n elif cell == 100 or cell == 102:\n self._render_goal(r, discovered=False)\n r.pop()\n\n r.pop()", "def draw(self):\n self.drawLine()\n\n for l in range(0, self.height):\n print(\"|\", end='', flush=True)\n for c in range(0, self.width):\n print(\" \" + str(self.grid[l][c]) + \" |\", end='', flush=True)\n print(\"\\n\", end='', flush=True)\n\n self.drawLine()", "def render(self):\n map = {0:'.', 1:'x', 2:'o'} # grid label vs how to plot\n print(''.join(map[i] for i in self.grid[0:3]))\n print(''.join(map[i] for i in self.grid[3:6]))\n print(''.join(map[i] for i in self.grid[6:9]))\n print('====')", "def render(self):\n map = {0:'.', 1:'x', 2:'o'} # grid label vs how to plot\n print(''.join(map[i] for i in self.grid[0:3]))\n print(''.join(map[i] for i in self.grid[3:6]))\n print(''.join(map[i] for i in self.grid[6:9]))\n print('====')", "def draw_grid(self):\n for i in range(N * N + 1):\n color = \"blue\" if i % N == 0 else \"gray\"\n x0 = MARGIN + i * SIDE\n y0 = MARGIN\n x1 = MARGIN + i * SIDE\n y1 = HEIGHT - MARGIN\n self.canvas.create_line(x0, y0, x1, y1, fill=color)\n\n x0 = MARGIN\n y0 = MARGIN + i * SIDE\n x1 = WIDTH - MARGIN\n y1 = MARGIN + i * SIDE\n self.canvas.create_line(x0, y0, x1, y1, fill=color)", "def drawMap(self):\n for position, contain in self.map.items():\n if contain is \"block\":\n self.blocks.add(Block(position[1]*50,position[0]*50))\n elif contain is \"Coins\":\n self.Coins.add(Coins(position[1]*50+10,position[0]*50+10))", "def draw_board(grid, scale, path):\n\n def color_map(tile):\n return COLORS[tile.terrain]\n\n draw_hex_grid(grid, scale, color_map, path, draw_components)", "def draw_board(self):\r\n for i in range(self.size):\r\n for k in range(self.size):\r\n left = k * self.CELL_SIZE + (k+1) * self.BORDER_WIDTH\r\n top = i * self.CELL_SIZE + (i+1) * self.BORDER_WIDTH\r\n rect = pygame.Rect(left, top, self.CELL_SIZE, self.CELL_SIZE)\r\n color = self.BG_COLOR\r\n if self.map[i][k] == self.BLOCK_CHAR:\r\n color = self.BLOCK_COLOR\r\n elif self.map[i][k] == self.START_CHAR:\r\n color = self.START_COLOR\r\n elif self.map[i][k] == self.END_CHAR:\r\n color = self.END_COLOR\r\n elif (k, i) in self.path:\r\n color = self.PATH_COLOR\r\n pygame.draw.rect(self.screen, color, rect)", "def DrawGrid(self, count):\n for i in range(0, self.width, self.incr):\n self.canvas.create_line(i, 100, i, 700, fill = \"#696969\", width = 1)\n for i in range(100, 800, 100):\n self.canvas.create_line(0, i, self.width, i, fill = \"#696969\", width = 1)\n self.canvas.create_rectangle(self.incr * 4, self.height - 
self.incr * 3.5,\n self.width - self.incr * 4, self.height, fill = \"black\", width = 3)\n for i in range(int(self.height - self.incr * 3.5), self.height, int(self.incr / 4)):\n self.canvas.create_line(self.incr * 4, i, self.width - self.incr * 4,\n i, fill = \"#696969\", width = 1)\n for i in range(self.incr * 4, self.width - self.incr * 4 + 1, int(self.incr / 4)):\n self.canvas.create_line(i, self.height - self.incr * 3.5, i, self.height,\n fill = \"#696969\", width = 1)", "def render_grid(self, surface, color, pos, size):\n\t\tax, ay = pos\n\t\tsx, sy = size\n\t\tbx = ax + sx\n\t\tby = ay + sy\n\n\t\ttsx = sx / self.w\n\t\ttsy = sy / self.h\n\n\t\t# Draw vertical lines.\n\t\tfor x in range(ax, bx, tsx):\n\t\t\tpygame.draw.aaline(\n\t\t\t\t\tsurface, color, \n\t\t\t\t\t(x, ay), (x, by), 1)\n\t\t# Draw horizontal lines.\n\t\tfor y in range(ay, by, tsy):\n\t\t\tpygame.draw.aaline(\n\t\t\t\t\tsurface, color, \n\t\t\t\t\t(ax, y), (bx, y), 1)\n\t\t# Draw a rect around it.\n\t\tpygame.draw.rect(surface, color, (ax, ay, sx, sy), 1)", "def drawRow(gm, Row):\n for Column in range(MapSize):\n img = Images[gm.Grid[Column][Row][-1].Name]\n Screen.fill(Colors[\"white\"], \n [(TileMargin + TileWidth) * Column + TileMargin,\n (TileMargin + TileHeight) * Row + TileMargin,\n TileWidth, TileHeight])\n Screen.blit(img, \n ((TileMargin + TileWidth) * Column + TileMargin, \n (TileMargin + TileHeight) * Row + TileMargin))", "def draw_map(self):\n\n polygon_lats, polygon_longs = self.get_polygon_coordinates()\n car_route = self.get_car_route()\n\n polygon = self.construct_polygon(polygon_lats, polygon_longs)\n\n map_path = f'{self.result_path}/map_{self.postfix}.html'\n\n self.plot_map(df=car_route,\n polygon=polygon,\n lat_col='lat',\n lon_col='long',\n plot_points=True,\n plot_polygon=True,\n plot_heatmap=True,\n file_name=map_path)", "def curses_print_map(self):\n map_window = self.stdscreen.subwin(5,5)\n map_keypad = map_window.keypad(1)\n map_panel = panel.new_panel(map_window)\n\n map_panel.update_panels()\n map_panel.top()\n map_panel.show()\n map_window.clear()\n\n x = 0; y=0; z=0\n\n # Print map phase\n draw_map(self,[x,y,z])\n\n def draw_map(game,loc):\n grid = game.world.grid\n\n z = loc[2] # Load the current floor (z)\n\n for x in range(game.conf.x_dim):\n for y in range(game.conf.y_dim):\n # Draw a map here!\n pass", "def draw_maps(m, y, x):\r\n original_x = x # Saving the orignal value of x coordinate\r\n \r\n # Loop through the map\r\n for j in m: \r\n y += 10 # Increase y coordinate with 10\r\n x = original_x # Reset x coordinate\r\n if isinstance(j, list):\r\n # Draw a color depending on value in map\r\n for i in j: \r\n if i == 0:\r\n pygame.draw.rect(display, green, (x, y, 10, 10))\r\n x += 10\r\n elif i == 1:\r\n pygame.draw.rect(display, grey, (x, y, 10, 10))\r\n x += 10\r\n elif i == 2:\r\n pygame.draw.rect(display, brown, (x, y, 10 , 10))\r\n x += 10\r\n elif i == 3:\r\n pygame.draw.rect(display, white, (x, y, 10, 10))\r\n x += 10", "def create_grid(self):\n for y_iter in range(self.NUM_GRIDS):\n for x_iter in range(self.NUM_GRIDS):\n x, y = x_iter * self.SQUARE_SIZE, y_iter * self.SQUARE_SIZE\n x_stop, y_stop = x + self.SQUARE_SIZE, y + self.SQUARE_SIZE\n cords = x, y, x_stop, y_stop\n self.canvas.create_rectangle(cords, outline=self.color,\n fill=self.default_color)", "def make_grid(self):\n length = self.size / 8\n # draw horizontal lines\n for y in range(0, self.size, length):\n self.window.create_line(0, y, self.size, y, fill = \"blue\")\n \n # draw vertical lines\n for x in 
range(0, self.size, length):\n self.window.create_line(x, 0, x, self.size, fill = \"blue\")\n\n # draw the axes red\n self.window.create_line(\n 0,\n self.size / 2,\n self.size, \n self.size / 2, \n fill = \"red\"\n )\n self.window.create_line(\n self.size / 2, 0,\n self.size / 2, \n self.size, \n fill = \"red\"\n )\n print(\"Grid Made.\")", "def render(self):\n\n self.baseMap.beginDraw()\n self.baseMap.background(255)\n self.baseMap.endDraw()\n\n numColumns = self.width / self.tileSize\n numRows = self.height / self.tileSize\n\n startX = floor(self.centerX - numColumns / 2.0)\n startY = floor(self.centerY - numRows / 2.0)\n\n endX = ceil(self.centerX + numColumns / 2.0)\n endY = ceil(self.centerY + numRows / 2.0)\n\n self.offsetX = -floor((self.centerX - floor(self.centerX)) * self.tileSize) + \\\n floor(self.width / 2.0) + \\\n floor(startX - floor(self.centerX)) * self.tileSize\n self.offsetY = -floor((self.centerY - floor(self.centerY)) * self.tileSize) + \\\n floor(self.height / 2.0) + \\\n floor(startY - floor(self.centerY)) * self.tileSize\n\n def onTileLoaded(tile, meta):\n self.baseMap.beginDraw()\n x = meta['destX']\n y = meta['destY']\n self.baseMap.image(tile, x, y)\n self.baseMap.endDraw()\n\n for x in xrange(startX, endX):\n for y in xrange(startY, endY):\n # Interpolate the URL for this particular tile.\n # 12/1208/1541.png\n url = self.url % (self.zoom, x, y)\n\n # Compute the x and y coordinates for where this tile will go on the map.\n destX = (x - startX) * self.tileSize + self.offsetX\n destY = (y - startY) * self.tileSize + self.offsetY\n\n # Attempts to load all the images lazily.\n meta = {\n 'url' : url,\n 'destX' : destX,\n 'destY' : destY,\n 'x' : x,\n 'y' : y,\n }\n self.lazyImageManager.addLazyImage(url, onTileLoaded, meta)\n\n # Kick off all the layer rendering.\n for layer in self.layers:\n layer.render()\n\n for marker in self.markers:\n marker.draw()", "def create_map(grid_size):\n STATUS['game_grid'] = [] # Could be a tuple?\n STATUS['grid_size'] = grid_size\n x_coord = 1\n y_coord = 1\n grid_size_counter = grid_size * grid_size\n while grid_size_counter:\n STATUS['game_grid'].append([x_coord, y_coord])\n x_coord += 1\n if x_coord == grid_size + 1:\n y_coord += 1\n x_coord = 1\n grid_size_counter -= 1", "def draw(self):\n\t\tfor i in range(0, self.size):\n\t\t\tprint('\\n' + \"----\" * self.size)\n\t\t\tfor j in range(0, self.size):\n\t\t\t\tprint(self.grid[i][j] + ' |', end=\" \")\n\t\tprint('\\n'+ \"----\" * self.size + '\\n')", "def draw_grid(grid):\n rows = grid.shape[0]\n cols = grid.shape[1]\n for row in range(rows):\n for col in range(cols):\n if grid[row, col] == 0: # empty\n sys.stdout.write(\" . 
\")\n elif grid[row, col] == 1: # path\n sys.stdout.write(\" X \")\n elif grid[row, col] == 2:\n sys.stdout.write(\" O \")\n else:\n sys.stdout.write(\" @ \")\n\n if col % cols == cols - 1:\n sys.stdout.write(\"\\n\")", "def grid_points(self):\n for i in range(self.rows):\n for j in range(self.cols):\n min_lat,max_lat,min_lon,max_lon = self.coords_to_min_max_lat_lon((i,j))\n if i == 0:\n print_gps(max_lat,max_lon,\"grid\")\n if j == 0:\n print_gps(max_lat,min_lon,\"grid\")\n if j == 0:\n print_gps(min_lat,min_lon,\"grid\")\n print_gps(min_lat,max_lon,\"grid\")", "def grid(self, (z, x, y)):\n # sources.py -> MapnikRenderer -> grid\n content = self.reader.grid(z, x, y, self.grid_fields, self.grid_layer)\n return content", "def render(self, r, tile_size):\n\n assert r.width == self.width * tile_size\n assert r.height == self.height * tile_size\n\n # Total grid size at native scale\n widthPx = self.width * CELL_PIXELS\n heightPx = self.height * CELL_PIXELS\n\n r.push()\n\n # Internally, we draw at the \"large\" full-grid resolution, but we\n # use the renderer to scale back to the desired size\n r.scale(tile_size / CELL_PIXELS, tile_size / CELL_PIXELS)\n\n # Draw the background of the in-world cells black\n r.fillRect(\n 0,\n 0,\n widthPx,\n heightPx,\n 0, 0, 0\n )\n\n # Draw grid lines\n r.setLineColor(100, 100, 100)\n for rowIdx in range(0, self.height):\n y = CELL_PIXELS * rowIdx\n r.drawLine(0, y, widthPx, y)\n for colIdx in range(0, self.width):\n x = CELL_PIXELS * colIdx\n r.drawLine(x, 0, x, heightPx)\n\n # Render the grid\n for j in range(0, self.height):\n for i in range(0, self.width):\n cell = self.get(i, j)\n if cell == None:\n continue\n r.push()\n r.translate(i * CELL_PIXELS, j * CELL_PIXELS)\n cell.render(r)\n r.pop()\n\n r.pop()", "def draw_room(screen, grid, start_location):\n wall_image = pygame.image.load(\"images/pillar.png\")\n wall_image_transparent = pygame.image.load(\"images/pillar_80.png\")\n floor_image = pygame.image.load(\"images/floor.png\")\n computer_image = pygame.image.load(\"images/desk_computer.png\")\n\n # map_to_image = [floor_image, # 0\n # wall_image, # 1\n # wall_image_transparent, # 2\n # computer_image] # 3\n map_to_image = {\n \"0\": floor_image,\n \"1\": wall_image,\n \"2\": wall_image_transparent,\n \"3\": computer_image,\n \"10\": wall_image # Secret passage\n }\n # better tile management for multiple environments / create multiple environments.\n # 0 = floor, 1 = wall (pillar)\n # First draw floor everywhere\n max_dimensions = grid.shape\n for r in range(max_dimensions[0]):\n for c in range(max_dimensions[1]):\n screen.blit(floor_image, (c * 30 + start_location[0],\n r * 30 + start_location[1]))\n\n for tile_type in [1, 2, 3, 10]:\n the_rows, the_cols = np.where(grid == tile_type)\n for i in range(len(the_cols)):\n screen.blit(map_to_image[str(tile_type)], (the_cols[i] * 30 + start_location[0],\n the_rows[i] * 30 + start_location[1]))", "def drawmaze(self):\n win=GraphWin(\"Perfect Maze\",600,600) \n win.setBackground(\"White\")\n scale=600/self.N #Used to generalize the size difference for the input of larger numbers. 
The background resolution/ grid size, N\n\n x1=scale\n y1=0\n x2=scale\n y2=scale\n\n ##VERTICAL LINES ####\n for i in range(self.N,0,-1):\n for j in range(1,self.N):\n if self.East[j][i]: #If East is true, draw a line.\n \n line=Line(Point(x1,y1),Point(x2,y2)) #lines | |\n line.setFill(\"red\")\n line.draw(win)\n x1+=scale #Increment causes |->|\n x2+=scale #Increment causes |->|\n y1+=scale #Used to draw two more\n y2+=scale #of the same spaced lines further down.\n x1=scale #Reset\n x2=scale #Reset\n\n\n ##HORIZONTAL LINES##\n x1=0\n y1=scale\n x2=scale\n y2=scale\n\n\n for i in range(self.N,1,-1):\n for j in range(1,self.N+1):\n if self.South[j][i]: #If South is true, draw a line.\n \n line=Line(Point(x1,y1),Point(x2,y2))\n line.setFill(\"red\")\n line.draw(win)\n x1+=scale\n x2+=scale\n y1+=scale\n y2+=scale\n x1=0\n x2=scale\n\n const=scale//5 #Very useful const which helps in placing circles on grid.\n x=scale//2\n y=600-scale//2\n #radius=(scale-(4*scale//self.N))/2\n radius=scale//2-(const)\n start=Point(x,y) #START POINT HERE \n circ=Circle(start,radius)\n circ.setFill(\"Red\")\n label=Text(start,\"Start\")\n label.setFill(\"Black\")\n circ.draw(win)\n label.draw(win)\n #print(self.CurrentCell)\n #Using the current cell from the finished algorithm(last place visited), a circle can be placed at that point.\n endpointx=(self.CurrentCell[0]-1)*scale +scale//2 ####MAKING END POINT X\n endpointy=600-(self.CurrentCell[1]-1)*scale-scale//2 ####MAKING END POINT Y\n endpoint=Point(endpointx,endpointy)\n circ2=Circle(endpoint,radius)\n circ2.setFill(\"White\")\n label2=Text(endpoint,\"End\")\n circ2.draw(win)\n label2.draw(win)\n \n ###############CREATE KEY########################\n \n \n keypointx=(self.MazeKey[0]-1)*scale +scale//2 ####MAKING END POINT X\n keypointy=600-(self.MazeKey[1]-1)*scale-scale//2 ####MAKING END POINT Y\n keypoint=Point(keypointx,keypointy)\n circ3=Circle(keypoint,radius)\n circ3.setFill(\"Blue\")\n label3=Text(keypoint,\"Key\")\n circ3.draw(win)\n label3.draw(win)\n pathcol=\"Yellow\"\n##\n\n \n for i in range(1,len(self.EntirePath)): \n pathpointx=(self.EntirePath[i][0]-1)*scale +scale//2 ####MAKING END POINT X\n pathpointy=600-(self.EntirePath[i][1]-1)*scale-scale//2 ####MAKING END POINT Y\n pathpoint=Point(pathpointx,pathpointy)\n drawpath=Circle(pathpoint,radius)\n drawpath.setFill(pathcol)\n if self.EntirePath[i]==self.KeyPath[-1]:\n pathcol=\"Violet\"\n label4=Text(keypoint,\"Key\")\n label4.draw(win) \n drawpath.draw(win)\n drawpath.setWidth(1)\n sleep(0.1)\n \n #drawpath.draw(win)\n \n label5=Text(endpoint,\"Maze Solved \")\n label5.draw(win)\n circ4=Circle(start,radius)\n circ4.setFill(\"Red\")\n circ4.draw(win) \n label6=Text(start,\"Start \")\n label6.draw(win)", "def draw(self):\n #for (x, y) in self.coords:\n # pyxel.rect(\n # (x + self.x) * 4,\n # (y + self.y) * 4,\n # (x + self.x) * 4 + 3,\n # (y + self.y) * 4 + 3,\n # self.color)", "def draw_board(self):\n pygame.draw.rect(background, BLACK, self.outline, 3)\n # Outline is inflated here for future use as a collidebox for the mouse\n self.outline.inflate_ip(20, 20)\n for i in range(self.size-1):\n for j in range(self.size-1):\n rect = pygame.Rect(5+GRID_SIZE+(GRID_SIZE*i), 5+GRID_SIZE+(GRID_SIZE*j), GRID_SIZE, GRID_SIZE)\n pygame.draw.rect(background, COLOR[BLACK], rect, 1)\n if self.size >= 13:\n for i in range(3):\n for j in range(3):\n coords = (5+4*GRID_SIZE+(GRID_SIZE*6*i), 5+4*GRID_SIZE+(GRID_SIZE*6*j))\n pygame.draw.circle(background, COLOR[BLACK], coords, 5, 0)\n screen.blit(background, 
(0, 0))\n pygame.display.update()", "def draw(self):\n\n self.updateLazyImageLoading()\n\n image(self.baseMap, 0, 0)\n\n for layer in self.layers:\n layer.draw()\n\n for marker in self.markers:\n marker.draw()", "def draw(self, win):\n # draw grid\n gap = self.width // 9\n for i in range(self.rows + 1):\n if i % 3 == 0 and i != 0:\n thick = 4\n else:\n thick = 1\n pygame.draw.line(win, (0, 0, 0), (0, i * gap), (self.width, i * gap), thick)\n pygame.draw.line(win, (0, 0, 0), (i * gap, 0), (i * gap, self.width), thick)\n\n # draw cubes\n for i in range(self.rows):\n for j in range(self.cols):\n self.cubes[i][j].draw(win)", "def render(win, grid):\n width, height = grid.get_width(), grid.get_height()\n\n win.fill(BLACK)\n draw_main_surface(win, WHITE, (width, height))\n\n for x in range(width):\n for y in range(height):\n cell = grid.get_cell(x, y)\n walls = cell.get_walls()\n\n if cell.is_final_cell():\n render_cell(win, FINAL, (x, y))\n elif cell.is_thawed():\n render_cell(win, THAWED, (x, y))\n\n if not cell.is_empty():\n player = cell.get_content()\n render_player(win, player)\n\n for wall in walls:\n render_wall(win, BLACK, wall, (x, y))\n\n pygame.display.update()", "def draw_final_screen(self):\r\n root = Tk()\r\n MapGUI(root, self)\r\n root.geometry('710x540')\r\n root.mainloop()", "def drawGrid(self):\n # Add vertical minor grids\n path = QPainterPath()\n minLoc = self.xMinGrid + self.minorGrid\n maxLoc = self.xMaxGrid\n gStep = self.minorGrid\n for i in range(minLoc, maxLoc, gStep):\n path.moveTo(i, self.yMinGrid)\n path.lineTo(i, self.yMaxGrid)\n self.addPath(path, self.minorGridPen)\n # Add horizontal minor grids\n path = QPainterPath()\n for i in range(minLoc, maxLoc, gStep):\n path.moveTo(self.xMinGrid, i)\n path.lineTo(self.xMaxGrid, i)\n self.addPath(path, self.minorGridPen)\n # Add vertical minor grids\n path = QPainterPath()\n minLoc = self.xMinGrid\n maxLoc = self.xMaxGrid\n gStep = self.majorGrid\n for i in range(minLoc, maxLoc, gStep):\n path.moveTo(i, self.yMinGrid)\n path.lineTo(i, self.yMaxGrid)\n self.addPath(path, self.majorGridPen)\n # Add vertical minor grids\n path = QPainterPath()\n for i in range(minLoc, maxLoc, gStep):\n path.moveTo(self.xMinGrid, i)\n path.lineTo(self.xMaxGrid, i)\n self.addPath(path, self.majorGridPen)", "def draw_board(self):\n self.window.fill(Colors.WHITE.value)\n self.draw_lines()\n self.draw_obstacles()", "def render(self, mode=\"human\", close=False):\n if close and self._viewer is None:\n if self._viewer is not None:\n self._viewer.close()\n self._viewer = None\n return\n\n screen_width = 600\n screen_height = 600\n if self._viewer is None:\n from gym.envs.classic_control import rendering\n self._viewer = rendering.Viewer(screen_width, screen_height)\n\n # generate the grid\n xs, self._xstep = np.linspace(\n 0, screen_width, self._width + 1, retstep=True)\n ys, self._ystep = np.linspace(\n 0, screen_height, self._height + 1, retstep=True)\n\n # render the grid\n for x in xrange(self._width):\n for y in xrange(self._height):\n l, r, t, b = (0, self._xstep, self._ystep, 0)\n tile = rendering.FilledPolygon([\n (l, b), (l, t), (r, t), (r, b)])\n tile.add_attr(rendering.Transform(translation=(\n x * self._xstep, y * self._ystep)))\n tile.set_color(*CASE_COLORS[chr(self._grid[x, y])])\n self._viewer.add_geom(tile)\n\n # render starting point\n l, r, t, b = (0, self._xstep, self._ystep, 0)\n tile = rendering.FilledPolygon([\n (l, b), (l, t), (r, t), (r, b)])\n tile.add_attr(rendering.Transform(translation=(\n self._trajectory[0][0] * 
self._xstep,\n self._trajectory[0][1] * self._ystep)))\n tile.set_color(0, 1.0, 1.0)\n self._viewer.add_geom(tile)\n\n # render grid lines\n for x in xs[1:len(xs) - 1]:\n # not including the first and last one\n line = rendering.Line((x, 0), (x, screen_height))\n self._viewer.add_geom(line)\n for y in ys[1: len(ys) - 1]:\n line = rendering.Line((0, y), (screen_width, y))\n self._viewer.add_geom(line)\n\n agent = rendering.make_circle(\n radius=min(\n screen_width / (self._width + 1) / 3,\n screen_height / (self._height + 1) / 3),\n res=30)\n self._agentTrans = rendering.Transform(translation=(\n self._currentPos[0] * self._xstep + (self._xstep / 2),\n self._currentPos[1] * self._ystep + (self._ystep / 2)))\n agent.add_attr(self._agentTrans)\n self._viewer.add_geom(agent)\n\n self._renderTrajectory()\n\n self._agentTrans.set_translation(\n self._currentPos[0] * self._xstep + (self._xstep / 2),\n self._currentPos[1] * self._ystep + (self._ystep / 2))\n\n self._viewer.render(return_rgb_array=(mode == 'rgb_array'))\n\n if close:\n if self._viewer is not None:\n self._viewer.close()\n self._viewer = None\n return", "def grid_init(self):\n # draw.line(surface, color, start_pos, end_pos, width/thickness)\n pygame.draw.line(\n self.game_screen,\n GameData.line_color,\n (0, GameData.square_size),\n (GameData.screen_dim, GameData.square_size),\n GameData.line_width\n )\n # # 2 horizontal\n pygame.draw.line(\n self.game_screen,\n GameData.line_color,\n (0, 2 * GameData.square_size),\n (GameData.screen_dim,2 * GameData.square_size),\n GameData.line_width\n )\n\n # # 1 vertical\n pygame.draw.line(\n self.game_screen,\n GameData.line_color,\n (GameData.square_size, 0),\n (GameData.square_size, GameData.screen_dim),\n GameData.line_width\n )\n # # 2 vertical\n pygame.draw.line(\n self.game_screen,\n GameData.line_color,\n (2 * GameData.square_size, 0),\n (2 * GameData.square_size, GameData.screen_dim),\n GameData.line_width)", "def begin_draw(self):\n pygame.init()\n self.display = pygame.display.set_mode(self.disp_size)\n pygame.display.set_caption('Map Editing')\n font = pygame.font.SysFont(\"arial\", 15)\n strings = [\"Press ESC to Start Drawing Obstacles\",\n \"Click Left to Draw & Right to Erase\",\n \"To finish Drawing,press Escape \",\n \"During search, Escape or Close to Quit\",\n \"you can also draw during the search, but it won't ba saved\"]\n texts = [font.render(s, True, (255, 255, 255)) for s in strings]\n for i, text in enumerate(texts):\n self.display.blit(text, (self.disp_size[0]//20, i*20+self.disp_size[1]//20))\n pygame.display.update()\n main_screen = True\n while main_screen:\n print(\"Waiting for start\")\n event = pygame.event.wait()\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n main_screen = False\n self.display.fill([255, 255, 255])\n grid.draw(self.display)\n pygame.display.update()\n print(\"Now painting\")\n while True:\n event = pygame.event.wait()\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n break\n pos = list((np.array(pygame.mouse.get_pos())/self.block_size).astype(int))\n if pygame.mouse.get_pressed() == (1, 0, 0):\n print(\"Add wall at\", pos)\n grid[pos].type = \"WALL\"\n grid[pos].draw(self.display, self.block_size)\n elif pygame.mouse.get_pressed() == (0, 0, 1):\n print(\"remove wall from\", pos)\n grid[pos].type = \"ROAD\"\n grid[pos].draw(self.display, self.block_size)\n pygame.display.update()", "def drawMap(self, lmap):\n w = lmap.width\n h = 
lmap.height\n # set size of canvas and create bitmap of same size\n self.config(width=w, height=h, xscrollincrement=1, yscrollincrement=1)\n self.im = PhotoImage(width=w, height=h)\n # copy colors corresponding to lmap characters into bitmap and create on canvas\n for row in range(h):\n for col in range(w):\n if lmap.isKey((col, row)):\n color = 'green3'\n elif lmap.isDoor((col, row)):\n color = 'red'\n else:\n color = self.colorMap(lmap.getCell((col, row)))\n self.im.put(color, (col, row))\n self.original = self.create_image(0, 0, image=self.im, anchor=NW)", "def draw_grid(grid_display, game_grid, box_width, box_height, border_color):\n\n for x in range(0, len(game_grid)):\n for y in range(0, len(game_grid[0])):\n if x == 0 or x == len(game_grid) - 1 or y == 0 or y == len(game_grid[0]) - 1:\n pygame.draw.rect(grid_display, border_color, (x * box_width + 1, y * box_height + 1,\n box_width - 1, box_height - 1))\n elif game_grid[x][y]:\n pygame.draw.rect(grid_display, game_grid[x][y], (x * box_width + 1, y * box_height + 1,\n box_width - 1, box_height - 1))", "def draw_gameBoard(self):\n\n # 15 horizontal lines\n for i in range(9):\n start_pixel_x = (i + 1) * CELL_PIXELS\n start_pixel_y = (0 + 1) * CELL_PIXELS\n end_pixel_x = (i + 1) * CELL_PIXELS\n end_pixel_y = (9 + 1) * CELL_PIXELS\n self.create_line(start_pixel_x, start_pixel_y, end_pixel_x, end_pixel_y)\n\n # 15 vertical lines\n for j in range(9):\n start_pixel_x = (0 + 1) * CELL_PIXELS\n start_pixel_y = (j + 1) * CELL_PIXELS\n end_pixel_x = (9 + 1) * CELL_PIXELS\n end_pixel_y = (j + 1) * CELL_PIXELS\n self.create_line(start_pixel_x, start_pixel_y, end_pixel_x, end_pixel_y)\n\n # place a \"star\" to particular intersections\n self.draw_star(3, 3)\n self.draw_star(7, 7)", "def drawCell(self,land,uland,vland,marked):\n from math import sqrt, pow\n #--Tranlate grid point (u,v) to pixel point\n if not self.changed: self.edit()\n #--u/v max/min are grid range of visible map. \n #--wcell is bit width of cell. 512 is bit width of visible map.\n (umin,umax,vmin,vmax,wcell,wmap) = (-28,27,-27,28,9,512)\n if not ((umin <= uland <= umax) and (vmin <= vland <= vmax)):\n return\n #--x0,y0 is bitmap coordinates of top left of cell in visible map.\n (x0,y0) = (4 + wcell*(uland-umin), 4 + wcell*(vmax-vland))\n #--Default to deep\n mapc = [Fmap.DEEP]*(9*9)\n heights = land and land.getHeights()\n if heights:\n #--Land heights are in 65*65 array, starting from bottom left. \n #--Coordinate conversion. 
Subtract one extra from height array because it's edge to edge.\n converter = [(65-2)*px/(wcell-1) for px in range(wcell)]\n for yc in range(wcell):\n ycoff = wcell*yc\n yhoff = (65-1-converter[yc])*65\n for xc in range(wcell):\n height = heights[converter[xc]+yhoff]\n if height >= 0: #--Land\n (r0,g0,b0,r1,g1,b1,scale) = (66,48,33,32,23,16,sqrt(height/3000.0))\n scale = int(scale*10)/10.0 #--Make boundaries sharper.\n r = chr(max(0,int(r0 - r1*scale)) & ~1)\n else: #--Sea\n #--Scale color from shallow to deep color.\n (r0,g0,b0,r1,g1,b1,scale) = (37,55,50,12,19,17,-height/2048.0)\n r = chr(max(0,int(r0 - r1*scale)) | 1)\n g = chr(max(0,int(g0 - g1*scale)))\n b = chr(max(0,int(b0 - b1*scale)))\n mapc[xc+ycoff] = r+g+b\n #--Draw it\n mapd = self.mapd\n for yc in range(wcell):\n ycoff = wcell*yc\n ymoff = wmap*(y0+yc)\n for xc in range(wcell):\n cOld = mapd[x0+xc+ymoff]\n cNew = mapc[xc+ycoff]\n rOld = ord(cOld[0])\n #--New or old is sea.\n if (ord(cNew[0]) & 1) or ((rOld & 1) and\n (-2 < (1.467742*rOld - ord(cOld[1])) < 2) and\n (-2 < (1.338710*rOld - ord(cOld[2])) < 2)):\n mapd[x0+xc+ymoff] = cNew\n if marked:\n self.drawBorder(Fmap.MARKED,x0+2,y0+2,x0+7,y0+7,1)\n pass", "def drawMap(self, M):\n\n for i in range(M.shape[1]):\n self.ax.plot (M[1, i], M[2, i], 'gx')\n\n self.fig.canvas.draw()", "def render_grid(self, bbox, grid_fields, layer, width=None, height=None):\n width = width or self.tilesize\n height = height or self.tilesize\n self._prepare_rendering(bbox, width=width, height=height)\n grid = mapnik.Grid(width, height)\n mapnik.render_layer(self._mapnik, grid, layer=layer, fields=grid_fields)\n grid = grid.encode()\n return json.dumps(grid)", "def draw(self):\n self.draw_occupied_cells()\n self.draw_open_cells()\n self.draw_edges()\n plt.xlabel(\"Red\")\n plt.ylabel(\"Black\")\n plt.title('Hex')\n self.camera.snap()", "def draw_grid(plt):\n x0, x1, x2, x3 = 0, 3057, 6508, 9860\n y0, y1, y2, y3, y4, y5, y6, y7, y8 = 0, 1535, 2041, 2547, 3053, 3559, 4257, 5303, 6978\n alpha, linewidth = 0.3, 0.5\n\n # Vertical Lines\n plt.plot((x0, x0), (y0, y8), 'black', alpha=alpha, linewidth=linewidth)\n plt.plot((x1, x1), (y0, y8), 'black', alpha=alpha, linewidth=linewidth)\n plt.plot((x2, x2), (y0, y5), 'black', alpha=alpha, linewidth=linewidth)\n plt.plot((x3, x3), (y0, y8), 'black', alpha=alpha, linewidth=linewidth)\n\n # Horizontal Lines\n plt.plot((x0, x3), (y0, y0), 'black', alpha=alpha, linewidth=linewidth)\n plt.plot((x0, x3), (y1, y1), 'black', alpha=alpha, linewidth=linewidth)\n plt.plot((x0, x3), (y2, y2), 'black', alpha=alpha, linewidth=linewidth)\n plt.plot((x0, x3), (y3, y3), 'black', alpha=alpha, linewidth=linewidth)\n plt.plot((x0, x3), (y4, y4), 'black', alpha=alpha, linewidth=linewidth)\n plt.plot((x0, x3), (y5, y5), 'black', alpha=alpha, linewidth=linewidth)\n plt.plot((x0, x1), (y6, y6), 'black', alpha=alpha, linewidth=linewidth)\n plt.plot((x1, x3), (y7, y7), 'black', alpha=alpha, linewidth=linewidth)\n plt.plot((x0, x3), (y8, y8), 'black', alpha=alpha, linewidth=linewidth)", "def draw_grids(self, grids):\n\n list_of_grids = is_grids_list_of_grids(grids=grids)\n\n if not list_of_grids:\n\n plt.plot(\n np.asarray(grids)[:, 1],\n np.asarray(grids)[:, 0],\n c=self.colors[0],\n lw=self.width,\n ls=self.style,\n )\n\n else:\n\n color = itertools.cycle(self.colors)\n\n for grid in grids:\n\n if not None in grid:\n if len(grid) != 0:\n plt.plot(\n np.asarray(grid)[:, 1],\n np.asarray(grid)[:, 0],\n c=next(color),\n lw=self.width,\n ls=self.style,\n )", "def draw_on(self, surface):\n 
for x, y in self.alive_cells():\n #size = (self.box_size, self.box_size)\n #position = (x * self.box_size, y * self.box_size)\n #thickness = 1\n pygame.draw.rect(surface, DARK_RED, (x * self.box_size, y * self.box_size,self.box_size, self.box_size ))", "def draw_dungeon():\n grid_size = STATUS['grid_size']\n cells = STATUS['game_grid']\n player = get_locations()['player']\n print(\" _\" * grid_size)\n tile = \"|{}\"\n for cell in cells:\n x, y = cell # Surely this is just unpacking?\n if x < grid_size:\n line_end = ''\n if cell == player:\n output = tile.format(\"P\")\n else:\n output = tile.format(\"_\")\n else:\n line_end = \"\\n\"\n if cell == player:\n output = tile.format(\"P|\")\n else:\n output = tile.format(\"_|\")\n print(output, end=line_end)", "def _init_draw(self):\n self.model.screen.fill((105,105,105))\n self.model.cells = {}\n cell_size = self.model.cell_length\n for i in range(self.model.height):\n for j in range(self.model.width):\n cell_coord = (i*self.model.cell_length,j*self.model.cell_length)\n self.model.cells[(i,j)] = Cell(self.model.screen,cell_coord,cell_size)\n all_cells = self.model.cells.values()\n for cell in all_cells:\n cell.draw()", "def _init_draw(self):\n self.model.screen.fill((105,105,105))\n self.model.cells = {}\n cell_size = self.model.cell_length\n for i in range(self.model.height):\n for j in range(self.model.width):\n cell_coord = (i*self.model.cell_length,j*self.model.cell_length)\n self.model.cells[(i,j)] = Cell(self.model.screen,cell_coord,cell_size)\n all_cells = self.model.cells.values()\n for cell in all_cells:\n cell.draw()", "def drawMap(self,parentControl):\n if not self.Map:\n print \"Error: no map\"\n return\n \n rows=len(self.Map)\n columns=len(self.Map[0])\n \n listCells = []\n frameCheckerBoard=Frame(parentControl)\n for i in range(0,rows):\n listCells.append([])\n for j in range(0,columns):\n cellType = self.Map[i][j]\n if(cellType == \" \"):\n imgCell = self.createImageCell(frameCheckerBoard, (i, j),self.FloorImage)\n elif(cellType == \"^\"):\n imgCell = self.createImageCell(frameCheckerBoard, (i, j),self.WallImage)\n elif(cellType == \"~\"):\n imgCell = self.createImageCell(frameCheckerBoard, (i, j),self.WaterImage)\n elif(cellType == \"$\"):\n imgCell = self.createImageCell(frameCheckerBoard, (i, j),self.FloorImage)\n imgCell.changeForeground(self.CoinImage)\n elif(re.match(\"[a-zA-Z]\", cellType)):\n imgCell = self.createImageCell(frameCheckerBoard, (i, j),self.FloorImage)\n imgCell.changeForeground(self.PlayerImage)\n listCells[i].append(imgCell)\n frameCheckerBoard.grid(row = 0, column = 0, sticky = N+E+W+S)\n self.drawLegend(parentControl)\n parentControl.rowconfigure(0, weight = 1)\n parentControl.columnconfigure(0, weight = 1)\n \n for x in range(0,rows):\n frameCheckerBoard.rowconfigure(x, weight=1)\n \n for y in range(0,columns):\n frameCheckerBoard.columnconfigure(y, weight=1)\n \n return listCells", "def drawBoard(self):\r\n \r\n for i in range(8):\r\n for j in range(8):\r\n if (i %2 == 0 and j % 2 == 0) or (i % 2 !=0 and j % 2 != 0):\r\n COLOR = COLOR1\r\n else: COLOR = COLOR2\r\n pygame.draw.rect(screen, COLOR, Rect(i*50, j*50, 50, 50))\r\n\r\n self.drawLabels()\r\n \r\n if not self.piecesDrawn:\r\n self.drawPieces()\r\n self.piecesDrawn = True", "def draw(self, **kwargs):\n\n Lons = numpy.ones(self.data.shape)*0.5\n Lats = numpy.ones(self.data.shape)*0.5\n for ix in range(self.ncols):\n for iy in range(self.nrows):\n Lons[iy,ix] = self.xllcorner+float(ix)*self.cellsize\n Lats[iy,ix] = 
self.yllcorner+float(iy)*self.cellsize\n ContourMin = numpy.min(numpy.where(self.data != self.nodata,self.data, 1000000))\n ContourMax = numpy.max(numpy.where(self.data != self.nodata,self.data, -1000000))*1.10\n if kwargs.has_key('contours'):\n if type( kwargs['contours'] ) == type( 1 ):\n Contours = numpy.arange(ContourMin, ContourMax, (ContourMax-ContourMin)/float( kwargs['contours']+1))\n else:\n Contours = kwargs['contours']\n else:\n Contours = numpy.arange(ContourMin, ContourMax, (ContourMax-ContourMin)/11.)\n if kwargs.has_key('cmap'):\n mycmap = kwargs['cmap']\n else:\n mycmap = 'jet'\n if kwargs.has_key('dmap'):\n dmap = max(0,min(4,kwargs['dmap']))\n else:\n dmap = 4\n # Lambert Conformal Conic map.\n if kwargs.has_key('res'):\n if kwargs['res']=='med':\n mapres='i'\n elif kwargs['res']=='hi':\n mapres='h'\n else:\n mapres = 'l'\n else:\n mapres = 'l'\n if mapres not in ('c','l','i','h'):\n mapres = 'l'\n m = Basemap(llcrnrlon=Lons[0,0], llcrnrlat=Lats[0,0], urcrnrlon=Lons[self.nrows-1,self.ncols-1], urcrnrlat=Lats[self.nrows-1,self.ncols-1],\n projection='lcc',lat_1=30.,lat_2=60.,lon_0=(Lons[0,0]+Lons[self.nrows-1,self.ncols-1])/2.,\n resolution =mapres,area_thresh=1000.)\n # create figure, add axes.\n fig=p.figure()\n ax = fig.add_axes([0.1,0.1,0.7,0.7])\n #make a filled contour plot.\n x, y = m( Lons , Lats)\n CS = m.contourf(x,y,self.data, Contours, cmap=p.get_cmap(mycmap))\n\tpos = ax.get_position()\n\tl, b, w, h = getattr(pos, 'bounds', pos)\n #l,b,w,h=ax.get_position()\n cax = p.axes([l+w+0.075, b, 0.05, h]) # setup colorbar axes\n p.colorbar(drawedges=True, cax=cax) # draw colorbar\n p.axes(ax) # make the original axes current again\n\n if kwargs.has_key('shapefiles'):\n for s in kwargs['shapefiles']:\n try:\n lw = s[3]\n except:\n lw = 0.5\n try:\n clr = s[4]\n except:\n clr='k'\n shp_info = apply(m.readshapefile, (s[0],s[1]),{'drawbounds':s[2], 'linewidth':lw, 'color':clr} )\n # draw coastlines, meridians and parallels.\n if dmap > 1:\n m.drawcoastlines()\n if dmap > 2:\n m.drawcountries()\n if dmap > 3:\n m.drawstates()\n if dmap > 0:\n m.drawparallels(p.arange(10,70,10),labels=[1,1,0,0])\n m.drawmeridians(p.arange(-100,0,10),labels=[0,0,0,1])\n if kwargs.has_key('title'):\n p.title(kwargs['title'])\n else:\n p.title(self.name.title())\n if kwargs.has_key('format'):\n fn = self.name+'.'+kwargs['format']\n if kwargs.has_key('dpi'):\n dots = kwargs['dpi']\n else:\n dots = 100\n try:\n p.savefig(fn,dpi=dots)\n except:\n print 'Error saving to format : ', kwargs['format']\n else:\n p.show()", "def draw_floor(screen, grid, start_location):\n floor_image = pygame.image.load(\"images/floor.png\")\n # better tile management for multiple environments / create multiple environments.\n # 0 = floor, 1 = wall (pillar)\n max_dimensions = grid.shape\n for r in range(max_dimensions[0]):\n for c in range(max_dimensions[1]):\n screen.blit(floor_image, (c * 30 + start_location[0],\n r * 30 + start_location[1]))", "def onPaint(self, event):\n\n dc = wx.PaintDC(self)\n self.drawTilesLayers(dc)", "def draw_grid(self, darken=1):\n if not(0 < darken < 1):\n darken = 1\n for x in range(0, int(self.settings['grid_size'])):\n for y in range(0, int(self.settings['grid_size'])):\n if self.grid[x][y] == g.EMPTY:\n if (x + y) % 2 == 0:\n r = pygame.Rect((x * self.block_width, y * self.block_height),\n (self.block_width, self.block_width))\n pygame.draw.rect(self.surface, (93 * darken, 216 * darken, 228 * darken), r)\n else:\n rr = pygame.Rect((x * self.block_width, y * self.block_width),\n 
(self.block_width, self.block_width))\n pygame.draw.rect(self.surface, (84 * darken, 194 * darken, 205 * darken), rr)\n elif self.grid[x][y] == g.WALL:\n rr = pygame.Rect((x * self.block_width, y * self.block_width), (self.block_width, self.block_width))\n pygame.draw.rect(self.surface, (175 * darken, 34 * darken, 6 * darken), rr)\n elif self.grid[x][y] == g.PLAYER:\n r = pygame.Rect((x * self.block_width, y * self.block_height),\n (self.block_width, self.block_height))\n pygame.draw.rect(self.surface, (17 * darken, 24 * darken, 47 * darken), r)\n pygame.draw.rect(self.surface, (93, 216, 228), r, 1)\n elif self.grid[x][y] == g.FOOD:\n r = pygame.Rect((x * self.block_width, y * self.block_height),\n (self.block_width, self.block_height))\n pygame.draw.rect(self.surface, (223 * darken, 163 * darken, 49 * darken), r)\n pygame.draw.rect(self.surface, (93, 216, 228), r, 1)", "def create_grid(self):\n row = 0\n col = 0\n for row in range(self._dim):\n for col in range(self._dim):\n x1 = col*self._cell_dim # bottom left\n y1 = row * self._cell_dim # top left\n x2 = x1 + self._cell_dim # bottom right\n y2 = y1 + self._cell_dim # top right\n self.rect[row,col] = self.canvas.create_rectangle(x1,y1,x2,y2, fill=self._primary_color, outline=self._grid_lines_color, tags=\"rect\")\n self.canvas.tag_bind(self.rect[row, col], '<ButtonPress-1>', self.change_cell)\n col = 0\n row += 1\n if self._dim < 50:\n button_size = int(80*(self._dim/50))\n font_size = int(22*(self._dim/50))\n else:\n button_size = 80\n font_size = 18\n x1 = col * self._cell_dim + (((self._dim*self._cell_dim) - button_size*3)//2)\n y1 = row * self._cell_dim + 5\n x2 = x1 + button_size\n y2 = y1 + 20\n self.canvas.create_oval(x1,y1,x2,y2, tags=\"toggle\", fill=self._primary_color)\n self.canvas.create_text(x1+(button_size//2), y1+10, tags=\"toggle-text\", fill=self._secondary_color, text=\"Start\", font=(\"Courier\", font_size))\n self.canvas.tag_bind(\"toggle\", '<ButtonPress-1>', self.toggle_refresh)\n self.canvas.tag_bind(\"toggle-text\", '<ButtonPress-1>', self.toggle_refresh)\n x1 = x2 + 5 # padding between buttons\n x2 = x1 + button_size\n self.canvas.create_oval(x1,y1,x2,y2, tags=\"next\", fill=self._primary_color)\n self.canvas.create_text(x1+(button_size//2), y1+10, tags=\"next-text\", fill=self._secondary_color, text=\"Next\", font=(\"Courier\", font_size))\n self.canvas.tag_bind(\"next\", '<ButtonPress-1>', self.one_step)\n self.canvas.tag_bind(\"next-text\", '<ButtonPress-1>', self.one_step)\n x1 = x2 + 5 # padding between buttons\n x2 = x1 + button_size\n self.canvas.create_oval(x1,y1,x2,y2, tags=\"clear\", fill=self._primary_color)\n self.canvas.create_text(x1+(button_size//2), y1+10, tags=\"clear-text\", fill=self._secondary_color, text=\"Clear\", font=(\"Courier\", font_size))\n self.canvas.tag_bind(\"clear\", '<ButtonPress-1>', self.clear_board)\n self.canvas.tag_bind(\"clear-text\", '<ButtonPress-1>', self.clear_board)\n self.model_refresh()", "def grid_04():\n plot = {\"Walls\": [\"N\", \"S\", \"W\"], \"TARDIS\": False, \"Transmat\": False,\n \"Plot\": f'\\nEerie blue lights lit the cold corridors. 
To the NORTH, SOUTH, and WEST are solid metal walls.\\n'}\n return plot", "def draw(self, canvas):\n canvas.draw_polygon([self._top_left_, self._top_right_, self._bot_right_, self._bot_left_],\n 3, \"red\")\n # draw_image(image, center_source, width_height_source, center_dest, width_height_dest, rotation=0)\n # print(\"self._tilemap_coord[0]\", self._tilemap_coord[0])\n # print(\"self._tilemap_coord[1]\", self._tilemap_coord[1])\n canvas.draw_image(\n # image\n PLATFORM_TILEMAP,\n # center_source\n [(self._tilemap_coord[0] + 0.5) * shooter_global_variables.TILE_DIM,\n (self._tilemap_coord[1] + 0.5) * shooter_global_variables.TILE_DIM],\n # width_height_source\n [shooter_global_variables.TILE_DIM, shooter_global_variables.TILE_DIM],\n # center_dest\n self._pos_,\n # width_height_dest\n PLATFORM_INFO.get_size())\n canvas.draw_text(str(round(self._pos_[1] / TILE_DIM - 1)) + \", \"\n + str(round(self._pos_[0] / TILE_DIM - 1)),\n [self._top_left_[0] + TILE_DIM / 3, self._pos_[1]], 20, \"white\")\n # draw tilemap here", "def gridDrawer(self):\n\n # Price of laying a cable underneath a house\n priceUnderHouse = 5000\n\n # Price of five batteries\n costBatteries = 25000\n\n print(\"drawing...\")\n\n # Initiate list for coordinates from houses and batteries\n xHouse = []\n yHouse = []\n xBattery = []\n yBattery = []\n\n # Fill lists with coordinates\n for house in self.houses:\n xHouse.append(house.xLocation)\n yHouse.append(house.yLocation)\n\n for battery in self.batteries:\n xBattery.append(battery.xLocation)\n yBattery.append(battery.yLocation)\n\n # Make square figure and draw axis and ticks\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n ax.set_aspect('equal')\n ax.set_xticks(np.arange(0, 51, 1), minor=True)\n ax.set_yticks(np.arange(0, 51, 1), minor=True)\n\n # Draw gridlines\n ax.grid(which='minor', alpha=0.2, linestyle='-')\n ax.grid(which='major', alpha=0.5, linestyle='-')\n\n # Draw connections from houses to batteries in grid,\n # give cable to battery its own color\n totalScore = 0\n colors = [\"firebrick\", \"g\", \"blue\", \"deeppink\", \"darkorange\"]\n\n for battery in self.batteries:\n\n # Make closedSet and set color for cable line\n closedSet = set()\n color = colors[battery.ID]\n\n for houseID in battery.connectedHouses:\n\n # Generate dijkstra path\n (cameFrom, score) = dijkstra.dijkstraSearch(battery, self,\n self.houses[houseID]\n .gridID,\n battery.gridID)\n totalScore += score[battery.gridID] - priceUnderHouse\n\n # Reconstruct the path\n path = dijkstra.reconstructPath(cameFrom,\n self.houses[houseID].gridID,\n battery.gridID)\n\n # Update the costs for the gridpoints\n for point in path:\n # Decrease cost if not 'free'\n if self.gridPoints[point].cost[battery.ID] != 0:\n self.gridPoints[point].cost[battery.ID] -= 9\n\n # Append route of cables to battery\n pathX = []\n pathY = []\n\n for ID in path:\n if ID in closedSet:\n pathX.append(self.gridPoints[ID].xLocation)\n pathY.append(self.gridPoints[ID].yLocation)\n closedSet.add(ID)\n break\n else:\n pathX.append(self.gridPoints[ID].xLocation)\n pathY.append(self.gridPoints[ID].yLocation)\n closedSet.add(ID)\n\n # Draw cables\n plt.plot(pathX, pathY, color, alpha=0.5)\n\n # Draw markers for houses and batteries\n plt.plot(xHouse, yHouse, \"k.\")\n plt.plot(xBattery, yBattery, marker=\"s\", color=\"blue\", ls='None')\n\n # Show battery ID next to battery markers\n for battery in self.batteries:\n ax.annotate(battery.ID, (xBattery[battery.ID],\n yBattery[battery.ID]))\n\n # Show graph and cost of smartGrid\n totalCost 
= totalScore + costBatteries\n plt.title(\"Cable cost: \" + str(totalScore) + \" Battery cost: 25000 \" +\n \"Total cost: \" + str(totalCost))\n plt.show()", "def render(self):\n for r in range(self.y_size):\n line = ''\n for c in range(self.x_size):\n glyph = self.MAP_GLYPH_TABLE[self.grid_data[r][c]]\n\n # overwrite with player\n if r == self.player_y and c == self.player_x:\n glyph = self.PLAYER_GLYPH_TABLE[self.player_heading]\n\n line += glyph\n print(line)\n\n print('\\n' * (20 - self.y_size))", "def visualize_M_gridworld(self, state=0):\n\n\t\tplt.subplot(221); plt.imshow(self.M[12,0,:].reshape(5,5)), plt.colorbar()\n\t\tplt.subplot(222); plt.imshow(self.M[12,1,:].reshape(5,5)), plt.colorbar()\n\t\tplt.subplot(223); plt.imshow(self.M[12,2,:].reshape(5,5)), plt.colorbar()\n\t\tplt.subplot(224); plt.imshow(self.M[12,3,:].reshape(5,5)), plt.colorbar()\n\t\tplt.show()", "def draw_board(self) -> None:\n # -> establishment of new dimensions for the canvas :\n side_size = self.side_size\n wide, high = side_size * self.n_col, side_size * self.n_row\n self.can.configure(width=wide, height=high)\n # Layout of the grid:\n self.can.delete(tkinter.ALL) # erasing of the past Layouts\n s = side_size\n for _ in range(self.n_row - 1): # horizontal lines\n self.can.create_line(0, s, wide, s, fill=\"white\")\n s += side_size\n s = side_size\n for _ in range(self.n_col - 1): # vertical lines\n self.can.create_line(s, 0, s, high, fill=\"white\")\n s += side_size\n # Layout of all the pawns,\n # white or black according to the state of the game :\n for row in range(self.n_row):\n for col in range(self.n_col):\n x1 = col * side_size + 3 # size of pawns =\n x2 = (col + 1) * side_size - 3 # size of the case - 10\n y1 = row * side_size + 3 #\n y2 = (row + 1) * side_size - 3\n color = self.color(row, col)\n self.can.create_oval(x1, y1, x2, y2, outline=\"grey\",\n width=1, fill=color)", "def draw(self):\n if (libt.map_is_in_fov(self.handler.fov_map, self.x, self.y) or \n self.handler.world.map[self.x][self.y].seen and self.visible_in_fog):\n libt.console_set_default_foreground(self.handler.game_map, self.colour)\n libt.console_put_char(self.handler.game_map, self.x, self.y, \n self.char, libt.BKGND_NONE)", "def create_grid(self):\n\n # If called when a grid already exists create a new grid\n if self.grid:\n self.grid = []\n\n grid_pen = QPen(QColor(215, 215, 215), 1)\n w = 10000\n h = 10000\n self.addLine(-10000, 0, 10000, 0, QPen(QColor(0, 0, 0), 2))\n self.addLine(0, -10000, 0, 10000, QPen(QColor(0, 0, 0), 2))\n\n w = int(w / self.grid_spacing) * self.grid_spacing\n h = int(h / self.grid_spacing) * self.grid_spacing\n for i in range(-w, w, self.grid_spacing):\n if i == 0:\n pass\n else:\n line = self.addLine(-w, i, w, i, grid_pen)\n line.setZValue(-1)\n self.grid.append(line)\n for i in range(-h, h, self.grid_spacing):\n if i == 0:\n pass\n else:\n line = self.addLine(i, -h, i, h, grid_pen)\n line.setZValue(-1)\n self.grid.append(line)\n\n self.grid_built = True", "def plot_cell_grid_partitioning(\n output, cellsize_lon=5.0, cellsize_lat=5.0, figsize=(12, 6)\n):\n mp.rcParams[\"font.size\"] = 10\n mp.rcParams[\"text.usetex\"] = True\n plt.figure(figsize=figsize, dpi=300)\n ax = plt.axes([0, 0, 1, 1])\n\n map = Basemap(\n projection=\"cyl\",\n llcrnrlat=-90,\n urcrnrlat=90,\n llcrnrlon=-180,\n urcrnrlon=180,\n ax=ax,\n )\n map.drawparallels(\n np.arange(-90, 90, cellsize_lat), labels=[1, 0, 0, 0], linewidth=0.5\n )\n map.drawmeridians(\n np.arange(-180, 180, cellsize_lon),\n labels=[0, 0, 0, 1],\n 
rotation=\"vertical\",\n linewidth=0.5,\n )\n # fill continents 'coral' (with zorder=0), color wet areas 'aqua'\n map.drawmapboundary(fill_color=\"aqua\")\n map.fillcontinents(color=\"0.6\", lake_color=\"aqua\")\n label_lats = np.arange(-90 + cellsize_lat / 2.0, 90, cellsize_lat)\n label_lons = np.arange(-180 + cellsize_lon / 2.0, 180, cellsize_lon)\n lons, lats = np.meshgrid(label_lons, label_lats)\n x, y = map(lons.flatten(), lats.flatten())\n cells = grids.lonlat2cell(\n lons.flatten(),\n lats.flatten(),\n cellsize_lon=cellsize_lon,\n cellsize_lat=cellsize_lat,\n )\n for xt, yt, cell in zip(x, y, cells):\n plt.text(\n xt,\n yt,\n \"{:}\".format(cell),\n fontsize=4,\n va=\"center\",\n ha=\"center\",\n weight=\"bold\",\n )\n plt.savefig(output, format=\"png\", dpi=300)\n plt.close()", "def fill_grid(self):\n\n for row_margin, row in enumerate(range(self.rows)):\n self.grid.append([])\n\n for col_margin, col in enumerate(range(self.cols)):\n x = col*self.cell_size + col_margin\n y = row*self.cell_size + row_margin\n\n rect = pygame.Rect(x, y, self.cell_size, self.cell_size)\n\n cell = Cell(row, col, rect)\n\n if row == 7 and col == 3:\n cell.root = True\n self.root = cell\n elif row == 7 and col == 16:\n cell.goal = True\n self.goal = cell\n\n self.grid[row].append(cell)" ]
[ "0.8058154", "0.78003556", "0.76907337", "0.76755744", "0.75905925", "0.7454775", "0.7441136", "0.73149663", "0.7275913", "0.7247664", "0.719331", "0.71902335", "0.7187785", "0.7140632", "0.71196425", "0.7098102", "0.70582795", "0.7006399", "0.70051944", "0.69672275", "0.69182676", "0.6899882", "0.68920386", "0.6861837", "0.6842023", "0.68086255", "0.67948663", "0.6789136", "0.6787156", "0.6775899", "0.67724144", "0.67581445", "0.67498916", "0.67384845", "0.67362934", "0.67267907", "0.67199695", "0.67199695", "0.6681585", "0.66703653", "0.66597265", "0.66560364", "0.664495", "0.6615883", "0.66139877", "0.6573092", "0.65436924", "0.65226686", "0.6500303", "0.64990866", "0.64845276", "0.64709044", "0.6462678", "0.6449755", "0.644613", "0.6435775", "0.6431246", "0.63609755", "0.6350466", "0.6340023", "0.6336068", "0.63350767", "0.6333591", "0.6331211", "0.6330468", "0.6318974", "0.63177806", "0.6314665", "0.63088125", "0.63045913", "0.62858623", "0.62701285", "0.62682605", "0.62578434", "0.6237693", "0.6228863", "0.6226704", "0.62259704", "0.6223338", "0.6218628", "0.6217202", "0.6217178", "0.6217178", "0.6215131", "0.62054884", "0.6191684", "0.61897933", "0.6180774", "0.6180404", "0.61770093", "0.6170683", "0.6169518", "0.6166292", "0.61634", "0.61607665", "0.6158926", "0.61582696", "0.61581475", "0.6154257", "0.61534107" ]
0.7732143
2
Draws the given map level by layering all the sprites.
def draw(self): pg.display.set_caption("{:.2f}".format(self.clock.get_fps())) if distance(self.player.pos, self.monster.pos)<MONSTER_BUBBLE_DISTANCE: now=pg.time.get_ticks() if self.fuzz: wait=NOISE_DURATION else: wait=NOISE_TIMESTEP #change to a function of distance to monster if now - self.last_update_noise>wait: self.last_update_noise=now if self.fuzz: self.map_img2=self.map_img else: self.map_img2=self.noisy_map_img #make static sound self.fuzz=not self.fuzz else: self.map_img2=self.map_img self.fuzz=False self.screen.blit(self.map_img2, self.camera.apply_rect(self.map_rect)) # Layer player and monsters on map for sprite in self.moving_sprites: self.screen.blit(sprite.image, self.camera.apply(sprite)) if self.draw_debug: pg.draw.rect(self.screen, LIGHTBLUE, self.camera.apply_rect(sprite.hit_rect), 1) if self.draw_debug: for wall in self.walls: pg.draw.rect(self.screen, LIGHTBLUE, self.camera.apply_rect(wall.rect), 1) for mirror in self.teleports: pg.draw.rect(self.screen, LIGHTBLUE, self.camera.apply_rect(mirror.rect), 1) for goal in self.win: pg.draw.rect(self.screen, LIGHTBLUE, self.camera.apply_rect(goal.rect), 1) dest=(self.monster.next_step[0]*TILESIZE, self.monster.next_step[1]*TILESIZE) next_step=pg.Rect(0, 0, 20, 20) next_step.center=dest pg.draw.rect(self.screen, LIGHTBLUE, self.camera.apply_rect(next_step), 1) for sprite in self.static_sprites: self.screen.blit(sprite.image, sprite.rect) pg.display.flip() #update the full display surface to the screen
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw_level(self):\r\n self.level_surface.blit(self.map_image, self.viewport, self.viewport)\r\n self.level_surface.blit(self.title_box, self.title_rect)", "def render(self):\n\n self.baseMap.beginDraw()\n self.baseMap.background(255)\n self.baseMap.endDraw()\n\n numColumns = self.width / self.tileSize\n numRows = self.height / self.tileSize\n\n startX = floor(self.centerX - numColumns / 2.0)\n startY = floor(self.centerY - numRows / 2.0)\n\n endX = ceil(self.centerX + numColumns / 2.0)\n endY = ceil(self.centerY + numRows / 2.0)\n\n self.offsetX = -floor((self.centerX - floor(self.centerX)) * self.tileSize) + \\\n floor(self.width / 2.0) + \\\n floor(startX - floor(self.centerX)) * self.tileSize\n self.offsetY = -floor((self.centerY - floor(self.centerY)) * self.tileSize) + \\\n floor(self.height / 2.0) + \\\n floor(startY - floor(self.centerY)) * self.tileSize\n\n def onTileLoaded(tile, meta):\n self.baseMap.beginDraw()\n x = meta['destX']\n y = meta['destY']\n self.baseMap.image(tile, x, y)\n self.baseMap.endDraw()\n\n for x in xrange(startX, endX):\n for y in xrange(startY, endY):\n # Interpolate the URL for this particular tile.\n # 12/1208/1541.png\n url = self.url % (self.zoom, x, y)\n\n # Compute the x and y coordinates for where this tile will go on the map.\n destX = (x - startX) * self.tileSize + self.offsetX\n destY = (y - startY) * self.tileSize + self.offsetY\n\n # Attempts to load all the images lazily.\n meta = {\n 'url' : url,\n 'destX' : destX,\n 'destY' : destY,\n 'x' : x,\n 'y' : y,\n }\n self.lazyImageManager.addLazyImage(url, onTileLoaded, meta)\n\n # Kick off all the layer rendering.\n for layer in self.layers:\n layer.render()\n\n for marker in self.markers:\n marker.draw()", "def drawMap(self):\n for position, contain in self.map.items():\n if contain is \"block\":\n self.blocks.add(Block(position[1]*50,position[0]*50))\n elif contain is \"Coins\":\n self.Coins.add(Coins(position[1]*50+10,position[0]*50+10))", "def create_visualization(levelname, leveldirectory, spritesdirectory):\r\n\t#Load sprites\r\n\tsprites = {}\r\n\tfor filename in glob.glob(f\"{spritesdirectory}/**/*.png\", recursive=True):\r\n\t\tim = Image.open(filename)\r\n\t\tname = filename.split(\"/\")[-1][:-4]\r\n\t\tsprites[name] = im.convert(\"RGBA\")\r\n\r\n\tlevel = {}\r\n\twith open(f\"{leveldirectory}/{levelname}.txt\") as fp:\r\n\t\tfor y, line in enumerate(fp):\r\n\t\t\tlevel[y] = line[:-1]\r\n\t\t\tprint(f\"{y}:\")\r\n\t\t\tprint(line)\r\n\r\n\tmaxX = len(level[0])\r\n\tmaxY = y+1\r\n\tprint(f\"Max y is {y}\")\r\n\r\n\r\n\r\n\t#Create backdrop of tiled plains sprites to which to write actual sprites\r\n\tdef createTiledPlainsImage():\r\n\t\timage = Image.new(\"RGB\", (maxX*16, (maxY)*16), color=(91, 153, 254))\r\n\t\tpixels = image.load()\r\n\r\n\t\timageToUse = sprites[Tile.reverse_lookup[\"P\"].filename]\r\n\t\tpixelsToUse = imageToUse.load()\r\n\t\tfor y in range(0, maxY):\r\n\t\t\tfor x in range(0, maxX):\r\n\t\t\t\tfor x2 in range(0, 16):\r\n\t\t\t\t\tfor y2 in range(0, 16):\r\n\t\t\t\t\t\tpixels[x*16+x2,y*16+y2] = pixelsToUse[x2,y2][:-1]\r\n\t\treturn image, pixels\r\n\r\n\timage, pixels = createTiledPlainsImage()\r\n\r\n\t#Draw the actual building/terrain sprites to the image\r\n\tfor y in range(0, maxY):\r\n\t\tfor x in range(0, maxX):\r\n\t\t\timageToUse = None\r\n\t\t\tprint(y)\r\n\t\t\tprint(maxY)\r\n\t\t\tprint(levelname)\r\n\t\t\tprint(f\"{x}, {y}\")\r\n\t\t\tif level[y][x] in 
Tile.reverse_lookup.keys():\r\n\t\t\t\tprint(Tile.reverse_lookup[level[y][x]])\r\n\t\t\t\timageToUse = sprites[Tile.reverse_lookup[level[y][x]].filename]\r\n\t\t\tif not imageToUse == None:\r\n\t\t\t\tpixelsToUse = imageToUse.load()\r\n\t\t\t\tx2max = imageToUse.size[0]\r\n\t\t\t\ty2max = imageToUse.size[1]\r\n\t\t\t\tfor x2 in range(0, x2max):\r\n\t\t\t\t\tfor y2 in range(0, y2max):\r\n\t\t\t\t\t\tif pixelsToUse[x2,y2][3]>0:\r\n\t\t\t\t\t\t\tupwardoffset = y2max-16\r\n\t\t\t\t\t\t\tywritepixel = y*16+y2-upwardoffset if y*16+y2-upwardoffset>=0 else y*16+y2\r\n\t\t\t\t\t\t\t#print(ywritepixel)\r\n\t\t\t\t\t\t\t#ywritepixel=y*16+y2\r\n\t\t\t\t\t\t\tpixels[x*16+x2,ywritepixel] = pixelsToUse[x2,y2][:-1]\r\n\r\n\t#save the resulting level image\r\n\tabsleveldir = os.path.abspath(f\"{leveldirectory}\")\r\n\tprint(leveldirectory)\r\n\tprint(absleveldir)\r\n\timage.save(rf\"{absleveldir}/{levelname}.png\",\"PNG\")", "def draw_layers(self):\n\t\tfor z in xrange(0,16):\n\t\t\t#create surface for this layer\n\t\t\tsrf = pygame.Surface((16,128))\n\t\t\tfor x in xrange(0,16):\n\t\t\t\tfor y in xrange(0,128):\n\t\t\t\t\tv = self.data[ self.xyz_to_offset( x,y,z) ]\n\t\t\t\t\tif v != 0:\n\t\t\t\t\t\tsrf.fill( BLOCKS.get(v, [0,0])[1], \t(x, 127 -y, 1, 1 ))\n\t\t\t#save layer to dict for this chunk\n\t\t\tself.layers[z] = srf", "def draw(self):\n\n self.updateLazyImageLoading()\n\n image(self.baseMap, 0, 0)\n\n for layer in self.layers:\n layer.draw()\n\n for marker in self.markers:\n marker.draw()", "def render_map(self, surface):\n\n # fill the background color of our render surface\n if self.tmx_data.background_color:\n surface.fill(pygame.Color(self.tmx_data.background_color))\n\n # iterate over all the visible layers, then draw them\n for layer in self.tmx_data.visible_layers:\n if isinstance(layer, TiledTileLayer):\n self.render_tile_layer(surface, layer)\n\n elif isinstance(layer, TiledObjectGroup):\n self.render_object_layer(surface, layer)\n\n elif isinstance(layer, TiledImageLayer):\n self.render_image_layer(surface, layer)", "def draw(self, layer: Layer) -> None:\r\n if layer and layer.layer_index >= self.num_layers:\r\n return\r\n\r\n pyxel.bltm(layer.offset.x, layer.offset.y, self.tilemap_id + layer.layer_index,\r\n self.rect_uv.x, self.rect_uv.y, self.rect_uv.w, self.rect_uv.h,\r\n colkey=layer.transparency_color)", "def draw_map(self):\n self.vis.draw_map()", "def draw_maps(m, y, x):\r\n original_x = x # Saving the orignal value of x coordinate\r\n \r\n # Loop through the map\r\n for j in m: \r\n y += 10 # Increase y coordinate with 10\r\n x = original_x # Reset x coordinate\r\n if isinstance(j, list):\r\n # Draw a color depending on value in map\r\n for i in j: \r\n if i == 0:\r\n pygame.draw.rect(display, green, (x, y, 10, 10))\r\n x += 10\r\n elif i == 1:\r\n pygame.draw.rect(display, grey, (x, y, 10, 10))\r\n x += 10\r\n elif i == 2:\r\n pygame.draw.rect(display, brown, (x, y, 10 , 10))\r\n x += 10\r\n elif i == 3:\r\n pygame.draw.rect(display, white, (x, y, 10, 10))\r\n x += 10", "def draw(self, verbosity=0):\n\n # Calculate overall scale and position of the map\n self.update_bounds()\n # Draw the dungeon background (everything behind the grid)\n self.draw_background(verbosity)\n # Draw the grid\n self.draw_grid(verbosity)\n # Draw the dungeon foreground (everything in front of the grid)\n self.draw_foreground(verbosity)\n\n pygame.display.flip()", "def _draw_map(screen):\n my_map = HexMap(80, 80, _hex_size=10)\n my_map.generate_with_random_walk(150, iterations=25)\n for tile in 
my_map:\n # print(tile)\n color = COLORS[tile.type]\n\n tile_color = _modify_color(color)\n pygame.draw.polygon(screen, tile_color, tile.corners)\n return my_map", "def drawMap(mapObj, gameStateObj, goals, screen):\n \n # mapSurf will be the single Surface object that the tiles are drawn\n # on, so that it is easy to position the entire map on the DISPLAYSURF\n # Surface object. First, the width and height must be calculated.\n # mapWidth = len(mapObj) * TILEWIDTH\n # mapSurfHeight = (len(mapObj[0]) - 1) * TILEFLOORHEIGHT + TILEHEIGHT\n # mapSurf = pygame.Surface((mapSurfWidth, mapSurfHeight))\n # mapSurf.fill(BGCOLOR) # start with a blank color on the surface.\n \n for i in xrange(len(tiles)):\n tiles[i].hideturtle()\n \n debugprint(\"drawing map\")\n \n nxtiles = len(mapObj)\n nytiles = len(mapObj[0])\n \n xoffset = TILEWIDTH/2 + TILEWIDTH\n yoffset = WINHEIGHT - TILEHEIGHT/2 - TILEWIDTH\n \n tileCount = 0;\n \n def updateTile(screen, xpos, ypos, shape):\n global tiles\n \n if tileCount >= len(tiles):\n tiles.append(Tile(screen, xpos, ypos, shape))\n else:\n tiles[tileCount].goto(xpos, ypos)\n tiles[tileCount].shape(shape)\n tiles[tileCount].showturtle()\n\n return tileCount + 1\n \n # screen.tracer(1)\n # # Draw the tile sprites onto this surface.\n for x in range(nxtiles):\n for y in range(nytiles):\n xpos = x*TILEWIDTH + xoffset\n ypos = yoffset - y*40\n \n if mapObj[x][y] in TILEMAPPING:\n baseTile = TILEMAPPING[mapObj[x][y]]\n elif mapObj[x][y] in OUTSIDEDECOMAPPING:\n baseTile = TILEMAPPING[' ']\n\n # First draw the base ground/wall tile.\n tileCount = updateTile(screen, xpos, ypos, baseTile)\n # debugprint(xpos)\n # debugprint(ypos)\n if mapObj[x][y] in OUTSIDEDECOMAPPING:\n # Draw any tree/rock decorations that are on this tile.\n tileCount = updateTile(screen,xpos,ypos,OUTSIDEDECOMAPPING[mapObj[x][y]])\n elif (x, y) in gameStateObj['stars']:\n if (x, y) in goals:\n # A goal AND star are on this space, draw goal first.\n tileCount = updateTile(screen,xpos,ypos,IMAGESDICT['covered goal'])\n # Then draw the star sprite.\n tileCount = updateTile(screen,xpos,ypos,IMAGESDICT['star'])\n elif (x, y) in goals:\n # Draw a goal without a star on it.\n tileCount = updateTile(screen,xpos,ypos,IMAGESDICT['uncovered goal'])\n\n # Last draw the player on the board.\n if (x, y) == gameStateObj['player']:\n # Note: The value \"player_image\" refers\n # to a key in \"PLAYERIMAGES\" which has the\n # specific player image we want to show.\n tileCount = updateTile(screen,xpos,ypos,PLAYERIMAGES[game_state[\"player_image\"]])\n debugprint(PLAYERIMAGES[game_state[\"player_image\"]])", "def pickMap(self, selectedmap):\r\n for lvl in self.maplevels:\r\n lvl.style.border_color = (255,255,255)\r\n self.selectedmap = selectedmap;\r\n self.maplevels[selectedmap].style.border_color = (0,0,0)\r\n self.repaint()\r\n self.slevel.mapimage.value = pygame.image.load(\"../media/map\" + str(self.selectedmap) +\".png\")\r\n self.slevel.repaint()", "def LoadSprites(self):\n \"\"\"calculate the center point offset\"\"\"\n x_offset = (BLOCK_SIZE/2)\n y_offset = (BLOCK_SIZE/2)\n\n\n \"\"\"Load the level\"\"\"\n level1 = level001.level()\n layout = level1.getLayout()\n img_list = level1.getSprites()\n\n\n\t\t# > The pellet sprites are grouped\n\t\t# > The block sprites are grouped\n\t\t# > The Wall sprites are grouped\n\t\t# > This is an example of a style of Object Oriented Programming, assigning\n\t\t# > The groups of items as a bluprint of one object to save typing the\n\t\t# > same code over and over.\n 
self.pellet_sprites = pygame.sprite.Group()\n self.block_sprites = pygame.sprite.Group()\n self.gwall_sprites = pygame.sprite.Group()\n\n for y in xrange(len(layout)):\n for x in xrange(len(layout[y])):\n \"\"\"Get the center point for the rects\"\"\"\n centerPoint = [(x*BLOCK_SIZE)+x_offset,(y*BLOCK_SIZE+y_offset)]\n if layout[y][x]==level1.BLOCK:\n self.block_sprites.add(basicSprite.Sprite(centerPoint, img_list[level1.BLOCK]))\n elif layout[y][x]==level1.GWALL:\n self.gwall_sprites.add(basicSprite.Sprite(centerPoint, img_list[level1.GWALL]))\n elif layout[y][x]==level1.SNAKE:\n self.snake = Snake(centerPoint,img_list[level1.SNAKE])\n elif layout[y][x]==level1.PELLET:\n self.pellet_sprites.add(basicSprite.Sprite(centerPoint, img_list[level1.PELLET]))\n elif layout[y][x]==level1.GHOST:\n self.ghost = Ghost(centerPoint,img_list[level1.GHOST])\n elif layout[y][x]==level1.GHOST2:\n self.ghost2 = Ghost(centerPoint,img_list[level1.GHOST2])\n elif layout[y][x]==level1.GHOST3:\n self.ghost3 = Ghost(centerPoint,img_list[level1.GHOST3])\n elif layout[y][x]==level1.GHOST4:\n self.ghost4 = Ghost(centerPoint,img_list[level1.GHOST4])\n \"\"\"Create the Snake group\"\"\"\n self.snake_sprites = pygame.sprite.RenderPlain((self.snake))\n\tself.ghost_sprites = pygame.sprite.RenderPlain((self.ghost))\n\tself.ghost2_sprites = pygame.sprite.RenderPlain((self.ghost2))\n\tself.ghost3_sprites = pygame.sprite.RenderPlain((self.ghost3))\n\tself.ghost4_sprites = pygame.sprite.RenderPlain((self.ghost4))", "def draw(self):\n \n # Draw the background\n self.world.fill(BLUE)\n \n # Draw all the sprite lists that we have\n self.wall_list.draw(self.world)\n self.enemy_list.draw(self.world)\n self.sludge.draw(self.world)\n self.consumeable.draw(self.world)\n self.can_climb.draw(self.world)", "def draw_level(self, surface):\n surface.blit(self.background, (0, 0))\n surface.blit(self.player.image, self.player.rect)\n surface.blit(self.message_box.image, self.message_box.rect)\n surface.blit(self.arrow.image, self.arrow.rect)\n surface.blit(self.transition_surface, (0, 0))", "def drawTilesLayers(self, dc=None, clear=False):\n\n # if no given DC, get client DC\n if dc is None:\n dc = wx.ClientDC(self)\n\n # if map smaller than view, clear background as it will show\n if clear:\n dc.Clear()\n\n # figure out how to draw tiles\n if self.view_offset_x < 0:\n # centre in X\n start_x_tile = 0\n stop_x_tile = self.tiles.num_tiles_x\n x_pix = -self.view_offset_x\n else:\n x_offset = self.view_offset_x + self.move_dx\n start_x_tile = int(x_offset / self.tile_size_x)\n stop_x_tile = int((x_offset+self.view_width+self.tile_size_x-1)\n / self.tile_size_x)\n x_pix = start_x_tile*self.tile_size_y - x_offset\n\n if self.view_offset_y < 0:\n # centre in Y\n start_y_tile = 0\n stop_y_tile = self.tiles.num_tiles_y\n y_pix_start = -self.view_offset_y\n else:\n y_offset = self.view_offset_y + self.move_dy\n start_y_tile = int(y_offset / self.tile_size_y)\n stop_y_tile = int((y_offset+self.view_height+self.tile_size_y-1)\n / self.tile_size_y)\n y_pix_start = start_y_tile*self.tile_size_y - y_offset\n\n # start pasting tiles onto view\n for x in range(start_x_tile, stop_x_tile):\n y_pix = y_pix_start\n for y in range(start_y_tile, stop_y_tile):\n dc.DrawBitmap(self.tiles.get_tile(x, y), x_pix, y_pix, False)\n y_pix += self.tile_size_y\n x_pix += self.tile_size_x\n\n # draw layers\n for id in self.layer_z_order:\n l = self.layer_mapping[id]\n if l.visible:\n l.painter(dc, l.data, map_rel=l.map_relative,\n colour=l.colour, size=l.size, 
filled=l.filled,\n attributes=l.attributes)\n\n # draw selection rectangle, if any\n if self.sbox_1_x:\n penclr = wx.Colour(0, 0, 255, 255)\n dc.SetPen(wx.Pen(penclr, width=1))\n brushclr = wx.Colour(0, 0, 0, 0)\n dc.SetBrush(wx.Brush(brushclr, style=wx.TRANSPARENT))\n dc.DrawRectangle(self.sbox_1_x, self.sbox_1_y,\n self.sbox_w, self.sbox_h)", "def draw(self, screen):\n for branch_points in self.branches:\n pygame.draw.polygon(screen, self.branch_color, branch_points)\n for bottom_points in self.bottom:\n pygame.draw.polygon(screen, self.bottom_color, bottom_points)", "def create_map(self) -> pygame.sprite.Sprite:\n topleft = 50, 50\n bottomright = 500, 300\n f = TestFloor(topleft, bottomright, s.BROWN)\n\n p0 = Vec2d(topleft)\n p1 = p0 + Vec2d(bottomright)\n self.level_borders_ids.update(\n LevelBorders(s.flip_y(p0), s.flip_y(p1),\n space=self.main_loop.space,\n d=s.LEVEL_BORDERS_THICKNESS).get_ids\n )\n\n return f", "def draw_terrain(self, map_pos, screen_pos):\n if map_pos in MAP:\n num = self.terrain_data[map_pos.y][map_pos.x]\n else:\n num = -1\n if num == -1:\n # Outside the map\n terminal.puts(screen_pos.x, screen_pos.y, ' ')\n elif num < -0.2:\n # Water\n terminal.color(terminal.color_from_name('blue'))\n terminal.puts(screen_pos.x, screen_pos.y, '~')\n elif num < 0.5:\n # Grass\n terminal.color(terminal.color_from_name('green'))\n terminal.puts(screen_pos.x, screen_pos.y, '.')\n else:\n # Mountains\n terminal.color(terminal.color_from_name('gray'))\n terminal.puts(screen_pos.x, screen_pos.y, '^')", "def draw(self, DISP, life_counter:int, level:int):\r\n assert self.is_init, 'Call first Game_Field.init() before draw game!'\r\n y_count,x_count = 3, 0\r\n start_maze = 0, 0\r\n \r\n DISP.fill(Colors.colors['BLACK'])\r\n # Maze get blit on the Screen of the game\r\n DISP.blit(self.maze, start_maze) \r\n # Draw the numer of Pac-Mans's life\r\n self.draw_pacman_life(life_counter, DISP) \r\n # Draw the actual level on the screen\r\n self.draw_level(DISP, level)\r\n for y in self.look_up_table[3 : -2]: #< y is a list of one row from the maze\r\n for x in y: #< x is a string that is decoded as already explained\r\n pos = [self.grid_size * x_count, self.grid_size * y_count]\r\n # Set reference position in the middle of one square\r\n pos[0] += self.grid_size // 2\r\n pos[1] += self.grid_size // 2\r\n x_count += 1\r\n # Check if x is a Dot or an Energizer\r\n if x != None and (x[0] == 'p' or x == 'e'):\r\n radius = 6\r\n if x == 'e':\r\n radius = self.grid_size // 2 - 4\r\n pg.draw.circle(DISP, Colors.colors['POINTS'], tuple(pos), radius)\r\n elif x[0] == 'p':\r\n pg.draw.rect(DISP, Colors.colors['POINTS'], ((pos[0] - radius // 2, pos[1] - radius // 2), (radius, radius)))\r\n \r\n \r\n y_count += 1\r\n x_count = 0", "def curses_print_map(self):\n map_window = self.stdscreen.subwin(5,5)\n map_keypad = map_window.keypad(1)\n map_panel = panel.new_panel(map_window)\n\n map_panel.update_panels()\n map_panel.top()\n map_panel.show()\n map_window.clear()\n\n x = 0; y=0; z=0\n\n # Print map phase\n draw_map(self,[x,y,z])\n\n def draw_map(game,loc):\n grid = game.world.grid\n\n z = loc[2] # Load the current floor (z)\n\n for x in range(game.conf.x_dim):\n for y in range(game.conf.y_dim):\n # Draw a map here!\n pass", "def draw(self):\n self.screen.blit(self.score_image, self.score_rect)\n self.screen.blit(self.high_score_image, self.high_score_rect)\n self.screen.blit(self.level_image, self.level_rect)\n self.ships.draw(self.screen)", "def map_blit(self):\n for l in self.blocks:\n for b in 
range(len(l)):\n l[b].blit()", "def updateScreenTiling(self,level):\n\n self.tile_list=[]\n self.objList=[]\n self.level=level\n\n self.rowCount=0\n \n for row in worldData[self.level]:\n self.colCount=0\n for tile in row:\n if tile!=0:\n img11=self.tilType[tile-1]\n img=pygame.transform.scale(img11,(self.tileSize,self.tileSize))\n img_rect = img.get_rect()\n img_rect.x = self.colCount * self.tileSize\n img_rect.y = self.rowCount * self.tileSize\n tile= (img, img_rect)\n self.tile_list.append(tile)\n self.colCount+=1\n self.rowCount+=1\n \n self.rowCount=0\n for row in objectData[self.level]:\n self.colCount=0\n for tile in row:\n if tile!=0:\n img11=self.objType[tile-1]\n img=pygame.transform.scale(img11,(self.tileSize,self.tileSize))\n img_rect = img.get_rect()\n img_rect.x = self.colCount * self.tileSize\n img_rect.y = self.rowCount * self.tileSize\n tile= (img, img_rect)\n self.objList.append(tile)\n self.colCount+=1\n self.rowCount+=1", "def draw(self, g):\n #declare a temporary surface to apply transformations to\n temp = pygame.Surface( (self.rect.width, self.rect.height) ).convert()\n #set up transparency\n temp.fill( (255, 255, 0) )\n temp.set_colorkey( (255, 255, 0) )\n #draw img to temp\n temp.blit(self.img, pygame.Rect(0, 0, self.rect.width, self.rect.height) )\n #calculate offset\n offset = [-1 *( g.focus[0] - g.view[0] / 2 ), -1 * ( g.focus[1] - g.view[1] / 2 ) ]\n #zoom logic\n temp = pygame.transform.scale(temp, ( (int)(self.rect.width * g.zoom), (int)(self.rect.height * g.zoom) ))\n #draw to the game screen\n g.screen.blit( temp, pygame.Rect( (int)(self.x * g.zoom) + offset[0], (int)(self.y * g.zoom) + offset[1], \\\n (int)(self.rect.width * g.zoom), (int)(self.rect.height * g.zoom) ) )\n \n for bullet in self.projectiles:\n bullet.draw(g)\n #g.screen.blit(self.img, pygame.Rect(self.x, self.y, self.rect.width, self.rect.height), pygame.Rect(0, 0, self.rect.width, self.rect.height) )", "def setup(self, level):\r\n\r\n # Used to keep track of our scrolling\r\n self.view_bottom = 0\r\n self.view_left = 0\r\n\r\n # Keep track of the score\r\n self.score = 0\r\n\r\n # Keep track of lives\r\n # self.lives = 5\r\n\r\n # Create the Sprite lists\r\n self.player_list = arcade.SpriteList()\r\n self.foreground_list = arcade.SpriteList()\r\n self.background_list = arcade.SpriteList()\r\n self.wall_list = arcade.SpriteList()\r\n self.coin_list = arcade.SpriteList()\r\n\r\n # Set up the player, specifically placing it at these coordinates.\r\n image_source = \"images/Alice/Alice7_front.png\"\r\n self.player_sprite = arcade.Sprite(image_source, CHARACTER_SCALING)\r\n self.player_sprite.center_x = PLAYER_START_X\r\n self.player_sprite.center_y = PLAYER_START_Y\r\n self.player_list.append(self.player_sprite)\r\n\r\n # --- Load in a map from the tiled editor ---\r\n\r\n # Name of the layer in the file that has our platforms/walls\r\n platforms_layer_name = 'Platforms'\r\n moving_platforms_layer_name = 'Moving Platforms'\r\n # Name of the layer that has items for pick-up\r\n coins_layer_name = 'Coins'\r\n # Name of the layer that has items for foreground\r\n foreground_layer_name = 'Foreground'\r\n # Name of the layer that has items for background\r\n background_layer_name = 'Background'\r\n # Name of the layer that has items we shouldn't touch\r\n dont_touch_layer_name = \"Don't Touch\"\r\n\r\n # Map name\r\n map_name = f\"map4_level_{level}.tmx\"\r\n\r\n # Read in the tiled map\r\n my_map = arcade.tilemap.read_tmx(map_name)\r\n\r\n # Calculate the right edge of the my_map in pixels\r\n 
self.end_of_map = my_map.map_size.width * GRID_PIXEL_SIZE\r\n\r\n # -- Background\r\n self.background_list = arcade.tilemap.process_layer(my_map,\r\n background_layer_name,\r\n TILE_SCALING)\r\n\r\n # -- Foreground\r\n self.foreground_list = arcade.tilemap.process_layer(my_map,\r\n foreground_layer_name,\r\n TILE_SCALING)\r\n\r\n # -- Platforms\r\n self.wall_list = arcade.tilemap.process_layer(map_object=my_map,\r\n layer_name=platforms_layer_name,\r\n scaling=TILE_SCALING,\r\n use_spatial_hash=True)\r\n # -- Moving Platforms\r\n moving_platforms_list = arcade.tilemap.process_layer(my_map, moving_platforms_layer_name, TILE_SCALING)\r\n for sprite in moving_platforms_list:\r\n self.wall_list.append(sprite)\r\n\r\n # -- Coins\r\n self.coin_list = arcade.tilemap.process_layer(my_map,\r\n coins_layer_name,\r\n TILE_SCALING,\r\n use_spatial_hash=True)\r\n\r\n # -- Don't Touch Layer\r\n self.dont_touch_list = arcade.tilemap.process_layer(my_map,\r\n dont_touch_layer_name,\r\n TILE_SCALING,\r\n use_spatial_hash=True)\r\n\r\n # --- Other stuff\r\n # Set the background color\r\n if my_map.background_color:\r\n arcade.set_background_color(my_map.background_color)\r\n\r\n # Create the 'physics engine'\r\n self.physics_engine = arcade.PhysicsEnginePlatformer(self.player_sprite,\r\n self.wall_list,\r\n GRAVITY)", "def draw_components(drawer, center, scale, tile):\n # Draw any animal territories on the tile\n if tile.animal is not None:\n animal_color = {\"bear\": \"black\", \"cougar\": \"red\"}[tile.animal]\n draw_poly(\n drawer,\n center=center,\n sides=6,\n scale=int(0.8 * scale),\n border=COLORS[animal_color],\n )\n\n # Draw any structure on the tile\n if tile.structure is not None:\n struct, color = tile.structure\n border = \"white\" if color == \"black\" else \"black\"\n\n if struct == \"shack\":\n struct_center = [center[0], center[1] + 0.45 * scale]\n draw_poly(\n drawer,\n center=struct_center,\n sides=3,\n scale=int(0.5 * scale),\n color=COLORS[color],\n border=COLORS[border],\n rotation=cmath.pi / 6,\n )\n else:\n struct_center = [center[0], center[1] + 0.35 * scale]\n draw_poly(\n drawer,\n center=struct_center,\n sides=8,\n scale=int(0.4 * scale),\n color=COLORS[color],\n border=COLORS[border],\n rotation=cmath.pi / 8,\n )\n\n player_count = len(tile.players) - tile.players.count(None)\n if player_count:\n draw_width = 0.4 * scale * (player_count - 1)\n used_count = 0\n for player, truth in enumerate(tile.players):\n if truth is not None:\n x, y = center\n x += used_count * 0.4 * scale - draw_width / 2\n y -= 0.15 * scale\n\n used_count += 1\n\n if truth:\n draw_poly(\n drawer,\n center=(x, y),\n sides=20,\n scale=int(0.3 * scale),\n color=COLORS[f\"p{player + 1}\"],\n # border=COLORS['black'],\n )\n else:\n draw_poly(\n drawer,\n center=(x, y),\n sides=4,\n scale=int(0.3 * scale),\n color=COLORS[f\"p{player + 1}\"],\n # border=COLORS['black'],\n rotation=cmath.pi / 4,\n )", "def generate(self, level):\n # TODO The dungeon's instances are spawned and loaded here.\n # fill map with \"blocked\" tiles\n level.maze = [[Tile(x, y, True) for y in range(level.height)] for x in range(level.width)]\n\n for r in range(level.max_rooms):\n # random width and height\n w = random.randint(level.min_room_size, level.max_room_size)\n h = random.randint(level.min_room_size, level.max_room_size)\n\n # random position without going out of the boundaries of the map\n x = random.randint(0, level.width - w - 1)\n y = random.randint(0, level.height - h - 1)\n\n # \"DungeonRoom\" class makes rectangles easier to 
work with\n new_room = Room(x, y, w, h)\n level.rooms.append(new_room)\n\n # run through the other rooms and see if they intersect with this one\n failed = False\n for other_room in level.rooms:\n if other_room is not new_room and new_room.intersect(other_room):\n failed = True\n break\n\n if not failed:\n # this means there are no intersections, so this room is valid\n\n # \"paint\" it to the map's tiles\n self._create_room(level, new_room)\n\n # center coordinates of new room, will be useful later\n new_x, new_y = new_room.center()\n\n if level.num_rooms > 0:\n # connect it to the previous room with a tunnel\n # center coordinates of previous room\n (prev_x, prev_y) = level.rooms[level.num_rooms - 1].center()\n\n # draw a coin (random number that is either 0 or 1)\n if random.randint(0, 1) == 1:\n # first move horizontally, then vertically\n self._create_h_tunnel(level, prev_x, new_x, prev_y)\n self._create_v_tunnel(level, prev_y, new_y, new_x)\n else:\n # first move vertically, then horizontally\n self._create_v_tunnel(level, prev_y, new_y, prev_x)\n self._create_h_tunnel(level, prev_x, new_x, new_y)\n\n # finally, append the new room to the list\n level.rooms.append(new_room)\n level.num_rooms += 1\n\n # connect them with a tunnel\n self._create_h_tunnel(level, 25, 55, 23)", "def draw(self, canvas):\n canvas.draw_polygon([self._top_left_, self._top_right_, self._bot_right_, self._bot_left_],\n 3, \"red\")\n # draw_image(image, center_source, width_height_source, center_dest, width_height_dest, rotation=0)\n # print(\"self._tilemap_coord[0]\", self._tilemap_coord[0])\n # print(\"self._tilemap_coord[1]\", self._tilemap_coord[1])\n canvas.draw_image(\n # image\n PLATFORM_TILEMAP,\n # center_source\n [(self._tilemap_coord[0] + 0.5) * shooter_global_variables.TILE_DIM,\n (self._tilemap_coord[1] + 0.5) * shooter_global_variables.TILE_DIM],\n # width_height_source\n [shooter_global_variables.TILE_DIM, shooter_global_variables.TILE_DIM],\n # center_dest\n self._pos_,\n # width_height_dest\n PLATFORM_INFO.get_size())\n canvas.draw_text(str(round(self._pos_[1] / TILE_DIM - 1)) + \", \"\n + str(round(self._pos_[0] / TILE_DIM - 1)),\n [self._top_left_[0] + TILE_DIM / 3, self._pos_[1]], 20, \"white\")\n # draw tilemap here", "def drawMap(self, lmap):\n w = lmap.width\n h = lmap.height\n # set size of canvas and create bitmap of same size\n self.config(width=w, height=h, xscrollincrement=1, yscrollincrement=1)\n self.im = PhotoImage(width=w, height=h)\n # copy colors corresponding to lmap characters into bitmap and create on canvas\n for row in range(h):\n for col in range(w):\n if lmap.isKey((col, row)):\n color = 'green3'\n elif lmap.isDoor((col, row)):\n color = 'red'\n else:\n color = self.colorMap(lmap.getCell((col, row)))\n self.im.put(color, (col, row))\n self.original = self.create_image(0, 0, image=self.im, anchor=NW)", "def draw_level(self, DISP, level:int):\r\n windowsize = DISP.get_size()\r\n Level_Text_Obj = self.FontObj.render(\"LEVEL: \" + str(level), True, Colors.colors['WHITE'])\r\n Level_Text_rec = Level_Text_Obj.get_rect()\r\n Level_Text_rec.top = windowsize[1] - Level_Text_rec.height\r\n Level_Text_rec.left = windowsize[0] - Level_Text_rec.width\r\n DISP.blit(Level_Text_Obj, Level_Text_rec)", "def drawRow(gm, Row):\n for Column in range(MapSize):\n img = Images[gm.Grid[Column][Row][-1].Name]\n Screen.fill(Colors[\"white\"], \n [(TileMargin + TileWidth) * Column + TileMargin,\n (TileMargin + TileHeight) * Row + TileMargin,\n TileWidth, TileHeight])\n Screen.blit(img, \n 
((TileMargin + TileWidth) * Column + TileMargin, \n (TileMargin + TileHeight) * Row + TileMargin))", "def draw(self, img, tile_img, tiles):\n rect = get_tile_rect(self.pos)\n rect = Rect([rect.x + self.anim_offset.x, rect.y + self.anim_offset.y, rect.w, rect.h])\n img.blit(tile_img, rect, tiles[self.tile])", "def onPaint(self, event):\n\n dc = wx.PaintDC(self)\n self.drawTilesLayers(dc)", "def render_map(self):\n # first we create a blank image, on which we will draw the base map\n width = self.image_size[0]\n height = self.image_size[1]\n # ex: size of the image 1080 height, 1920 width, 3 channels of colour\n base_map = np.zeros((height, width, 3), np.uint8)\n base_map[:, :] = self.background_color\n\n # we draw each shape of the dictionary on the blank image\n for shape_id in self.shape_dict_filt:\n shape = self.shape_dict_filt[shape_id]\n points = shape.points\n pts = np.array(points, np.int32)\n cv2.polylines(base_map, [pts], True, shape.color_line,\n shape.line_thick, cv2.LINE_AA)\n\n self.map_file = base_map", "def draw_grid(self, tile_img, tiles):\n #debug_print(\"drawing level\", data)\n img = Surface((self.xsize * SIZE, self.ysize * SIZE))\n for pos, char in self:\n rect = get_tile_rect(pos)\n img.blit(tile_img, rect, tiles[char])\n return img", "def paint(self, world):\n #camera = world.player.get_camera()\n\n for position, tile in world.tiles.items():\n x, y = position\n libtcod.console_set_default_foreground(self.console, tile.color)\n libtcod.console_put_char(self.console, x, y, tile.character, libtcod.BKGND_NONE)\n for position, entity in world.entities.items():\n x, y = position\n libtcod.console_set_default_foreground(self.console, entity.color)\n libtcod.console_put_char(self.console, x, y, entity.character,\n libtcod.BKGND_NONE)\n self._blit()\n self._flush()", "def draw(self, base, level):\n\n a = base.a\n b = base.b\n\n if level > 0:\n delta = base.b - base.a\n px = a.x + delta.x / 3\n py = a.y + delta.y / 3\n rx = a.x + 2 * delta.x / 3\n ry = a.y + 2 * delta.y / 3\n p = Point(px, py)\n r = Point(rx, ry)\n q = Point(rx, ry)\n q.rotate_deg(60, p)\n self.draw(Line(a,p), level-1)\n self.draw(Line(p,q), level-1)\n self.draw(Line(q,r), level-1)\n self.draw(Line(r,b), level-1)\n else:\n self.container.window.create_line(a.x, a.y, b.x, b.y)", "def draw(self,screen):\n for tile in self.tile_list:\n screen.blit(tile[0],tile[1])\n # pygame.draw.rect(screen,(255,255,255),tile[1],2)\n\n for tile in self.objList:\n screen.blit(tile[0],tile[1])\n # pygame.draw.rect(screen,(255,255,255),tile[1],2)\n # rectangle print for tiles", "def render_all(con, map_renderer, panel, entities, player, game_map, fov_map,\n fov_recompute, message_log, screen_width, screen_height, bar_width,\n panel_height, panel_y, mouse, colors, game_state):\n if fov_recompute:\n # Draw all the tiles in the game map\n for y in range(game_map.height):\n for x in range(game_map.width):\n tile = game_map.tiles[x][y]\n\n visible = libtcod.map_is_in_fov(fov_map, x, y)\n explored = tile.explored\n\n if visible:\n # print tile\n map_renderer.draw(x, y, tile, light=True)\n\n game_map.tiles[x][y].explored = True\n\n elif explored:\n map_renderer.draw(x, y, tile, light=False)\n\n entities_in_render_order = sorted(entities, key=lambda x: x.render_order.value)\n\n # Draw all entities in the list\n for entity in entities_in_render_order:\n map_renderer.draw_entity(entity, fov_map)\n\n libtcod.console_blit(con, 0, 0, screen_width, screen_height, 0, 0, 0)\n\n if game_state == GameStates.SHOW_INVENTORY:\n inventory_menu(con, 'Press 
the key next to an item to use it, '\n 'or Esc to cancel.\\n',\n player.inventory, 50, screen_width, screen_height)\n\n # clear panel\n libtcod.console_set_default_background(panel, colors['panel_back'])\n libtcod.console_clear(panel)\n\n # print the game_messages, one line at a time\n y = 1\n for message in message_log.messages:\n libtcod.console_set_default_foreground(panel, message.color)\n libtcod.console_print_ex(panel, message_log.x, y, libtcod.BKGND_NONE, libtcod.LEFT, message.text)\n y += 1\n\n # print the hp bar\n render_bar(panel, 1, 1, bar_width, 'HP', player.fighter.hp,\n player.fighter.max_hp,\n colors['hp_bar_front'],\n colors['hp_bar_back'],\n colors['hp_bar_foreground'])\n\n libtcod.console_set_default_foreground(panel, libtcod.light_gray)\n libtcod.console_print_ex(panel, 1, 0, libtcod.BKGND_NONE, libtcod.LEFT,\n get_names_under_mouse(mouse, entities, fov_map))\n\n libtcod.console_blit(panel, 0, 0, screen_width, panel_height, 0, 0, panel_y)", "def draw_world(self, world):\n\n # Clear screen\n self.window.fill(self.background_color)\n\n # Draw board\n for y in range(0, self.height):\n for x in range(0, self.width):\n c = BLACK\n\n blip_count = 0\n if world.map[y][x].blips:\n blip_count = len(world.map[y][x].blips)\n\n # Color code the blip's health %\n total_status = (0, 0, 0)\n\n for b in world.map[y][x].blips:\n status = b.get_status()\n total_status = tuple(map(sum, zip(total_status, status)))\n\n # Get average health in case of multiple blips\n hp = min(total_status)\n c = (255, 255 * hp / blip_count, 0)\n\n elif world.map[y][x].type == \"water\":\n c = WATER\n elif world.map[y][x].type == \"forest\":\n # Make sure the tile doesn't disappear completely\n fill_percent = max(world.map[y][x].value / params.FOOD_SIZE, 0.2)\n c = (0, 255 * fill_percent, 0)\n\n pygame.draw.rect(self.window, c, self.board[y][x], 0)\n\n # Add count for multiple blips in a tile\n if blip_count > 1:\n self.add_text(str(blip_count), BLACK, self.board[y][x].center, self.font)\n\n # Draw grid lines\n for i in range(0, self.height):\n screen_y = i * self.block_size\n pygame.draw.line(self.window, OUTLINE, (0, screen_y), (self.width * self.block_size, screen_y), 2)\n for i in range(0, self.width):\n screen_x = i * self.block_size\n pygame.draw.line(self.window, OUTLINE, (screen_x, 0), (screen_x, self.height * self.block_size), 2)\n\n # Write population count\n pos = (self.width * self.block_size / 2, 20)\n self.add_text(str(len(world.blips.keys())), WHITE, pos, self.counter_font)\n\n # Render to screen\n pygame.display.flip()", "def renderLayer(name, z, x, y, ntiles, map, suffix = 'png', useCairo = False):\n console.debugMessage(' Rendering layer: ' + name)\n env = getMercTileEnv(z, x, y, ntiles, True)\n tilesize = getTileSize(ntiles, True)\n map.zoom_to_box(env)\n if useCairo and USE_CAIRO:\n assert mapnik.has_cairo()\n surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, tilesize, tilesize)\n mapnik.render(map, surface)\n image = mapnik.Image.from_cairo(surface)\n else: \n image = mapnik.Image(tilesize, tilesize)\n mapnik.render(map, image)\n return image", "def draw(self):\n self.screen_surf.fill(BKGD_COLOUR)\n self.all_tiles.draw(self.screen_surf) # Tiles before other sprites.\n self.nests.draw(self.screen_surf) # Nests before chipmunks.\n self.chipmunks.draw(self.screen_surf)\n self.acorns.draw(self.screen_surf)\n self.screen_surf.blit(self.acorn_surf, self.acorn_surf.get_rect())\n self.screen_surf.blit(self.timer_surf, self.timer_rect)", "def print_level(self, list_level, window, begin, wall, 
end):\n\t\tfor y in range(0,15):\n\t\t\tfor x in range(0,15):\n\t\t\t\tif list_level[y][x] == 'd':\n\t\t\t\t\tposition_x = x * 30\n\t\t\t\t\tposition_y = y * 30\n\t\t\t\t\twindow.blit(begin, (position_x,position_y))\n\t\t\t\telif list_level[y][x] == 'm':\n\t\t\t\t\tposition_x = x * 30\n\t\t\t\t\tposition_y = y * 30\n\t\t\t\t\twindow.blit(wall, (position_x,position_y))\n\t\t\t\telif list_level[y][x] == 'a':\n\t\t\t\t\tposition_x = x * 30\n\t\t\t\t\tposition_y = y * 30\n\t\t\t\t\twindow.blit(end, (position_x,position_y))\n\t\t\t\telse: # it's a 0\n\t\t\t\t\tcontinue", "def draw(self, screen):\n \n # Draw the background\n screen.fill(CAVE)\n screen.blit(self.background,(self.world_shift // 3,0))\n \n # Draw all the sprite lists that we have\n self.platform_list.draw(screen)\n #self.enemy_list.draw(screen)\n self.enemy_list.draw(screen)", "def _draw_members(self, map, **kwargs):\n wcs_list = self._indv_mem_wcslist()\n\n for wcs in wcs_list:\n poly = SphericalPolygon.from_wcs(wcs)\n poly.draw(map, **kwargs)", "def drawMap(self,parentControl):\n if not self.Map:\n print \"Error: no map\"\n return\n \n rows=len(self.Map)\n columns=len(self.Map[0])\n \n listCells = []\n frameCheckerBoard=Frame(parentControl)\n for i in range(0,rows):\n listCells.append([])\n for j in range(0,columns):\n cellType = self.Map[i][j]\n if(cellType == \" \"):\n imgCell = self.createImageCell(frameCheckerBoard, (i, j),self.FloorImage)\n elif(cellType == \"^\"):\n imgCell = self.createImageCell(frameCheckerBoard, (i, j),self.WallImage)\n elif(cellType == \"~\"):\n imgCell = self.createImageCell(frameCheckerBoard, (i, j),self.WaterImage)\n elif(cellType == \"$\"):\n imgCell = self.createImageCell(frameCheckerBoard, (i, j),self.FloorImage)\n imgCell.changeForeground(self.CoinImage)\n elif(re.match(\"[a-zA-Z]\", cellType)):\n imgCell = self.createImageCell(frameCheckerBoard, (i, j),self.FloorImage)\n imgCell.changeForeground(self.PlayerImage)\n listCells[i].append(imgCell)\n frameCheckerBoard.grid(row = 0, column = 0, sticky = N+E+W+S)\n self.drawLegend(parentControl)\n parentControl.rowconfigure(0, weight = 1)\n parentControl.columnconfigure(0, weight = 1)\n \n for x in range(0,rows):\n frameCheckerBoard.rowconfigure(x, weight=1)\n \n for y in range(0,columns):\n frameCheckerBoard.columnconfigure(y, weight=1)\n \n return listCells", "def draw_tiles(self):\n db = self.double_buffer\n if db is not None:\n span_x = self.width\n span_y = self.height\n tiles_x = int(ceil(span_x/256.0))\n tiles_y = int(ceil(span_y/256.0))\n\n cc = cairo.Context(db)\n tiles = self.tile_loader.load_area(self.longitude,self.latitude,self.zoom,tiles_x,tiles_y)\n tile_number=0\n line_number=0\n\n x_center = self.width/2# - 128\n y_center = self.height/2# - 128\n offset_x,offset_y = self.tile_loader.gmap_tile_xy_from_coord(self.longitude,self.latitude,self.zoom)\n\n\n xtiles = len(tiles[0])\n ytiles = len(tiles)\n #print len(tiles),len(tiles[0])\n for line in tiles:\n for tile in line:\n x = (tile_number - int(xtiles/2)) * 256 + x_center\n y = (line_number - int(ytiles/2)) * 256 + y_center\n finalx = x - offset_x #+128\n finaly = y - offset_y #+128\n cc.set_source_surface(tile, finalx+self.dx, finaly+self.dy)\n cc.paint()\n tile_number += 1\n tile_number = 0\n line_number += 1\n\n self.draw_cross(cc,x_center,y_center)\n self.draw_points(cc)\n\n db.flush()\n\n else:\n print('Invalid double buffer')", "def generate(self):\n self.coins = 0\n self.crystals = 0\n all_sprite.empty()\n levels_group.empty()\n tiles_group.empty()\n game_objects.empty()\n 
enemy_group.empty()\n hearts_group.empty()\n key_group.empty()\n player_group.empty()\n count_tiles = len(self.level_map * len(self.level_map[0]))\n percent_one_tile = count_tiles // 100\n if not percent_one_tile:\n percent_one_tile = 1\n current_tile = 0\n for y in range(len(self.level_map)):\n for x in range(len(self.level_map[0])):\n if self.level_map[y][x] == '@':\n if not player_group.sprite:\n player = Player(x, y)\n else:\n print('There is already a player on the map')\n terminate()\n elif self.level_map[y][x] == '>':\n Enemy(x, y, ROTATION_RIGHT)\n elif self.level_map[y][x] == '<':\n Enemy(x, y, ROTATION_LEFT)\n elif self.level_map[y][x] in GAME_OBJECTS_DICT:\n file_name, configuration = GAME_OBJECTS_DICT[self.level_map[y][x]]\n if file_name == 'heart.png':\n Heart(x, y, configuration=configuration)\n elif file_name == 'pointer.png':\n CheckPoint(x, y, configuration=configuration)\n elif file_name == 'button.png':\n ButtonJump(x, y, configuration=configuration)\n elif file_name == 'key.png':\n Key(x, y, configuration=configuration)\n elif file_name == 'door.png':\n Door(x, y, configuration=configuration)\n elif file_name == 'stairs.png':\n Stairs(x, y, configuration=configuration)\n elif file_name == 'coin.png':\n Coin(x, y, configuration=configuration)\n self.coins += 1\n elif file_name == 'crystal.png':\n Crystal(x, y, configuration=configuration)\n self.crystals += 1\n else:\n GameObject(x, y, file_name, configuration=configuration)\n elif self.level_map[y][x] != '#' and self.level_map[y][x] != ' ':\n Tile(x, y, self.level_map[y][x])\n current_tile += 1\n self.show_loading_level(current_tile // percent_one_tile)\n if not player_group.sprite:\n print('There is no player on the level')\n terminate()\n camera.update(player)\n for sprite in all_sprite.sprites():\n camera.apply(sprite)\n camera.set_memory(0, 0)\n return player", "def load_level(level):\n\n global spawn_boxes\n\n level = pytmx.load_pygame('maps/level_' + level + '.tmx')\n\n y_num = 0\n for x, y, gid in level.get_layer_by_name('Objects'):\n if level.get_tile_image_by_gid(gid) != None:\n matrix[y_num].append(1)\n else:\n matrix[y_num].append(0)\n \n if x == 19: y_num += 1\n\n spawn_boxes = [] # Areas in which enemies can spawn. 
Requires tiled type 'spawn_box'\n for obj in level.get_layer_by_name('Triggers'):\n if obj.type == 'spawn_box':\n rect = pygame.rect.Rect(obj.x, obj.y, obj.width, obj.height)\n if obj.name == 'north': \n rect = rect.move(0, -64)\n rect.height += 64\n if obj.name == 'east': \n rect = rect.move(64, 0)\n rect.width += 64\n if obj.name == 'south': \n rect = rect.move(0, 64)\n rect.height += 64\n if obj.name == 'west': \n rect = rect.move(-64, 0)\n rect.width += 64\n spawn_boxes.append(rect)\n\n return level", "def draw(self, window):\n if self.selected:\n self.menu.draw(window) #Drawing menu\n window.blit(kill_count_table, (self.x + self.width // 2 - 15, self.y - self.height // 2 + 35))\n kills = self.font.render(str(self.kill_count) + \" Kills\", 1, (255, 255, 255))\n window.blit(kills, (self.x + self.width // 2 + 5, self.y - self.height // 2 + 43))\n\n tower_image = self.tower_images[self.level-1]\n\n if not self.level_up_animation: #Always draw the tower except when leveling up\n window.blit(tower_image, (self.x - tower_image.get_width() // 2, self.y - tower_image.get_height() // 2))\n\n else: #Leveling up animation procedure\n window.blit(self.level_up[self.level_animation // 2], (self.x - tower_image.get_width() - 75, self.y - 225))\n self.level_animation += 1\n if self.level_animation == len(level_up) * 2:\n self.level_up_animation = False\n self.level_animation = 0", "def map_displayer(stage, player,\n stage_tiles, TILES, special_tiles, default_tile):\n color.write(\"=============================================\\n\",\"BUILTIN\") # Hard seperation to show that a new turn has begun\n # Setup variables\n x = 1\n y = stage[1]\n player_x = player[0]\n player_y = player[1]\n\n while y > 0:\n while x < stage[0]+1:\n if x == player_x and y == player_y:\n color.write(TILES.get(\"player\", \"X\"), \"hit\")\n\n elif (\"{0},{1}\".format(x, y) in stage_tiles\n and \"{0},{1}\".format(x, y) in special_tiles):\n if (stage_tiles[\"{0},{1}\".format(x, y)] == \"npc\"\n or stage_tiles[\"{0},{1}\".format(x, y)] == \"sign\"):\n tile = stage_tiles.get(\"{0},{1}\".format(x, y), default_tile)\n color.write(TILES[tile], \"KEYWORD\")\n \n else:\n tile = stage_tiles.get(\"{0},{1}\".format(x, y), default_tile)\n color.write(TILES[tile], \"STRING\")\n\n elif \"{0},{1}\".format(x, y) in stage_tiles:\n if (stage_tiles[\"{0},{1}\".format(x, y)] == \"rock\"\n or stage_tiles[\"{0},{1}\".format(x, y)] == \"mountain\"):\n tile = stage_tiles.get(\"{0},{1}\".format(x, y), default_tile)\n color.write(TILES[tile], \"stderr\")\n\n else:\n tile = stage_tiles.get(\"{0},{1}\".format(x, y), default_tile)\n color.write(TILES[tile], \"stdout\")\n\n elif \"{0},{1}\".format(x,y) in special_tiles:\n if (special_tiles[\"{0},{1}\".format(x, y)] == \"dark_water\"):\n tile = stage_tiles.get(\"{0},{1}\".format(x, y), default_tile)\n color.write(TILES[tile],\"stdin\") \n else:\n print(TILES[default_tile], end='')\n x += 1\n print(\" \",end='')\n print(\"\")\n y -= 1\n x = 1", "def draw(self, screen):\n\n # Draw the background\n screen.fill(constants.BLUE)\n\n # Draw all the sprite lists that we have\n\n self.decor.draw(screen)\n self.decorLayer.draw(screen)\n self.platform_quicksand.draw(screen)\n for boss in self.behind_boss_man:\n boss.draw(screen)\n\n self.behind_boss_man.draw(screen)\n self.platform_choose.draw(screen)\n self.platform_fallthrough.draw(screen)\n self.platform_slime.draw(screen)\n self.platform_list.draw(screen)\n self.enemy_list.draw(screen)\n self.kill_blocks.draw(screen)\n self.boss_man.draw(screen)\n 
self.end_blocks.draw(screen)\n self.attacks.draw(screen)\n\n for boss in self.boss_man:\n boss.draw(screen)", "def __init__(self, mapfile, camera=None, light=None,\r\n width=100.0, depth=100.0, height=10.0,\r\n divx=0, divy=0, ntiles=1.0, name=\"\",\r\n x=0.0, y=0.0, z=0.0, rx=0.0, ry=0.0, rz=0.0,\r\n sx=1.0, sy=1.0, sz=1.0, cx=0.0, cy=0.0, cz=0.0, smooth=True, cubic=False):\r\n super(ElevationMap, self).__init__(camera, light, name, x, y, z, rx, ry, rz,\r\n sx, sy, sz, cx, cy, cz)\r\n if divx > 200 or divy > 200:\r\n print(\"... Map size can't be bigger than 200x200 divisions\")\r\n divx = 200\r\n divy = 200\r\n if issubclass(type(mapfile), type(\"\")): #HORRIBLE. Only way to cope with python2v3\r\n if mapfile[0] != '/':\r\n mapfile = sys.path[0] + '/' + mapfile\r\n if VERBOSE:\r\n print(\"Loading height map ...\", mapfile)\r\n\r\n im = Image.open(mapfile)\r\n im = ImageOps.invert(im)\r\n else:\r\n im = mapfile #allow image files to be passed as mapfile\r\n ix, iy = im.size\r\n if (ix > 200 and divx == 0) or (divx > 0):\r\n if divx == 0:\r\n divx = 200\r\n divy = 200\r\n im = im.resize((divx, divy), Image.ANTIALIAS)\r\n ix, iy = im.size\r\n if not im.mode == \"P\":\r\n im = im.convert('P', palette=Image.ADAPTIVE)\r\n\r\n im = im.transpose(Image.FLIP_TOP_BOTTOM)\r\n im = im.transpose(Image.FLIP_LEFT_RIGHT)\r\n self.pixels = im.load()\r\n self.width = width\r\n self.depth = depth\r\n self.height = height\r\n self.ix = ix\r\n self.iy = iy\r\n self.ttype = GL_TRIANGLE_STRIP\r\n\r\n if VERBOSE:\r\n print(\"Creating Elevation Map ...\", ix, iy)\r\n\r\n wh = width * 0.5\r\n hh = depth * 0.5\r\n ws = width / ix\r\n hs = depth / iy\r\n ht = height / 255.0\r\n tx = 1.0*ntiles / ix\r\n ty = 1.0*ntiles / iy\r\n\r\n verts = []\r\n norms = []\r\n tex_coords = []\r\n idx = []\r\n\r\n for y in xrange(0, iy):\r\n for x in xrange(0, ix):\r\n hgt = (self.pixels[x, y])*ht\r\n this_x = -wh + x*ws\r\n this_z = -hh + y*hs\r\n if cubic:\r\n \"\"\" this is a bit experimental. It tries to make the map either zero\r\n or height high. Vertices are moved 'under' adjacent ones if there is\r\n a step to make vertical walls. 
Goes wrong in places - mainly because\r\n it doesn't check diagonals\r\n \"\"\"\r\n if hgt > height / 2:\r\n hgt = height\r\n else:\r\n hgt = 0.0\r\n if hgt == 0 and y > 0 and y < iy-1 and x > 0 and x < ix-1:\r\n if self.pixels[x-1, y] > 127:\r\n this_x = -wh + (x-1)*ws\r\n elif self.pixels[x+1, y] > 127:\r\n this_x = -wh + (x+1)*ws\r\n elif self.pixels[x, y-1] > 127:\r\n this_z = -hh + (y-1)*hs\r\n elif self.pixels[x, y+1] > 127:\r\n this_z = -hh + (y+1)*hs\r\n elif self.pixels[x-1, y-1] > 127:\r\n this_x = -wh + (x-1)*ws\r\n this_z = -hh + (y-1)*hs\r\n elif self.pixels[x-1, y+1] > 127:\r\n this_x = -wh + (x-1)*ws\r\n this_z = -hh + (y+1)*hs\r\n elif self.pixels[x+1, y-1] > 127:\r\n this_x = -wh + (x+1)*ws\r\n this_z = -hh + (y-1)*hs\r\n elif self.pixels[x+1, y+1] > 127:\r\n this_x = -wh + (x+1)*ws\r\n this_z = -hh + (y+1)*hs\r\n verts.append((this_x, hgt, this_z))\r\n tex_coords.append(((ix-x) * tx,(iy-y) * ty))\r\n\r\n s = 0\r\n #create one long triangle_strip by alternating X directions\r\n for y in range(0, iy-1):\r\n for x in range(0, ix-1):\r\n i = (y * ix)+x\r\n idx.append((i, i+ix, i+ix+1))\r\n idx.append((i+ix+1, i+1, i))\r\n s += 2\r\n\r\n self.buf = []\r\n self.buf.append(Buffer(self, verts, tex_coords, idx, None, smooth))", "def draw(self):\n\n super().draw()\n \n self.dim = self.getdim()\n start_x, start_y, = self.x(), self.y()\n\n for y in range(self.r):\n for x in range(self.c):\n x_pos, y_pos = start_x + (self.dim * x), start_y + (self.dim * y)\n self.tiles[y][x].resize(x_pos, y_pos, self.dim, self.dim)", "def draw(self,surface):\n surface.blit(self.image, self.rect)\n for moving in self.shots.values():\n moving.draw()", "def draw_board(self):\r\n for i in range(self.size):\r\n for k in range(self.size):\r\n left = k * self.CELL_SIZE + (k+1) * self.BORDER_WIDTH\r\n top = i * self.CELL_SIZE + (i+1) * self.BORDER_WIDTH\r\n rect = pygame.Rect(left, top, self.CELL_SIZE, self.CELL_SIZE)\r\n color = self.BG_COLOR\r\n if self.map[i][k] == self.BLOCK_CHAR:\r\n color = self.BLOCK_COLOR\r\n elif self.map[i][k] == self.START_CHAR:\r\n color = self.START_COLOR\r\n elif self.map[i][k] == self.END_CHAR:\r\n color = self.END_COLOR\r\n elif (k, i) in self.path:\r\n color = self.PATH_COLOR\r\n pygame.draw.rect(self.screen, color, rect)", "def drawMonoPolygonLayer(self, dc, polys, map_rel, colour, size, filled,\n attributes):\n\n if polys is None:\n return\n\n dc.SetPen(wx.Pen(colour, width=size))\n if filled:\n dc.SetBrush(wx.Brush(colour))\n else:\n dc.SetBrush(wx.TRANSPARENT_BRUSH)\n\n if map_rel:\n for p in polys:\n p_lonlat = []\n for (lon, lat) in p:\n posn = self.convertGeo2View(lon, lat)\n p_lonlat.append(posn)\n dc.DrawPolygon(p_lonlat)\n else:\n for p in polys:\n pp = [wx.Point(point[0], point[1]) for point in p]\n dc.DrawPolygon(pp)", "def Draw(self):\n\t\tGameImage.Draw(self, self.coords)", "def draw(self, screen, size_block):\n for co in self.get_all_coordinates():\n pos = self.board.coordinate_to_position(co)\n screen.blit(pygame.transform.scale(self.image, (size_block, size_block)), pos)", "def draw_map(self, heightmap, height_modifier):\n # get size\n size = (len(heightmap), len(heightmap[0]))\n\n # create image\n hmap = PNMImage(size[0], size[1])\n\n # draw map\n for y in range(size[0]):\n for x in range(size[1]):\n h = (heightmap[x][y]) * height_modifier\n try:\n hmap.setXel(x, y, h)\n except:\n print \"Error on x,y: \", str((x, y)), \"; map --> 0-255 value: \", str((heightmap[x][y], h))\n\n return hmap", "def demo_pyglet(file_name):\n\n import pyglet\n from pyglet.gl 
import glTranslatef, glLoadIdentity\n\n world_map = TileMapParser().parse_decode(file_name)\n # delta is the x/y position of the map view.\n # delta is a list because the scoping is different for immutable types.\n # This list can be used within the update method.\n delta = [0.0, 0.0]\n window = pyglet.window.Window()\n\n @window.event\n def on_draw():\n window.clear()\n # Reset the \"eye\" back to the default location.\n glLoadIdentity()\n # Move the \"eye\" to the current location on the map.\n glTranslatef(delta[0], delta[1], 0.0)\n batch.draw()\n\n keys = pyglet.window.key.KeyStateHandler()\n window.push_handlers(keys)\n world_map.load(ImageLoaderPyglet())\n\n def update(dt):\n speed = 3.0 + keys[pyglet.window.key.LSHIFT] * 6.0\n if keys[pyglet.window.key.LEFT]:\n delta[0] += speed\n if keys[pyglet.window.key.RIGHT]:\n delta[0] -= speed\n if keys[pyglet.window.key.UP]:\n delta[1] -= speed\n if keys[pyglet.window.key.DOWN]:\n delta[1] += speed\n\n # Generate the graphics for every visible tile.\n batch = pyglet.graphics.Batch()\n groups = []\n sprites = []\n for group_num, layer in enumerate(world_map.layers[:]):\n if layer.visible is False:\n continue\n groups.append(pyglet.graphics.OrderedGroup(group_num))\n for xtile in range(layer.width):\n for ytile in range(layer.height):\n image_id = layer.content2D[xtile][ytile]\n if image_id:\n # o_x and o_y are offsets. They are not helpful here.\n o_x, o_y, image_file = world_map.indexed_tiles[image_id]\n # To compensate for pyglet's upside-down y-axis, the\n # Sprites are placed in rows that are backwards compared\n # to what was loaded into the map. The \"max - current\"\n # formula does this reversal.\n sprites.append(pyglet.sprite.Sprite(image_file,\n xtile * world_map.tilewidth,\n layer.pixel_height - (ytile+1) * world_map.tileheight,\n batch=batch, group=groups[group_num]))\n\n pyglet.clock.schedule_interval(update, 1.0 / 60.0)\n pyglet.app.run()", "def draw(self):\n self.screen.fill((0,51,102))\n # get the new drawables\n self.drawables = (self.game_model.get_background_drawables()\n + self.game_model.get_plane_drawables()\n + self.game_model.get_bullet_drawables()\n + self.game_model.get_enemy_drawables())\n for d in self.drawables:\n rect = d.get_rect()\n surf = d.get_surface()\n surf.set_colorkey((255,255,255))\n self.screen.blit(surf, rect)", "def draw_path(self, path):\n palettes = pokemontools.map_gfx.read_palettes(self.config)\n map_image = pokemontools.map_gfx.draw_map(self.map_group_id, self.map_id, palettes, show_sprites=True, config=self.config)\n\n for coordinates in path:\n y = coordinates[0]\n x = coordinates[1]\n\n some_image = Image.new(\"RGBA\", (32, 32))\n draw = ImageDraw.Draw(some_image, \"RGBA\")\n draw.rectangle([(0, 0), (32, 32)], fill=(0, 0, 0, 127))\n\n target = [(x * 4, y * 4), ((x + 32) * 4, (y + 32) * 4)]\n\n map_image.paste(some_image, target, mask=some_image)\n\n return map_image", "def draw(self):\n\n # I reset it at 24 because they're 4 images and I want the reduce the animation speed by 6 (6*4=24)\n if self.spriteCount + 1 >= 24:\n self.spriteCount = 0\n if self.isJump:\n self.screen.blit(self.spriteJump[self.spriteCount // 6], (self.x_pos, self.y_pos))\n else:\n self.screen.blit(self.spriteFall[self.spriteCount // 6], (self.x_pos, self.y_pos))\n self.spriteCount += 1", "def draw(self, screen):\n \n # Background drawing code can be put here\n \n \n \n # Draw all the sprite lists that we have\n self.platform_list.draw(screen)\n self.enemy_list.draw(screen)", "def drawAllSprites(self):\n\n # disegno il 
labirinto\n self.walls.draw(self.scrollSurface)\n\n # disegno le monete\n self.coins.draw(self.scrollSurface)\n\n # disegno il giocatore\n self.player.draw(self.scrollSurface)\n\n # disegno i nemici\n self.enemies.draw(self.scrollSurface)\n\n # disegno le bombe\n self.bombs.draw(self.scrollSurface)\n\n # disegno le wall bombs\n self.wallBombs.draw(self.scrollSurface)\n\n # disegno i killer enemies\n self.enemyKillers.draw(self.scrollSurface)\n\n # disegno i ricaricatori del tempo\n self.timeReloaders.draw(self.scrollSurface)\n\n # disegno i greedy enemies\n self.greedyEnemies.draw(self.scrollSurface)\n\n # disegno i portali\n self.portals.draw(self.scrollSurface)\n\n # disegno i nemici che rendono invisibile il giocatore\n self.invisibilityPlayers.draw(self.scrollSurface)\n\n # disegno i proiettili del giocatore insieme allo sprite del bonus\n self.playerBullets.draw(self.scrollSurface)\n self.bonusPlayerBullets.draw(self.scrollSurface)\n\n # disegno i proiettili sparatu dai nemici\n self.shooterBullets.draw(self.scrollSurface)", "def draw_room(screen, grid, start_location):\n wall_image = pygame.image.load(\"images/pillar.png\")\n wall_image_transparent = pygame.image.load(\"images/pillar_80.png\")\n floor_image = pygame.image.load(\"images/floor.png\")\n computer_image = pygame.image.load(\"images/desk_computer.png\")\n\n # map_to_image = [floor_image, # 0\n # wall_image, # 1\n # wall_image_transparent, # 2\n # computer_image] # 3\n map_to_image = {\n \"0\": floor_image,\n \"1\": wall_image,\n \"2\": wall_image_transparent,\n \"3\": computer_image,\n \"10\": wall_image # Secret passage\n }\n # better tile management for multiple environments / create multiple environments.\n # 0 = floor, 1 = wall (pillar)\n # First draw floor everywhere\n max_dimensions = grid.shape\n for r in range(max_dimensions[0]):\n for c in range(max_dimensions[1]):\n screen.blit(floor_image, (c * 30 + start_location[0],\n r * 30 + start_location[1]))\n\n for tile_type in [1, 2, 3, 10]:\n the_rows, the_cols = np.where(grid == tile_type)\n for i in range(len(the_cols)):\n screen.blit(map_to_image[str(tile_type)], (the_cols[i] * 30 + start_location[0],\n the_rows[i] * 30 + start_location[1]))", "def make_floor(self):\n\n for y in range(0, self.num_tiles[1] + 1):\n for x in range(0, self.num_tiles[0] + 1):\n offset = (x * self.tile.size[0], y * self.tile.size[1])\n self.image.blit(self.tile.image, offset)", "def __init__(self, mapfile, camera=None, light=None,\n width=100.0, depth=100.0, height=10.0,\n divx=0, divy=0, ntiles=1.0, name=\"\",\n x=0.0, y=0.0, z=0.0, rx=0.0, ry=0.0, rz=0.0,\n sx=1.0, sy=1.0, sz=1.0, cx=0.0, cy=0.0, cz=0.0, smooth=True, cubic=False):\n super(ElevationMap, self).__init__(camera, light, name, x, y, z, rx, ry, rz,\n sx, sy, sz, cx, cy, cz)\n if mapfile[0] != '/':\n mapfile = sys.path[0] + '/' + mapfile\n if VERBOSE:\n print(\"Loading height map ...\", mapfile)\n\n if divx > 200 or divy > 200:\n print(\"... 
Map size can't be bigger than 200x200 divisions\")\n divx = 200\n divy = 200\n\n im = Image.open(mapfile)\n im = ImageOps.invert(im)\n ix, iy = im.size\n if (ix > 200 and divx == 0) or (divx > 0):\n if divx == 0:\n divx = 200\n divy = 200\n im = im.resize((divx, divy), Image.ANTIALIAS)\n ix, iy = im.size\n if not im.mode == \"P\":\n im = im.convert('P', palette=Image.ADAPTIVE)\n\n im = im.transpose(Image.FLIP_TOP_BOTTOM)\n im = im.transpose(Image.FLIP_LEFT_RIGHT)\n self.pixels = im.load()\n self.width = width\n self.depth = depth\n self.height = height\n self.ix = ix\n self.iy = iy\n self.ttype = GL_TRIANGLE_STRIP\n\n if VERBOSE:\n print(\"Creating Elevation Map ...\", ix, iy)\n\n wh = width * 0.5\n hh = depth * 0.5\n ws = width / ix\n hs = depth / iy\n ht = height / 255.0\n tx = 1.0*ntiles / ix\n ty = 1.0*ntiles / iy\n\n verts = []\n norms = []\n tex_coords = []\n idx = []\n\n for y in xrange(0, iy):\n for x in xrange(0, ix):\n hgt = (self.pixels[x, y])*ht\n this_x = -wh + x*ws\n this_z = -hh + y*hs\n if cubic:\n \"\"\" this is a bit experimental. It tries to make the map either zero\n or height high. Vertices are moved 'under' adjacent ones if there is\n a step to make vertical walls. Goes wrong in places - mainly because\n it doesn't check diagonals\n \"\"\"\n if hgt > height / 2:\n hgt = height\n else:\n hgt = 0.0\n if hgt == 0 and y > 0 and y < iy-1 and x > 0 and x < ix-1:\n if self.pixels[x-1, y] > 127:\n this_x = -wh + (x-1)*ws\n elif self.pixels[x+1, y] > 127:\n this_x = -wh + (x+1)*ws\n elif self.pixels[x, y-1] > 127:\n this_z = -hh + (y-1)*hs\n elif self.pixels[x, y+1] > 127:\n this_z = -hh + (y+1)*hs\n elif self.pixels[x-1, y-1] > 127:\n this_x = -wh + (x-1)*ws\n this_z = -hh + (y-1)*hs\n elif self.pixels[x-1, y+1] > 127:\n this_x = -wh + (x-1)*ws\n this_z = -hh + (y+1)*hs\n elif self.pixels[x+1, y-1] > 127:\n this_x = -wh + (x+1)*ws\n this_z = -hh + (y-1)*hs\n elif self.pixels[x+1, y+1] > 127:\n this_x = -wh + (x+1)*ws\n this_z = -hh + (y+1)*hs\n verts.append((this_x, hgt, this_z))\n tex_coords.append(((ix-x) * tx,(iy-y) * ty))\n\n s = 0\n #create one long triangle_strip by alternating X directions\n for y in range(0, iy-1):\n for x in range(0, ix-1):\n i = (y * ix)+x\n idx.append((i, i+ix, i+ix+1))\n idx.append((i+ix+1, i+1, i))\n s += 2\n\n self.buf = []\n self.buf.append(Buffer(self, verts, tex_coords, idx, None, smooth))", "def draw(self, surface):\n temp = pygame.Surface(self.renderer.pixel_size)\n self.renderer.render_map(temp)\n pygame.transform.smoothscale(temp, surface.get_size(), surface)", "def renderTiles(self, window):\n window.blit(self.TileSurface, (self.x, self.y),(self.textures))", "def compile_levels():\n \n for ogmo_filename in [x for x in os.listdir(MAP_SRC_DIR) if x.endswith('.oel')]:\n ogmo_path = os.path.join(MAP_SRC_DIR, ogmo_filename)\n ogmo_flattened_path = os.path.join(MAP_COMPILED_DIR, ogmo_filename)\n\n if os.path.exists(ogmo_flattened_path):\n if os.path.getmtime(ogmo_flattened_path) > os.path.getmtime(ogmo_path):\n sys.stdout.write(\"--%s up to date\\n\" % ogmo_flattened_path)\n continue\n \n flatten_ogmo_tilemaps(ogmo_path, ogmo_flattened_path)", "def change_map(self, depth, *args, **kwargs):\n #Unload\n self.maplist[self.current_map_idx].on_unload()\n self.maplist[self.current_map_idx]._entities.remove(self._player)\n\n #Generate new maps if necessary\n while depth >= len(self.maplist):\n new_map = self.create_new_map()\n self.maplist.append(new_map)\n\n #Switch to the new map\n self.current_map_idx = depth\n 
self.load_map(self.maplist[self.current_map_idx])", "def use_level(self, level):\n\n if self.min_level <= level <= self.max_level:\n map_extent = self.tiles.use_level(level)\n if map_extent:\n self.level = level\n (self.map_width, self.map_height,\n self.ppd_x, self.ppd_y) = map_extent\n (self.map_llon, self.map_rlon,\n self.map_blat, self.map_tlat) = self.tiles.extent\n\n # do level change callback\n self.handleLevelChangeCallback(level)\n\n return True\n\n return False", "def draw(self, win):\n img = self.tower_imgs\n win.blit(img, (self.x - img.get_width() // 2, self.y - img.get_height() // 2))\n\n if self.selected:\n self.menu.draw(win)", "def render_tiles(self, tiles):\n for row in tiles:\n for tile in row:\n if tile is not None:\n if tile.height < 0:\n color = (0, 100, 0)\n else:\n z = max(0, tile.height)\n color = tuple([z * 255] * 3)\n self.surface.set_at((tile.x, tile.y), color)", "def draw_on(self, folium_map):", "def render_map(self, m, filename):\n\n # store this for later so we can post process the PDF\n self._filename = filename\n\n # work out the best scale to render out map at given the available\n # space\n (eff_width, eff_height) = self._get_render_area_size()\n map_aspect = m.envelope().width() / m.envelope().height()\n page_aspect = eff_width / eff_height\n\n scalex = m.envelope().width() / eff_width\n scaley = m.envelope().height() / eff_height\n\n scale = max(scalex, scaley)\n\n rounded_mapscale = self._scale(scale)\n scalefactor = scale / rounded_mapscale\n mapw = eff_width * scalefactor\n maph = eff_height * scalefactor\n if self._preserve_aspect:\n if map_aspect > page_aspect:\n maph = mapw * (1 / map_aspect)\n else:\n mapw = maph * map_aspect\n\n # set the map size so that raster elements render at the correct\n # resolution\n m.resize(*self._get_map_pixel_size(mapw, maph))\n # calculate the translation for the map starting point\n (tx, ty) = self._get_render_corner((mapw, maph), m)\n\n # create our cairo surface and context and then render the map into it\n self._s = cairo.PDFSurface(\n filename, m2pt(\n self._pagesize[0]), m2pt(\n self._pagesize[1]))\n ctx = cairo.Context(self._s)\n\n for l in m.layers:\n # extract the layer names for naming layers if we use OCG\n self._layer_names.append(l.name)\n\n layer_map = Map(m.width, m.height, m.srs)\n layer_map.layers.append(l)\n for s in l.styles:\n layer_map.append_style(s, m.find_style(s))\n layer_map.zoom_to_box(m.envelope())\n\n def render_map():\n ctx.save()\n ctx.translate(m2pt(tx), m2pt(ty))\n # cairo defaults to 72dpi\n ctx.scale(72.0 / self._resolution, 72.0 / self._resolution)\n render(layer_map, ctx)\n ctx.restore()\n\n # antimeridian\n render_map()\n if self._is_latlon and (\n m.envelope().minx < -180 or m.envelope().maxx > 180):\n old_env = m.envelope()\n if m.envelope().minx < -180:\n delta = 360\n else:\n delta = -360\n m.zoom_to_box(\n Box2d(\n old_env.minx + delta,\n old_env.miny,\n old_env.maxx + delta,\n old_env.maxy))\n render_map()\n # restore the original env\n m.zoom_to_box(old_env)\n\n if self._use_ocg_layers:\n self._s.show_page()\n\n self.scale = rounded_mapscale\n self.map_box = Box2d(tx, ty, tx + mapw, ty + maph)", "def draw_frame(self):\n self.render_surface.fill((135, 206, 235))\n # self.render_surface.fill((33, 38, 63))\n self.render_surface.blit(\n self.moon,\n (self.RENDER_SURFACE_WIDTH - 150, 80),\n special_flags=pygame.BLEND_ADD,\n )\n\n # draw background\n self.draw_background()\n\n self.render_surface.blit(\n self.assets.get_character_image(self.player),\n 
self.camera.translate(self.player.rect),\n )\n\n for enemy in self.enemies:\n pygame.draw.rect(\n self.render_surface, enemy.color, self.camera.translate(enemy.rect)\n )\n self.draw_enemy_health(enemy)\n\n # code to mask perticular block type.\n # for i in self.chunked_map.get_blocks():\n # if i.block_type == 4:\n # pygame.draw.rect(\n # self.render_surface, (255, 255, 255), self.camera.translate(i.rect)\n # )\n\n # draw tiles\n tiles = filter(\n lambda tile: not isinstance(tile, Reward) or tile.is_valid,\n self.chunked_map.get_blocks(),\n )\n tiles = map(self.get_tile_blit_seq, tiles)\n self.render_surface.blits(tiles)\n\n # draw particles\n for particle in self.particle_system.get_active_particles():\n pygame.draw.circle(\n self.render_surface,\n particle.color,\n self.camera.translate_xy(particle.center),\n particle.radius,\n )\n\n # self.draw_fps()\n # self.draw_score()\n self.draw_player_health()\n if self.player.attack_arc_end_deg != 300:\n self.draw_attack_arc(self.player)\n\n for enemy in filter(lambda e: e.attack_arc_end_deg != 300, self.enemies):\n self.draw_attack_arc(enemy)\n\n if not self.player.read_to_take_damage:\n red_s = pygame.Surface(\n (self.RENDER_SURFACE_WIDTH, self.RENDER_SURFACE_HEIGHT)\n )\n red_s.fill((100, 0, 0))\n self.render_surface.blit(red_s, (0, 0), special_flags=pygame.BLEND_ADD)", "def update_player_on_map():\n \n # Get's a constructed whole line from get_map_line()\n # Splits it and writes it one by one\n # Colours red if user has compass, and has previously discovered that position\n\n positions = (59, 61, 63, 65, 67, 69, 71, 73, 75, 77) # x coords of map tiles\n \n for i in range(1, 11):\n mapline = get_map_line(i)\n \n for mapdot in range(0, 10): # Split map line into 10 parts, and write them one by one\n whole_map_pos = ((i * 10) - 10) + mapdot # Use i to iterate through all the map lines\n if DISCOVERED[whole_map_pos] == \"Y\": # Fancy maths, works well for 10 x 10 grid\n mvaddstr(i, 59 + (mapdot * 2), mapline[mapdot], color_pair(DISCOVERED_MAP_COLOUR))\n else:\n mvaddstr(i, 59 + (mapdot * 2), mapline[mapdot], color_pair(MAP_COLOUR)) \n \n if LAST_LINE_HAD_PLYR: # Write the players avatar, and colour the players spot \n mvaddstr(i, positions[ZBPP], \"U\", color_pair(PLAYER_COLOUR) | A_BOLD)", "def draw(self):\n if (libt.map_is_in_fov(self.handler.fov_map, self.x, self.y) or \n self.handler.world.map[self.x][self.y].seen and self.visible_in_fog):\n libt.console_set_default_foreground(self.handler.game_map, self.colour)\n libt.console_put_char(self.handler.game_map, self.x, self.y, \n self.char, libt.BKGND_NONE)", "def draw(self, screen):\n for i in range(self.tiles_len):\n x, y = self.tilepos[i]\n screen.blit(self.images[i], (x, y))\n self.draw_text(screen, \"Moves : \" + str(self.nb_move), 40, 500, 10, 255, 255, 255, False)", "def draw_floor(screen, grid, start_location):\n floor_image = pygame.image.load(\"images/floor.png\")\n # better tile management for multiple environments / create multiple environments.\n # 0 = floor, 1 = wall (pillar)\n max_dimensions = grid.shape\n for r in range(max_dimensions[0]):\n for c in range(max_dimensions[1]):\n screen.blit(floor_image, (c * 30 + start_location[0],\n r * 30 + start_location[1]))", "def generate_map(self):\n map = Map.Map(50, 80, 1000, 10, 6)\n\n #here we can map out our larger map structure\n if self.level < 2:\n map.make_greathall()\n elif self.level >= 2 and self.level < 20:\n map.make_map()\n elif self.level >= 20:\n map.make_cave()\n else:\n map.make_map()\n return map", "def __init__(self, 
player, screen):\n\n # Call the parent constructor\n Level.__init__(self, player, screen)\n\n self.level_x_limit = -1380\n self.level_y_limit = 270\n\n\n # Array with type of platform, and x, y location of the platform.\n level = [[platforms.GRASS_MID, 15, 500],\n [platforms.GRASS_DIRT_LONG, 15, 575],\n [platforms.GRASS_RIGHT_EDGE, 225, 500],\n [platforms.GRASS_RIGHT_EDGE_DIRT, 225, 574],\n\n [platforms.GRASS_RIGHT_LONG, -68, 290],\n\n\n [platforms.GRASS_RIGHT_CORNER, 1274, 100],\n [platforms.GRASS_RIGHT_LONG, 1274, 192],\n [platforms.GRASS_LEFT_CORNER, 1193, 100],\n [platforms.GRASS_LEFT_LONG, 1193, 192],\n [platforms.GRASS_MID, 983, 193],\n [platforms.GRASS_DIRT_LONG, 983, 268],\n [platforms.GRASS_LEFT_CORNER, 901, 193],\n [platforms.GRASS_LEFT_LONG, 901, 285],\n [platforms.GRASS_MID, 691, 285],\n [platforms.GRASS_DIRT_LONG, 691, 360],\n [platforms.GRASS_LEFT_CORNER, 609, 285],\n [platforms.GRASS_LEFT_LONG, 609, 377],\n [platforms.GRASS_MID, 399, 377],\n [platforms.GRASS_DIRT_LONG, 399, 452],\n [platforms.GRASS_LEFT_CORNER, 318, 377],\n [platforms.GRASS_LEFT_LONG, 317, 469],\n\n\n [platforms.GRASS_LEFT_CORNER, 1558, 100],\n [platforms.GRASS_LEFT_LONG, 1558, 192],\n [platforms.GRASS_RIGHT_CORNER, 1639, 100],\n [platforms.GRASS_RIGHT_LONG, 1639, 192],\n [platforms.GRASS_MID, 1721, 193],\n [platforms.GRASS_DIRT_LONG, 1721, 268],\n [platforms.GRASS_RIGHT_CORNER, 1931, 193],\n [platforms.GRASS_RIGHT_LONG, 1931, 285],\n [platforms.GRASS_MID, 2013, 285],\n [platforms.GRASS_DIRT_LONG, 2013, 360],\n [platforms.GRASS_RIGHT_CORNER, 2223, 285],\n [platforms.GRASS_RIGHT_LONG, 2223, 377],\n [platforms.GRASS_MID, 2305, 377],\n [platforms.GRASS_DIRT_LONG, 2305, 452],\n [platforms.GRASS_RIGHT_CORNER, 2515, 377],\n [platforms.GRASS_RIGHT_LONG, 2515, 469],\n\n [platforms.GRASS_LEFT_EDGE, 2607, 500],\n [platforms.GRASS_LEFT_EDGE_DIRT, 2617, 574],\n [platforms.GRASS_MID, 2692, 500],\n [platforms.GRASS_DIRT_LONG, 2692, 575],\n\n\n ]\n\n # Go through the array above and add platforms\n for platform in level:\n block = platforms.Platform(platform[0])\n block.rect.x = platform[1]\n block.rect.y = platform[2]\n block.player = self.player\n self.platform_list.add(block)\n\n\n\n\n choosePort =[[platforms.PORTAL, 1356, 420, 3],\n\n [platforms.PORTAL, 2712, 320, 1],\n ]\n\n for port in choosePort:\n wego = platforms.ChooseLev(port[0], port[3])\n wego.rect.x = port[1]\n wego.rect.y = port[2]\n wego.player = self.player\n self.platform_choose.add(wego)\n\n\n\n background = platforms.backgroundGrass()\n background.rect.x = 0\n background.rect.y = 0\n self.decor.add(background)", "def draw(self, **kwargs):\n for o in sorted(self._drawables, key=default_itemgetter(\"z\", default=0)):\n o.draw(**kwargs)", "def generate_level(level):\n seed = level * 69420 # multiply by 69420 to not have the seeds too close to each other\n random.seed(seed)\n dimensions = get_map_size(level)\n level_map = np.full(dimensions, -1)\n while -1 in level_map:\n choice = random.choice(np.argwhere(level_map == -1))\n next_index = (choice[0], choice[1])\n # get indices of the tiles next to the current index\n left_index, up_index, right_index, down_index = get_direction_indices(next_index)\n left = tile_needs_connection(left_index, level_map, has_connection_right)\n up = tile_needs_connection(up_index, level_map, has_connection_down)\n right = tile_needs_connection(right_index, level_map, has_connection_left)\n down = tile_needs_connection(down_index, level_map, has_connection_up)\n level_map[next_index] = get_tile(left, up, right, down)\n return 
un_solve(level_map)", "def __init__(self, level):\n self.level = level\n self.my_map = {}\n self.my_level = []\n self.my_grid = []", "def generate_level(self):\n for _ in range(AMOUNT_REGIONS_TO_DRAW):\n self._generate_next_blocks()", "def draw(self, surface, camera_scroll):\n sprites = self.sprites()\n surface_blit = surface.blit\n for spr in sprites:\n self.spritedict[spr] = surface_blit(\n spr.image, spr.rect.move(adjust_scroll(camera_scroll))\n )\n self.lostsprites = []", "def draw(self, surface, camera_scroll):\n sprites = self.sprites()\n surface_blit = surface.blit\n for spr in sprites:\n self.spritedict[spr] = surface_blit(\n spr.image, spr.rect.move(adjust_scroll(camera_scroll))\n )\n self.lostsprites = []", "def draw(self, view):\n for r in self._aliens:\n for alien in r:\n if alien != None:\n alien.draw(view)\n if self._ship != None:\n self._ship.draw(view)\n self._dline.draw(view)\n for bolt in self._bolts:\n bolt.draw(view)", "def getLayersOnLevel(self, i):\n\n #return a filtered copy of the map's layers\n return filter(lambda a: a.level == i, self.layers)", "def drawMap(self):\n world_map = folium.Map(location=[25, 10], zoom_start=3)\n totals_column = 'total_' + self.map_type.lower()\n top10 = self.covid_df.sort_values(totals_column, axis=0, ascending=False)['location'][:10]\n scale, units = self.unitsDetector(self.covid_df[totals_column].max())\n \n color_scheme = {'Cases': 'YlOrRd', 'Deaths': 'PuRd'}[self.map_type]\n bins = list(np.linspace(0, np.ceil(self.covid_df[totals_column].max() / scale) * scale, 6))\n legend_name = 'Total Number of COVID-19 ' + self.map_type\n map_file_name = self.generateFileName()\n \n folium.Choropleth(geo_data=self.geo_data,\n data=self.covid_df,\n columns=['location', totals_column],\n key_on='feature.properties.ADMIN',\n fill_color=color_scheme,\n bins=bins,\n legend_name=legend_name,\n highlight=True\n ).add_to(world_map)\n \n for i in range(10):\n country = top10.iloc[i]\n cases = self.covid_df[self.covid_df['location'] == country][totals_column] / scale\n \n # Centroid coordinates for each country labelled by its ISO-2 code\n lat = self.countries_centroids.loc[self.name_iso2_mapping[country]]['latitude']\n long = self.countries_centroids.loc[self.name_iso2_mapping[country]]['longitude']\n popup = f\"{country}: {cases.values[0]:.2f}{units} total {self.map_type.lower()}\"\n \n folium.Marker(location=[lat, long],\n popup=folium.Popup(popup, \n max_width=1000)\n ).add_to(world_map)\n \n world_map.save(map_file_name)", "def world():\n bgcolor('black')\n path.color('blue')\n\n for index in range(len(tiles)):\n tile = tiles[index]\n \"\"\"\n Si estamos en un cuadro valido lo dibujamos en azul \n y ponemos el punto blanco\n \"\"\"\n if tile > 0:\n x = (index % 20) * 20 - 200\n y = 180 - (index // 20) * 20\n square(x, y)\n\n if tile == 1:\n path.up()\n path.goto(x + 10, y + 10)\n path.dot(2, 'white')", "def draw_onscreen(self):\n for spr in self.all_sprites:\n if spr.x >= self.cam_pos[0] and spr.x <= (self.cam_pos[0] +\n self.spr_width):\n if spr.y >= self.cam_pos[1] and spr.y <= (self.cam_pos[1]\n + self.spr_height):\n self.screen.blit(spr.image, spr.rect)", "def maps(offices, fixed):\n with Image(filename=BAT_B) as page, Drawing() as draw:\n for office, x, y in MAP_POSITIONS:\n label = door_label(offices[office], logo=False)\n if label:\n draw.composite(\"over\", x, y, label.width / 3, label.height / 3, label)\n draw(page)\n page.save(filename=\"generated_map%s.png\" % (\"_fixed\" if fixed else \"\"))" ]
[ "0.7113351", "0.67336434", "0.6681045", "0.666561", "0.6655747", "0.66555613", "0.6489397", "0.6463894", "0.63418233", "0.63139254", "0.6242701", "0.6222603", "0.6166904", "0.61456466", "0.6123982", "0.61208385", "0.6078546", "0.6004719", "0.5968714", "0.5968446", "0.5951832", "0.5932635", "0.5882009", "0.58557194", "0.58490425", "0.58459705", "0.5830537", "0.5760131", "0.574455", "0.5721593", "0.5712827", "0.5707047", "0.57056814", "0.569311", "0.5691971", "0.5683971", "0.56823176", "0.5672217", "0.5670112", "0.56611735", "0.5646931", "0.56450903", "0.5636868", "0.5633865", "0.5626458", "0.56207854", "0.5612522", "0.5604913", "0.56025636", "0.55991024", "0.55872506", "0.5582825", "0.5580927", "0.5566837", "0.55645967", "0.55569476", "0.5546288", "0.55122316", "0.54984075", "0.5486949", "0.5485085", "0.54838127", "0.5473331", "0.5465421", "0.5460846", "0.54595816", "0.5454784", "0.54542196", "0.5448197", "0.5442293", "0.5442216", "0.542756", "0.541703", "0.54037404", "0.53955", "0.53939795", "0.5389832", "0.5386527", "0.5382009", "0.5362339", "0.53611106", "0.53554136", "0.5352131", "0.5352087", "0.53447527", "0.5343646", "0.5342246", "0.533931", "0.53313154", "0.53177595", "0.5315212", "0.53144693", "0.5305132", "0.5305132", "0.53038496", "0.53035825", "0.5296554", "0.52919585", "0.5291356", "0.5291125" ]
0.5628971
44
Draws text onto a given surface.
def draw_text(self, text, font, color, surface, x, y): #use for narrative in end sequence
    text_obj = font.render(text, True, color)
    text_rect = text_obj.get_rect()
    text_rect.center = (x, y)
    surface.blit(text_obj, text_rect)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw_text(screen, font, text, surfacewidth, surfaceheight):\n\tfw, fh = font.size(text) # fw: font width, fh: font height\n\tsurface = font.render(text, True, (0, 0, 255))\n\t# // makes integer division in python3 \n\tscreen.blit(surface, (0,0))", "def drawText(text, font, surface, x, y, textcolour):\r\n textobj = font.render(text, 1, textcolour)\r\n textrect = textobj.get_rect()\r\n textrect.topleft = (x, y)\r\n surface.blit(textobj, textrect)", "def render_text_on_surface(text, surface, font, color=BLACK, top_padding=0, left_pading=0):\n rect = surface.get_rect()\n \n last_top = rect.top + top_padding\n for index, line in enumerate(text.split(\"\\n\")):\n text_surf = font.render(line, True, color)\n text_rect = text_surf.get_rect()\n text_rect.topleft = (rect.left + left_pading, last_top)\n surface.blit(text_surf, text_rect)\n \n last_top += text_rect.h", "def draw_text(display, font_name, text, size, color, x, y):\n font = pg.font.Font(font_name, size)\n text_surface = font.render(text, True, color)\n text_rect = text_surface.get_rect()\n text_rect.midtop = (x, y)\n display.blit(text_surface, text_rect)", "def draw_text(\n self, text: str, size: int, color: Tuple[int, int, int], x: int, y: int\n ) -> None:\n # TODO: Select and use a better font\n font = pg.font.Font(pg.font.get_default_font(), size)\n text_surface = font.render(text, True, color)\n text_rect = text_surface.get_rect()\n text_rect.midtop = (x, y)\n self.screen.blit(text_surface, text_rect)", "def draw_text(screen, text, size, x, y):\r\n font = pygame.font.Font(font_name, size)\r\n text_surface = font.render(text, True, WHITE)\r\n text_rect = text_surface.get_rect()\r\n text_rect.midtop = (x, y)\r\n screen.blit(text_surface, text_rect)\r\n pygame.display.update()", "def draw_text(self, text, size, x, y ,color=pygame.Color(\"white\")):\n font = pygame.font.Font(self.font_name,size)\n text_surface = font.render(text, True, color)\n text_rect = text_surface.get_rect()\n text_rect.center = (x,y)\n self.display.blit(text_surface,text_rect)", "def draw_text(surf, font, text, pos,\n antialiasing=True,\n color=(255, 255, 255),\n anchor=\"northwest\"):\n x, y = pos\n s = font.render(text, antialiasing, color)\n s_rect = s.get_rect()\n\n if \"north\" in anchor:\n s_rect.y = y\n elif \"south\" in anchor:\n s_rect.y = y - s_rect.h\n else:\n s_rect.y = y - s_rect.h/2\n\n if \"west\" in anchor:\n s_rect.x = x\n elif \"east\" in anchor:\n s_rect.x = x - s_rect.w\n else:\n s_rect.x = x - s_rect.w/2\n\n surf.blit(s, s_rect)", "def display_text(text, x, y, size):\r\n font = pygame.font.Font('freesansbold.ttf', size)\r\n text_surf, text_rect = text_objects(text, font)\r\n text_rect.center = (x, y)\r\n display.blit(text_surf, text_rect)", "def draw_pos_text(self, text):\n fw, fh = self.font.size(text) # fw: font width, fh: font height\n surface = self.font.render(text, True, (0, 255, 0))\n # // makes integer division in python3\n self.screen.blit(surface, ((self.width - fw) // 2, (self.height - fh) // 2))", "def draw_text(self, text, color = (100, 255, 100), dw = 0, dh = 0):\n fw, fh = self.font.size(text) # fw: font width, fh: font height\n surface = self.font.render(text, True, color)\n # // makes integer division in python3\n self.screen.blit(surface, ((self.width - fw - dw) // 2, (self.height - dh) // 2))", "def draw_text(self, display, text, size, x, y , mode):\n font = pygame.font.Font(self.font_name, size)\n text_surface = font.render(text, True, (0,0,0))\n text_rect = text_surface.get_rect()\n if mode == \"left\":\n 
text_rect.topleft = (x,y)\n elif mode == \"center\":\n text_rect.center = (x,y)\n display.blit(text_surface, text_rect)", "def text_draw(self, x, y, text, style={}):", "def draw_text(self, i, j, text, col, bg=None):\n txt = self.font.render(text, True, col, bg)\n rect = txt.get_rect()\n rect.center = self.get_rect(i, j).center\n self.screen.blit(txt, rect)", "def draw_text(window, text, size, text_pos, color=WHITE, bold=False):\n font = pygame.font.Font(FONT_PATH, size)\n if bold:\n font.set_bold(1)\n text_surface = font.render(text, True, color)\n text_rect = text_surface.get_rect()\n text_rect.midtop = text_pos\n window.blit(text_surface, text_rect)", "def drawtxt(txt,font,fs,clr,x,y,w,h,tf):\n if tf == True:\n pygame.draw.rect(screen, BLACK, (x,y,w,h))\n if pygame.font:\n font = pygame.font.Font(font,fs)\n text = font.render(txt, False, clr)\n screen.blit(text, (x,y))\n pygame.display.update(x,y,w,h)", "async def outline_text(draw_surface, coords, draw_text, font):\n draw = partial(draw_surface.text, text=draw_text, font=font,\n fill=\"black\")\n for offset_pair in product(range(-1, 2), repeat=2):\n draw((coords[0]+offset_pair[0], coords[1]+offset_pair[1]))\n draw(coords, fill=\"white\")", "def showText(self, surface, point, text, color=None, size=20):\n if not color: color = self.color\n v = self / 2\n point = v(point)\n surface.print(text, tuple(point), color=color, size=size)", "def draw_text(SCREEN, text, x, y):\n text = constants.CALIBRI_25.render(text, True, constants.BLACK)\n SCREEN.blit(text, (x, y))", "def draw_text(text: str, surface: Surface, rect: Rect, font: Font, color: Color, line_spacing: int = -2, center: bool = True) -> list:\n\n font_height = font.size(\"Tg\")[1]\n if not isinstance(text, list):\n text = wrap_text(text, font, rect.width)\n\n printable_lines = 1\n for i in range(1, len(text)):\n if ((font_height + line_spacing) * (i + 1)) <= rect.height:\n printable_lines += 1\n\n y = rect.top\n if center:\n y = (rect.height / 2) - (((font_height + line_spacing) * printable_lines) / 2)\n\n for line in text[:printable_lines]:\n # render the line\n image = font.render(line, True, color)\n\n x = rect.left\n if center:\n x = (rect.width / 2) - (image.get_width() / 2)\n\n # blit the line\n surface.blit(image, (x, y))\n y += font_height + line_spacing\n\n return text[printable_lines:]", "def print_text(TINY_FONT, x, y, text, color = white):\n text_image = TINY_FONT.render(text, True, color)\n gameDisplay.blit(text_image, (x,y))", "def render_text(self, text, x_pos, y_pos, z_pos):\n GL.glDisable(GL.GL_LIGHTING)\n GL.glRasterPos3f(x_pos, y_pos, z_pos)\n font = GLUT.GLUT_BITMAP_HELVETICA_10\n\n for character in text:\n if character == '\\n':\n y_pos = y_pos - 20\n GL.glRasterPos3f(x_pos, y_pos, z_pos)\n else:\n GLUT.glutBitmapCharacter(font, ord(character))\n\n GL.glEnable(GL.GL_LIGHTING)", "def draw_text(self, text, i, j, **params):", "def render(cls, surface, text, font, position, anchor=Anchor.top_left, blend=0) -> None:\n x, y, w, h = cls.measure(text, font, position, anchor)\n gw = font[GLY][2]\n gh = font[GLY][3]\n\n for n, char in enumerate(text):\n if char in font[CHR]:\n ind = font[CHR].index(char)\n else:\n ind = 0\n\n # the char glyph tile x,y position in the grid\n tile = Vec.swap_xy(divmod(ind, font[GRD][0]))\n\n gx = (tile.x * font[CEL][0]) + font[GLY][0]\n gy = (tile.y * font[CEL][1]) + font[GLY][1]\n\n surface.blit(font[BMP], (x, y), (gx, gy, gw, gh), blend)\n\n x += gw", "def create_surface_with_text(text, font_size, text_rgb, bg_rgb):\r\n font = 
pygame.freetype.SysFont(\"Courier\", font_size, bold=True)\r\n surface, _ = font.render(text=text, fgcolor=text_rgb, bgcolor=bg_rgb)\r\n return surface.convert_alpha()", "def add_text(self, text, color, pos, font):\n text = font.render(text, True, color)\n text_rec = text.get_rect(center=pos)\n self.window.blit(text, text_rec)", "def _draw_text(self, screen: Surface, changes: List[Rect]) -> None:\n orignal_rect = self._text_image.get_rect()\n\n centered_rect = orignal_rect.copy()\n centered_rect.center = self._rect.center\n\n clip_rect = centered_rect.clip(self._rect)\n centered_clip_rect = clip_rect.copy()\n centered_clip_rect.center = orignal_rect.center\n\n changes.append(screen.blit(self._text_image,\n clip_rect, centered_clip_rect))", "def create_text(text, font_size, bold, text_color):\n myfont = pygame.font.SysFont(\"Courier\", font_size, bold)\n surface = myfont.render(text,True,text_color)\n return surface", "def create_surface_with_text(text, font_size, text_rgb, bg_rgb):\n font = pygame.freetype.SysFont(\"Courier\", font_size, bold=True)\n surface, _ = font.render(text=text, fgcolor=text_rgb, bgcolor=bg_rgb)\n return surface.convert_alpha()", "def drawText(font, textstr, clear_screen=True, color=(250, 10, 10)):\n if clear_screen:\n screen.fill(black) # black screen\n\n # Render font\n pltText = font.render(textstr, 1, color)\n\n # Center text\n textpos = pltText.get_rect()\n textpos.centerx = screen.get_rect().centerx\n textpos.centery = screen.get_rect().centery\n\n # Blit onto screen\n screen.blit(pltText, textpos)\n\n # Update\n pygame.display.update()", "def create_surface_with_text(text, font_size, text_rgb, bg_rgb):\n font = pygame.freetype.SysFont(\"Courier\", font_size, bold=False)\n surface, _ = font.render(text=text, fgcolor=text_rgb, bgcolor=bg_rgb)\n return surface.convert_alpha()", "def DrawTextToTempSurface(self, message_text):\r\n\t\tlabel = self.font.render(message_text, 1, self.textcolor)\r\n\t\tself.tempSurface.blit(label, (self.textMargin, self.textMargin))", "def render_text_surfaces(self):\n self.images = [] # The text surfaces.\n line_width = 0\n line = []\n space_width = self.font.size(' ')[0]\n\n # Put the words one after the other into a list if they still\n # fit on the same line, otherwise render the line and append\n # the resulting surface to the self.images list.\n for word in self.text:\n line_width += self.font.size(word)[0] + space_width\n # Render a line if the line width is greater than the rect width.\n if line_width > self.rect.w:\n surf = self.font.render(' '.join(line), True, self.text_color)\n self.images.append(surf)\n line = []\n line_width = self.font.size(word)[0] + space_width\n\n line.append(word)\n\n # Need to render the last line as well.\n surf = self.font.render(' '.join(line), True, self.text_color)\n self.images.append(surf)", "def show_text(text, colour):\n message = font_style.render(text, True, colour)\n dis.blit(message, [game_size_x/2, game_size_y/2])", "def print(self, my_screen, text_string):\n text_bitmap = self.font.render(text_string, True, BLACK)\n my_screen.blit(text_bitmap, [self.x_pos, self.y_pos])\n self.y_pos += self.line_height", "def draw_text(\n self,\n text,\n position,\n font=FONT,\n size=16,\n color=Color.BLACK,\n centered=True,\n ):\n _font = pygame.font.Font(font, size)\n _text = _font.render(text, True, color)\n _rect = _text.get_rect()\n x, y = position\n if centered:\n x -= _rect.width // 2\n y -= _rect.height // 2\n _rect.topleft = x, y\n self.screen.blit(_text, _rect)", "def draw_text(self, text, 
position, font_size, font_color):\n font_color = check_color(font_color)\n STtext.text(self.canvas, text, position, font_size, font_color)", "def draw_text(\n self,\n text: str,\n transform: Matrix44,\n properties: Properties,\n cap_height: float,\n ) -> None:\n raise NotImplementedError", "def put_text(self, text, color, point):\n x1, y1 = self.pos_shift\n x2, y2 = point\n if not self.in_display((x2 - x1, y2 - y1)):\n return\n font = pygame.font.SysFont(\"monospace\", 18, bold=True)\n label = font.render(text, 1, color)\n self.screen.blit(label, (\n x2 - x1,\n y2 - y1\n ))", "def render_text(self, text, x_pos, y_pos, colour=0):\n if colour == 0:\n GL.glColor3f(0.0, 0.0, 0.0) # text is black\n elif colour == 1:\n GL.glColor3f(1.0, 0.0, 0.0)\n elif colour == 2:\n GL.glColor3f(0.0, 1.0, 0.0)\n GL.glRasterPos2f(x_pos, y_pos)\n font = GLUT.GLUT_BITMAP_HELVETICA_12\n\n for character in text:\n if character == '\\n':\n y_pos = y_pos - 20\n GL.glRasterPos2f(x_pos, y_pos)\n else:\n GLUT.glutBitmapCharacter(font, ord(character))", "def draw_on_surface(surface):\n pangocairo_ctx = pangocairo.CairoContext(cairo.Context(surface))\n layout = pangocairo_ctx.create_layout()\n\n pango_ctx = layout.get_context()\n if language is not None:\n pango_ctx.set_language(pango.Language(language))\n\n if rtl:\n if vertical:\n base_dir = pango.DIRECTION_TTB_RTL\n else:\n base_dir = pango.DIRECTION_RTL\n alignment = pango.ALIGN_RIGHT\n else:\n if vertical:\n base_dir = pango.DIRECTION_TTB_LTR\n else:\n base_dir = pango.DIRECTION_LTR\n alignment = pango.ALIGN_LEFT\n\n pango_ctx.set_base_dir(base_dir)\n layout.set_alignment(alignment)\n\n layout.set_width(width * pango.SCALE)\n layout.set_spacing((line_spacing-font_size) * pango.SCALE)\n\n # TODO: use ctypes to wrap fontconfig to avoid using the system's fonts\n font = pango.FontDescription()\n font.set_family(family)\n font.set_size(font_size * pango.SCALE)\n font.set_style(style)\n font.set_weight(weight)\n layout.set_font_description(font)\n\n layout.set_text(text)\n\n# # Doesn't work for some reason\n# pango_ctx.set_base_gravity(pango.GRAVITY_AUTO)\n# matrix = pango_ctx.get_matrix()\n# matrix.rotate(90)\n# pango_ctx.set_matrix(matrix)\n# layout.context_changed()\n\n extents = layout.get_pixel_extents()\n top_usage = min(extents[0][1], extents[1][1], 0)\n bottom_usage = max(extents[0][3], extents[1][3])\n\n pangocairo_ctx.set_antialias(cairo.ANTIALIAS_GRAY)\n pangocairo_ctx.set_source_rgb(1, 1, 1) # White background\n pangocairo_ctx.paint()\n\n pangocairo_ctx.translate(0, -top_usage)\n pangocairo_ctx.set_source_rgb(0, 0, 0) # Black text color\n pangocairo_ctx.show_layout(layout)\n\n return bottom_usage - top_usage", "def draw_text(self, words, screen, pos, size, color, font_name, centered=False):\r\n font = pygame.font.SysFont(font_name, size)\r\n text = font.render(words, False, color)\r\n text_size = text.get_size()\r\n if centered:\r\n pos[0] = pos[0]-text_size[0]//2\r\n pos[1] = pos[1]-text_size[1]//2\r\n screen.blit(text, pos)", "def draw(self, surface, offset=(0,0)):\n mouse = pg.mouse.get_pos()\n pos = mouse[0]-offset[0], mouse[1]-offset[1]\n if self.clicked:\n fill_color = pg.Color(\"white\")\n text = self.selected_text\n elif self.rect.collidepoint(pos):\n fill_color = (198, 226, 255)\n text = self.selected_text\n else:\n fill_color = self.color\n text = self.text\n surface.fill(pg.Color(\"black\"), self.rect)\n surface.fill(fill_color, self.rect.inflate(-2,-2))\n surface.blit(text, self.text_rect)", "def drawString(text: str):\n pass", "def 
draw_centered_text(surface, text, color, font_size):\n\n # Set the font\n font = pygame.font.Font(pygame.font.get_default_font(), font_size)\n\n # Render the text\n rendered_text = font.render(text, True, color)\n\n # Get the bounding box of the text\n text_rect = rendered_text.get_rect(center=(surface.get_width() / 2, surface.get_height() / 2))\n\n # Draw the text on the surface\n surface.blit(rendered_text, text_rect)", "def show_text(self, txt, col=YELLOW, bg=None):\n self.textL = self.fontL.render(txt, True, col, bg)\n self.textL2 = self.fontL.render(txt, True, WHITE, bg)\n phi = 0\n t0 = time()\n while time() < t0 + 3:\n surf = pygame.transform.rotate(self.textL, phi)\n surf2 = pygame.transform.rotate(self.textL2, -phi)\n rect = surf.get_rect()\n rect.center = (self.w//2, self.h//2)\n self.screen.blit(surf, rect)\n self.screen.blit(surf2, rect) \n pygame.display.update()\n phi += 2", "def draw_text(self, text, origin, font=cv2.FONT_HERSHEY_SIMPLEX, text_scale=0.7, text_color=(255,0,0), thickness=2):\r\n cv2.putText(self.image, text, origin, font, text_scale, text_color, thickness)", "def makeText(colour, size, text, bgcolour, textSize=15):\n sx = int((len(text)+1)*textSize/2.5)\n size = (sx, size[1])\n image = pygame.Surface(size)\n image.fill(bgcolour)\n font = pygame.font.SysFont(None, textSize)\n txtSurface = font.render(text, False, colour, bgcolour)\n tx = (image.get_width() - txtSurface.get_width())/2\n image.blit(txtSurface, (tx, size[1]/2))\n image.convert()\n return image", "def draw_text(text, pos, color=None, font=_glut.GLUT_BITMAP_TIMES_ROMAN_24, linespace=20):\n if color is None:\n color = _UTILS_COLOR_WHITE\n _gl.glColor3fv(color)\n if isinstance(pos, Point3):\n x = pos.get_x()\n y = pos.get_y()\n z = pos.get_z()\n _gl.glRasterPos3f(x, y, z)\n for char in text:\n if char == \"\\n\":\n y += linespace\n _gl.glRasterPos3f(x, y, z)\n else:\n # noinspection PyBroadException\n try:\n glutBitmapCharacter(font, ord(char))\n except:\n if not _UTILS_ERRS[0]:\n print_gl_error('Actual OpenGL version doest not support glutBitmapCharacter function')\n _UTILS_ERRS[0] = True\n else:\n raise Exception('Point must be Point3 type')", "def DrawText(*args, **kwargs):\n return _gdi_.DC_DrawText(*args, **kwargs)", "def draw_text(text, pos, color=COLOR_WHITE, font=GLUT_BITMAP_TIMES_ROMAN_24,\n linespace=20):\n glColor3fv(color)\n if isinstance(pos, Point3):\n x = pos.get_x()\n y = pos.get_y()\n z = pos.get_z()\n glRasterPos3f(x, y, z)\n for char in text:\n if char == \"\\n\":\n y += linespace\n glRasterPos3f(x, y, z)\n else:\n try:\n glutBitmapCharacter(font, ord(char))\n except:\n if not _ERRS[0]:\n printGLError(\n 'la version actual de OpenGL no posee la funcion glutBitmapCharacter')\n _ERRS[0] = True\n else:\n raise Exception(\"el punto debe ser del tipo point3\")", "def __init__(self,text,position,xmid = False,fontsize = 36,backgroundcolor = (200,200,200),surface = None):\n pygame.font.init()\n basicfont = pygame.font.Font(None,fontsize)\n\n # Figure out the size of the image that will be drawn on and create that\n # image\n self.linewidths = []\n for x in text:\n self.texttemp = basicfont.render(x,0,(1,1,1))\n self.linewidths.append(self.texttemp.get_width())\n self.imagewidth = basicfont.render(text[self.linewidths.index(max(self.linewidths))],0,(1,1,1)).get_width()\n self.imageheight = len(text) * fontsize + (len(text)-1) * 10\n self.image = pygame.Surface((self.imagewidth,self.imageheight))\n self.image.fill(backgroundcolor)\n\n # Draw the text to the image\n n = 0\n for x in text:\n 
self.texttemp = basicfont.render(x,0,(1,1,1))\n self.image.blit(self.texttemp,(0,n * fontsize + n * 10))\n n +=1\n\n # Set the position of the text. If xmid is passed in as true set the\n # pos to the top middle pixel of the text\n if xmid:\n self.pos = (position[0] - int(self.image.get_width() / 2),position[1])\n else:\n self.pos = position\n\n # Set up the information that will be needed to blit the image to a\n # surface\n self.blitinfo = (self.image, self.pos)\n\n # automatically blit the text onto an input surface\n if surface:\n surface.blit(*self.blitinfo)", "def draw_text(self, text, position=(0, 0), color='black', font=None,\n font_size=12, rotation=0, **kwargs):\n font = self.font(font_size)\n\n text_image = Image.new('L', self.dimensions, 'black')\n draw_text_image = ImageDraw.Draw(text_image)\n draw_text_image.text(position, text, font=font, fill='white')\n\n alpha = Image.new('L', self.dimensions)\n alpha = ImageChops.lighter(alpha, text_image)\n\n solidcolor = Image.new('RGBA', self.dimensions, color)\n image_mask = Image.eval(text_image, lambda p: 255 * (int(p != 0)))\n self.base_image = Image.composite(solidcolor, self.base_image, image_mask)\n self.base_image.putalpha(alpha)", "def text(self, str: str, x: int, y: int, colour: int, /) -> None:", "def _draw_text_on_image(self, string, position, color = (77, 255, 9)):\n cv2.putText(self.image, string, (position),\n fontFace = self._font_face, fontScale = self._font_scale,\n color = color, thickness = self._font_thickness)", "def draw_string(message, x, y, textSize):\r\n global _canvas\r\n global _current_color\r\n if _canvas == None:\r\n raise RuntimeError(\"Canvas is not open yet.\")\r\n else:\r\n t = Text(message, textSize)\r\n t.move(x, y)\r\n t.setFontColor(_current_color)\r\n _canvas.add(t)", "def text(self, font, text, x0, y0, color=WHITE, background=BLACK):\n if font.WIDTH == 8:\n self._text8(font, text, x0, y0, color, background)\n else:\n self._text16(font, text, x0, y0, color, background)", "def DrawText(*args, **kwargs):\n return _gdi_.GraphicsContext_DrawText(*args, **kwargs)", "def text(self, string,\n location,\n font, fontSize,\n antialias=False,\n colour=(0,0,0),\n newlinePad=5,\n screen=None):\n if not screen:\n screen = self.screen\n x = location[0]\n y = location[1]\n font = pygame.font.Font(font, fontSize)\n lines = string.split(\"\\n\")\n counter = 0\n height = 0\n for line in lines:\n fontSurface = font.render(line, antialias, colour).convert()\n if counter == 0:\n screen.blit(fontSurface, location)\n else:\n newY = y * counter + newlinePad + height\n screen.blit(fontSurface, (x, newY))\n height = font.size(line)[1] + height + newlinePad\n counter += 1", "def renderText(self, taille = 24, text = \"\", couleur = (0,0,0)):\n self.pygamePol = pygame.font.SysFont(self.path, taille)\n textRend = self.pygamePol.render(text, True, couleur)\n return textRend", "def DrawText(*args, **kwargs):\n return _gdi_.PseudoDC_DrawText(*args, **kwargs)", "def draw_text(self, text, position, color, centered=False, scale=1.5, thickness=3):\n if centered:\n text_size = opencv.getTextSize(text, opencv.FONT_HERSHEY_SIMPLEX, fontScale=scale, thickness=thickness)[0]\n text_size = Point(-text_size[0]/2.0, text_size[1]/2.0)\n position = (position + text_size)\n position = self._format_point(position)\n opencv.putText(self.img, text, position.tuple(), opencv.FONT_HERSHEY_SIMPLEX, fontScale=scale,\n color=color.bgra(), thickness=thickness)", "def draw_text(dc, text, center_x, center_y):\r\n tw, th = dc.GetTextExtent(text)\r\n 
dc.DrawText(text, (center_x-tw/2), (center_y-th/2))", "def draw_text(self, text, x=0, y=0,\n color=None, bg=colors.Off, aa=False,\n font_name=font.default_font, font_scale=1):\n fh = font.fonts[font_name]['height']\n for c in text:\n if c == '\\n':\n y += font_scale * fh\n x = 0\n elif c == '\\r':\n pass # skip it\n else:\n fw = self.draw_char(x, y, c, color, bg, aa, font_name, font_scale)\n x += font_scale * fw\n if x >= self.width:\n break", "def paintText(self, text):\n return '@paint '+text * 2", "def draw(self, surface):\n ent = self.controller.entity_selection\n\n # If we have not selected an entity.\n if not ent:\n self.surface.blit(self.background, (0, 0))\n self.controller.entity_selection_track = False\n return\n \n # And provide details about the unit.\n unit_text = self.font.render(\"%s (id: %s)\" % (ent.name, ent.id), True, (255, 255, 255))\n w, _ = unit_text.get_size()\n self.surface.blit(unit_text, ((self.width / 2) - w / 2, 15))\n \n output = [\"Location: (%d, %d)\" % tuple(ent.location)]\n\n if ent.name == \"ant\":\n output.append(\"Energy: %s\" % ent.c[\"attrs\"][\"energy\"])\n output.append(\"Health: %s\" % ent.c[\"attrs\"][\"health\"])\n output.append(\"Brain state: %s\" % ent.brain.active_state.name)\n output.append(\"Speed: %d\" % ent.c[\"velocity\"].speed)\n if ent.c[\"destination\"].location:\n output.append(\"Destination: (%s, %s)\" % tuple(ent.c[\"destination\"].location))\n if ent.c[\"destination\"].isentity:\n output.append(\"Target: (%s)\" % ent.c[\"destination\"].val.name)\n \n for i, line in enumerate(output):\n text = self.font.render(line, True, (255, 255, 255))\n self.surface.blit(text, (10, 30 + i*15))\n \n # Blit to the main surface.\n surface.blit(self.surface, ((self.x, self.y)))", "def __init__(self,text,position,xmid = False,surface = None,**kargs):\n\n # Initialize the pygame font class.\n pygame.font.init()\n\n # Unpack the **kargs dictionary\n fontsize = kargs.pop('fontsize',36)\n align = kargs.pop('align','l')\n\n # Create the font object\n basicfont = pygame.font.Font(None,fontsize)\n\n # Figure out the size of the image that will be drawn on and create that\n # image\n linewidths = []\n for x in text:\n texttemp = basicfont.render(x,0,(1,1,1))\n linewidths.append(texttemp.get_width())\n # The width of the image is the width of the text that corresponds to\n # the index of linewidths that contains the largest number in linewidths\n self.imagewidth = basicfont.render(text[linewidths.index(max(linewidths))],0,(1,1,1)).get_width()\n self.imageheight = len(text) * fontsize + (len(text)-1) * 10\n self.image = pygame.Surface((self.imagewidth,self.imageheight))\n self.image.fill((200,200,200))\n\n # make the background transparent\n self.image.set_colorkey((200,200,200))\n\n # Draw the text to the image using the user chosen alignment\n n = 0\n if align == 'l':\n for x in text:\n texttemp = basicfont.render(x,0,(1,1,1))\n self.image.blit(texttemp,(0,n * fontsize + n * 10))\n n +=1\n elif align == 'c':\n for x in text:\n texttemp = basicfont.render(x,0,(1,1,1))\n self.image.blit(texttemp,(self.imagewidth // 2 - texttemp.get_width() // 2,n * fontsize + n * 10))\n n +=1\n elif align == 'r':\n for x in text:\n texttemp = basicfont.render(x,0,(1,1,1))\n self.image.blit(texttemp,(self.imagewidth - texttemp.get_width(),n * fontsize + n * 10))\n n +=1\n\n # Set the position of the text. 
If xmid is passed in as true set the\n # pos to the top middle pixel of the text\n if xmid:\n self.pos = (position[0] - int(self.image.get_width() / 2),position[1])\n else:\n self.pos = position\n\n # Set up the information that will be needed to blit the image to a\n # surface\n self.blitinfo = (self.image, self.pos)\n\n # automatically blit the text onto an input surface\n if surface:\n surface.blit(*self.blitinfo)", "def textObject(text, font, color):\n\n textSurface = font.render(text, True, color)\n return textSurface, textSurface.get_rect()", "def TextDisplay(file,x_pos,y_pos,width,size,screen):\n string = filter(None,[str.replace(\"\\n\",'') for str in open(file,'r').readlines()])\n wrappedstring=[]\n for str in string:\n new=textwrap.wrap(str,width)\n for st in new:\n wrappedstring.append(st)\n wrappedstring.append('')\n\n shift=0\n for str in wrappedstring: \n font = pygame.font.Font(None, size)\n text = font.render(str.decode('utf-8'),1, (10, 10, 10))\n textpos = text.get_rect()\n textpos.topleft = (x_pos,y_pos+shift)\n screen.blit(text, textpos)\n shift+=size", "def displayTextToScreen(wof_settings,screen,title_image,text,text_font,text_font_size):\n \n # Position the title image\n title_rect = pygame.image.load(title_image).get_rect()\n top_coord = 50\n title_rect.top = top_coord\n title_rect.centerx = wof_settings.width/2\n top_coord += title_rect.height\n \n # Start with drawing a blank color to the entire window:\n screen.fill(wof_settings.titleScreenBgColor)\n \n # Title image\n screen.blit(pygame.image.load(title_image), title_rect)\n \n # Position and draw the text\n for i in range(len(text)):\n title_font = pygame.font.Font(text_font, text_font_size)\n text_surf = title_font.render(text[i], 1, wof_settings.titleTextColor)\n text_rect = text_surf.get_rect()\n top_coord += 10\n text_rect.top = top_coord\n text_rect.centerx = wof_settings.width/2\n top_coord += text_rect.height\n screen.blit(text_surf, text_rect)\n \n while True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n terminate()\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n terminate()\n return # user has pressed a key, so return.\n \n # Display the contents to the actual screen.\n pygame.display.flip()", "def set_text(self, text):\n self.str = text\n self.image = self.font.render(self.str, True, self.font_color)\n self.rect.width = self.image.get_width()\n self.rect.height = self.image.get_height()", "def text(self):\n surface_score = pygame.font.SysFont('Helvetic', 100).render(str(self.score), False, BLACK)\n screen.blit(surface_score, (50, 50))", "def drawText(txt,x,y,ucoords=1,TeX=0):\n if (type(txt) is types.IntType) or (type(txt) is types.FloatType) or \\\n (type(txt) is types.LongType):\n drawNumber(txt,x,y,ucoords)\n else:\n if TeX: setTeXMode(1)\n drawMessage(txt,x,y,ucoords)", "def draw(canvas):\n global n\n global message\n canvas.draw_text(message, [WIDTH // 2, HEIGTH // 2], 35, 'Gray')\n canvas.draw_text(display(), [250, 20], 25, 'Gray')", "def draw_text(img, text, pos):\n font = cv2.FONT_HERSHEY_SIMPLEX\n return cv2.putText(img, text, pos, font, 1, (255, 255, 255), 1, cv2.LINE_AA)", "def draw_text (self, ctx):\n\t\tif self.unseen_count > 0:\n\t\t\tif self.p_layout == None:\n\t\t\t\tself.p_layout = ctx.create_layout()\n\t\t\telse:\n\t\t\t\tctx.update_layout(self.p_layout)\n\n\t\t\twidth = self.get_text_width(ctx, str(self.unseen_count), self.text_font)\n\t\t\theight = self.get_text_height(ctx, str(self.unseen_count), self.text_font)\n\t\t\tprintx = 
self.position_x - width / 2\n\t\t\tprinty = self.position_y - height / 2\n\n\t\t\tctx.translate(printx, printy)\n\n\t\t\tp_fdesc = pango.FontDescription(self.text_font)\n\t\t\tself.p_layout.set_font_description(p_fdesc)\n\t\t\tself.p_layout.set_markup(str(self.unseen_count))\n\t\t\tctx.set_source_rgba(0.5, 0.5, 0.5, 0.3)\n\t\t\tctx.show_layout(self.p_layout)\n\t\t\tctx.fill()\n\t\t\tctx.translate(-1, -1)\n\t\t\tctx.set_source_rgba(self.text_color[0], self.text_color[1], self.text_color[2], self.text_color[3])\n\t\t\tctx.show_layout(self.p_layout)\n\t\t\tctx.fill()", "def draw(self, screen):\n lines = self.text.strip().split('\\n')\n y = self.y\n for line in lines:\n self.ui.show_text(line, (self.x, y), 30)\n y += 32", "def show(self, cv_img, text, X=0, Y=0):\n w, h, pads = self.get_textSize(text)\n x = X + pads\n y = Y + h + pads*2\n cv2.putText( cv_img, text, (x, y),\n self.fontFace, self.fontScale,\n self.fontColor, self.fontThickness, 255)", "def create_text(text, font, colour, position):\n _text = font.render(text, False, colour)\n _text_rect = _text.get_rect()\n _text_rect.center = position # place text centered on given position\n\n return {'surface': _text, 'rect': _text_rect}", "def DrawStringAt(self, x, y, s, color=(229, 153, 153, 255)):\r\n self.screen.blit(self.font.render(s, True, color), (x, y))", "def draw(self, win):\n self.rect.draw(win)\n self.text.draw(win)", "def add_text(text, x, y, rgb = [0, 0, 0], font_name = 'times new roman', size = 32, bold = False, italics = False, merged = False, slot = 0):\r\n \r\n font = pygame.font.SysFont(font_name, size, bold, italics)\r\n text = font.render(text, 1, rgb)\r\n \r\n if slot not in __g.keys():\r\n raise IndexError('No image loaded at slot %i' %(slot))\r\n \r\n surface = __g.values()[slot]\r\n if merged:\r\n surface.image.blit(text, (x, y))\r\n else:\r\n surface.text_add.append([x, y, text])", "def AfficherText(chaine_de_caratere):\n font = pygame.font.SysFont(\"Arial\", 24)\n text = font.render(chaine_de_caratere, 0, (255, 255, 255))\n textpos = text.get_rect()\n textpos.centerx = screen.get_rect().centerx\n screen.blit(text, textpos)", "def draw(self):\n if not self.exists:\n return\n if self.attributes[AT.TARGET_TYPE] == TargetType.TIMED:\n self.draw_frame_timed(self.text.opacity / 2 + 0.5)\n else:\n self.draw_frame(0.5)\n self.text.draw()", "def message_display(text, loc, size, color=None):\n # gameDisplay = pygame.display.set_mode((width, height))\n largeText = pygame.font.Font('freesansbold.ttf', size)\n TextSurf, TextRect = text_objects(text, largeText, color)\n TextRect.center = (loc[0], loc[1])\n gameDisplay.blit(TextSurf, TextRect)\n\n pygame.display.update()", "def drawTextLayer(self, dc, text, map_rel, colour, size, filled, attributes):\n\n if text is None:\n return\n\n # handle attributes here\n placement = attributes.get('placement', 'cm')\n offset = attributes.get('offset', 4)\n angle = attributes.get('angle', 0)\n colour = attributes.get('colour', wx.BLACK)\n\n dc.SetPen(wx.Pen(colour))\n dc.SetBrush(wx.Brush(colour))\n\n # draw text on map/view\n if map_rel:\n for i in text:\n try:\n (lon, lat, t) = i\n d = None\n except ValueError:\n raise RuntimeError('Map-relative text data must be: '\n '[(lon, lat, text), ...]')\n (x, y) = self.convertGeo2View(lon, lat)\n (w, h, _, _) = dc.GetFullTextExtent(t)\n\n dc.DrawCircle(x, y, 2)\n exec self.text_placement[placement.lower()]\n dc.DrawText(t, x, y)\n else:\n for i in text:\n try:\n (x, y, t) = i\n d = None\n except ValueError:\n raise RuntimeError('View-relative text 
data must be: '\n '[(x, y, text), ...]')\n dc.DrawCircle(x, y, 2)\n dc.DrawText(t, x, y)", "def draw(text: list):\n\n curses.wrapper(wrapper, text)", "def basic_render(self, surface) -> None:\n if not self.visible:\n return\n l, t = self.pos\n r, b = self.get_anchor_pos(Anchor.bottom_right)\n tpos = self.get_anchor_pos(Anchor.middle)\n backcolor = (128, 128, 128)\n forecolor = {False: (255, 255, 192), True: (255, 0, 0)}\n pts = ((l, t), (r, t), (r, b), (l, b))\n pygame.draw.polygon(surface, backcolor, pts, 0)\n pygame.draw.polygon(surface, forecolor[self.hover], pts, 1)\n BitmapFont.set_colors(BitmapFont.medium, backcolor, forecolor[self.hover])\n BitmapFont.render(surface, str(self.label), BitmapFont.medium, tpos, Anchor.middle)", "def draw_greeting(self):\n if pygame.font:\n # grab the correct font\n font = pygame.font.Font(None, 120) # fontname, size\n # render the font into the \"text\" surface\n # text, antialias, color\n text = font.render(self.caption, 1, (200, 50, 100))\n # center the text\n textpos = text.get_rect()\n textpos.centerx = self.screen.get_rect().centerx\n # render to screen\n self.screen.blit(text, textpos)", "def draw_text(mat, s, origin, scale, color=(0, 0, 255), thickness=1):\n cv2.putText(mat, s, origin, cv2.FONT_HERSHEY_SIMPLEX, scale, color, thickness=thickness)", "def draw(self, screen):\n\n x, y = self.get_abs_x(), self.get_abs_y()\n font_size = self.get_property('font_size')\n min_width, min_height = get_text_size(self.get_text(), font_size=font_size)\n width, height = self.get_properties('width', 'height')\n pad_left, pad_right = self.get_properties('pad_left', 'pad_right')\n pad_top, pad_bottom = self.get_properties('pad_top', 'pad_bottom')\n text = self.get_text()\n\n super().draw(screen)\n font_size = self.get_property('font_size')\n show_text(screen, text, x + width / 2, y + height / 2,\n font_size=font_size)\n\n return self", "def text(text = 'abcd', size = 10, justify = 'left', layer = 0, font = \"DEPLOF\"):\n t = Device('text')\n xoffset = 0\n yoffset = 0\n\n face = font\n if face == \"DEPLOF\":\n scaling = size/1000\n\n for line in text.split('\\n'):\n l = Device(name = 'textline')\n for c in line:\n ascii_val = ord(c)\n if c == ' ':\n xoffset += 500*scaling\n elif (33 <= ascii_val <= 126) or (ascii_val == 181):\n for poly in _glyph[ascii_val]:\n xpts = np.array(poly)[:, 0]*scaling\n ypts = np.array(poly)[:, 1]*scaling\n l.add_polygon([xpts + xoffset, ypts + yoffset],\n layer = layer)\n xoffset += (_width[ascii_val] + _indent[ascii_val])*scaling\n else:\n valid_chars = '!\"#$%&\\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\\\]^_`abcdefghijklmnopqrstuvwxyz{|}~µ'\n warnings.warn('[PHIDL] text(): Warning, some characters ignored, no geometry for character \"%s\" with ascii value %s. ' \\\n 'Valid characters: %s' % (chr(ascii_val), ascii_val, valid_chars))\n t.add_ref(l)\n yoffset -= 1500*scaling\n xoffset = 0\n else:\n from .font import _get_font_by_name, _get_font_by_file, _get_glyph\n\n # Load the font\n # If we've passed a valid file, try to load that, otherwise search system fonts\n font = None\n if (face.endswith(\".otf\") or face.endswith(\".ttf\")) and os.path.exists(face):\n font = _get_font_by_file(face)\n else:\n try:\n font = _get_font_by_name(face)\n except ValueError:\n pass\n if font is None:\n raise ValueError(('[PHIDL] Failed to find font: \"%s\". ' +\n 'Try specifying the exact (full) path to the .ttf or .otf file. 
' +\n 'Otherwise, it might be resolved by rebuilding the matplotlib font cache') % (face))\n\n # Render each character\n for line in text.split('\\n'):\n l = Device('textline')\n xoffset = 0\n for letter in line:\n letter_dev = Device(\"letter\")\n letter_template, advance_x = _get_glyph(font, letter)\n for poly in letter_template.polygons:\n letter_dev.add_polygon(poly.polygons, layer=layer)\n ref = l.add_ref(letter_dev)\n ref.move(destination=(xoffset, 0))\n ref.magnification = size\n xoffset += size*advance_x\n\n ref = t.add_ref(l)\n ref.move(destination=(0, yoffset))\n yoffset -= size\n\n justify = justify.lower()\n for l in t.references:\n if justify == 'left': pass\n if justify == 'right': l.xmax = 0\n if justify == 'center': l.move(origin = l.center,\n destination = (0, 0), axis = 'x')\n\n t.flatten()\n return t", "def write_text(size, text):\n img = Image.new('RGB', size, (255, 255, 255))\n draw = ImageDraw.Draw(img)\n font = ImageFont.truetype('/Library/Fonts/Arial.ttf', size)\n draw.text((10,10), text, (0, 0, 0), font=font)\n return img", "def drawKeybind(position, text, anchor = \"center\"):\n\t\t# Draw the text\n\t\ttextObject = canvas.create_text(\n\t\t\tpixelFromPosition(Vector2(0, 0) + position),\n\t\t\ttext = text,\n\t\t\tfill = \"black\",\n\t\t\tfont = (\"Fixedsys\", InterfaceTools.fontSize(40), \"\"),\n\t\t\tanchor = anchor\n\t\t)\n\t\t# Get the bounding box of the text object in pixels\n\t\tboundingBox = canvas.bbox(textObject)\n\t\t# Convert the bounding box into position coordinates\n\t\tboundsMin = positionFromPixel(boundingBox[0], boundingBox[1])\n\t\tboundsMax = positionFromPixel(boundingBox[2], boundingBox[3])\n\t\tcenter = Vector2((boundsMin.x + boundsMax.x) / 2, (boundsMin.y + boundsMax.y) / 2)\n\t\t# Minimum rectangle size\n\t\tboundsMin.x = min(boundsMin.x - 0.05, -0.075 + center.x)\n\t\tboundsMin.y = min(boundsMin.y - 0.01, -0.075 + center.y)\n\t\tboundsMax.x = max(boundsMax.x + 0.05, 0.075 + center.x)\n\t\tboundsMax.y = max(boundsMax.y + 0.01, 0.075 + center.y)\n\t\t# Draw the rectangle\n\t\tcanvas.create_rectangle(\n\t\t\t*pixelFromPosition(boundsMin),\n\t\t\t*pixelFromPosition(boundsMax),\n\t\t\tfill = \"white\",\n\t\t\toutline = \"gray\",\n\t\t\twidth = InterfaceTools.imageScale(5)\n\t\t)\n\t\t# Position the text in front of the rectangle\n\t\tcanvas.tag_raise(textObject)", "def AddText(img, new_dimensions, text):\n font_file = DownloadFont()\n with Drawing() as draw:\n font_size = new_dimensions[1] * .045\n xpos = round(new_dimensions[0] / 2)\n ypos = round(new_dimensions[1] - 1.5 * font_size)\n outline = font_size / 10\n draw.font = font_file\n draw.font_size = font_size\n draw.text_alignment = 'center'\n draw.text_antialias = True\n invis = draw.stroke_color\n draw.stroke_color = Color('#000000')\n draw.fill_color = Color('#ffff00')\n draw.stroke_width = outline\n draw.text(xpos, ypos, text)\n draw.stroke_color = invis\n draw.text(xpos, ypos, text)\n draw(img)\n os.remove(font_file)", "def add_text(self, x, y, text, style=None):\n style = self.__prepare_style(style, ' ')\n for i, c in enumerate(text):\n if self.check_coord_in_range(x + i, y):\n text_style = Style(c, style.fg_color, style.bg_color, style.font_style)\n self.canvas[y][x + i] = text_style", "def test_generate_mine_text(self):\n pg.font.init()\n font_surface = utils.generate_mine_text(1)\n self.assertIsInstance(font_surface, pg.Surface)", "def draw_text(text, levels):\n positions = []\n length = len(levels)\n xypos = []\n for x in range(len(levels)):\n y = levels[x]\n xypos.append((x, y))\n 
tlist = []\n for y in range(max(levels)+1):\n tlist.append([])\n for x in range(length):\n tlist[-1].append(\" \")\n tlist[-1].append(\"\\n\")\n for i in range(len(text)):\n char = text[i]\n x, y = xypos[i]\n tlist[y][x] = char\n endtext = \"\"\n for l in tlist:\n endtext += \"\".join(l)\n return endtext", "def display_message(self, text, color, text_size, coordinates):\n\n if text_size == \"large\":\n text_size = 100\n elif text_size == \"normal\":\n text_size = 60\n \n text_style = pygame.font.Font(\"etc/Roboto-Regular.ttf\", text_size)\n text_surface, text_rect = self.text_objects(text, text_style, color)\n text_rect.center = coordinates\n self.game_display.blit(text_surface, text_rect)\n pygame.display.update()", "def text(self, text: str, xo: int, yo: int, color: int):\n for offset, letter in enumerate(text):\n template = font.get(letter)\n for x, line in enumerate(template):\n line_str = '{:08b}'.format(line).replace('0b', '')\n if self.portrait:\n line_str = reversed(line_str)\n for y, pix in enumerate(line_str):\n if pix == '1':\n self.pixel(xo + x + (offset * 8), yo + y, color)" ]
[ "0.8523884", "0.8420744", "0.82012075", "0.81627655", "0.80817306", "0.7885264", "0.78392565", "0.78264725", "0.7806616", "0.77619326", "0.77171636", "0.7670391", "0.7642468", "0.7597258", "0.7542746", "0.7457794", "0.745692", "0.7357429", "0.7349292", "0.7319174", "0.729991", "0.72916424", "0.728086", "0.7253746", "0.7253538", "0.7242685", "0.7224358", "0.72200406", "0.7219605", "0.7218411", "0.7210224", "0.718779", "0.71874845", "0.71645886", "0.7125469", "0.71148473", "0.71081644", "0.7096947", "0.7055393", "0.7052589", "0.70480394", "0.70404226", "0.7014705", "0.69979", "0.69969946", "0.69903046", "0.69522476", "0.69349176", "0.6933544", "0.6930105", "0.691693", "0.6894734", "0.6867737", "0.68475574", "0.6838597", "0.6820481", "0.6804419", "0.6803951", "0.6773708", "0.6747393", "0.671819", "0.6710622", "0.6707875", "0.669341", "0.6691726", "0.6685575", "0.667654", "0.6676484", "0.6655242", "0.66547436", "0.6652607", "0.6645539", "0.66454494", "0.66438514", "0.6641358", "0.6623675", "0.66161656", "0.6606474", "0.6605607", "0.65822107", "0.6579878", "0.6577275", "0.6551614", "0.6542265", "0.65382254", "0.65222347", "0.65220475", "0.6505515", "0.64950997", "0.6489175", "0.64798015", "0.6472766", "0.6463535", "0.6456692", "0.6456033", "0.642951", "0.6424295", "0.641231", "0.64076805", "0.639808" ]
0.85818034
0
Split the file and save chunks to separate files
def split(self):
    print 'Splitting file', self.__filename
    print 'Number of chunks', self.__numchunks, '\n'

    try:
        f = open(self.__filename, 'rb')
    except (OSError, IOError), e:
        raise FileSplitterException, str(e)

    bname = (os.path.split(self.__filename))[1]
    # Get the file size
    fsize = os.path.getsize(self.__filename)
    # Get size of each chunk
    self.__chunksize = int(float(fsize)/float(self.__numchunks))

    chunksz = self.__chunksize
    total_bytes = 0

    for x in range(self.__numchunks):
        chunkfilename = bname + '-' + str(x+1) + self.__postfix

        # if reading the last section, calculate correct
        # chunk size.
        if x == self.__numchunks - 1:
            chunksz = fsize - total_bytes

        try:
            print 'Writing file',chunkfilename
            data = f.read(chunksz)
            total_bytes += len(data)
            chunkf = file(chunkfilename, 'wb')
            chunkf.write(data)
            chunkf.close()
        except (OSError, IOError), e:
            print e
            continue
        except EOFError, e:
            print e
            break

    print 'Done.'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def split_file(self, input_file):\r\n file_list = [] \r\n with open(input_file, 'r', encoding='GB18030', errors='ignore') as f_in:\r\n data = f_in.readlines()\r\n lines_num = len(data)\r\n size = lines_num // self.num_workers # lines splitted in a chunk\r\n start = 0\r\n end = size\r\n w_path = \"../data/\"\r\n for i in range(lines_num//size):\r\n chunk_name = \"chunk_\" + str(i) + \".dat\"\r\n with open(w_path + chunk_name, 'w', encoding='utf-8') as f_out:\r\n f_out.write(''.join(data[start:end]))\r\n start = start + size\r\n end = end + size\r\n file_list.append(\"../data/chunk_\" + str(i) + \".dat\")\r\n \r\n print(f\"File splitted into {self.num_workers} chunks.\")\r\n return file_list, size", "def write_chunks(file, chunks):\n\n\tfor c in chunks:\n\n\t\tchunk(file, c[0], c[1])", "def split_file(filename, split_num):\n root, ext = os.path.splitext(filename)\n with open(filename) as f:\n lines = f.readlines()\n total_line = len(lines)\n\n print lines[0].split('\\t')\n\n size = total_line / split_num\n\n print 'Total line: %d, splited file line number: %d' % (total_line, size)\n\n total_line - size * split_num\n for i in range(0, split_num):\n split_file = root + '_' + str(i+1) + ext\n\n start = i * size;\n end = (i+1) * size;\n if i == split_num - 1:\n end = total_line\n\n print 'splite file %s: line from %d to %d' % (split_file, start, end)\n\n with open(split_file, 'w') as fw:\n for j in range(start, end):\n fw.write('%s' % lines[j])", "def split(self):\n \n spl = self.which('split')\n if spl:\n self.__tmp = \"/tmp\"\n self.__tmpout = \"/tmp/output\"\n if not os.path.exists(self.__tmpout):\n os.makedirs(self.__tmpout)\n #os.chdir(\"/tmp\")\n '''\n assume split prog overwrites existing files if\n there is a conflict in file names\n '''\n #thecommand = \"%s -a 3 -b 500k %s %s/%s\" % (spl, self.__filename, self.__tmpout, self.__filename + self.__postfix)\n thecommand = \"%s -a 3 -b 10m %s %s/%s\" % (spl, self.__filename, self.__tmpout, self.__filename + self.__postfix)\n os.system(thecommand)\n dirList=os.listdir(self.__tmpout)\n #self.constructCat(dirList)\n for chunkfilename in dirList:\n #print chunkfilename \n #self.__cat += self.__remotepath + \"/\" + chunkfilename + \" \"\n #print self.__cat\n self.__flist.append(self.__tmpout + \"/\" + chunkfilename)\n #print self.__flist\n self.writeLog(chunkfilename, self.md5(fileName=self.__tmpout + \"/\" + chunkfilename))\n self.__numchunks = len([item for item in os.listdir(self.__tmpout) if os.path.isfile(self.__tmpout + \"/\" + item)])\n else:\n try:\n f = open(self.__filename, 'rb')\n except (OSError, IOError), e:\n raise FileSplitterException, str(e)\n \n bname = (os.path.split(self.__filename))[1]\n # Get the file size\n fsize = os.path.getsize(self.__filename)\n # dynamically calculate number of chunks\n strfsize = str(fsize)\n '''\n in MB's\n 8 - teens\n 9 - hundreds\n 10 - gigabytes\n '''\n if len(strfsize) == 8:\n #self.__numchunks = fsize/100000\n self.__numchunks = fsize/50000\n elif len(strfsize) == 9:\n #self.__numchunks = fsize/1000000\n self.__numchunks = fsize/500000\n elif len(strfsize) == 10:\n #self.__numchunks = fsize/10000000\n self.__numchunks = fsize/5000000\n #print '\\nSplitting file %s into %d chunks' % (self.__filename, self.__numchunks)\n # Get size of each chunk\n self.__chunksize = int(float(fsize)/float(self.__numchunks))\n \n chunksz = self.__chunksize\n total_bytes = 0\n \n for x in range(self.__numchunks):\n #chunkfilename = bname + '-' + str(x+1) + self.__postfix\n chunkfilename = bname + ('-%03d' % 
(x+1)) + self.__postfix\n # kill residual file if it exists\n if os.path.exists(chunkfilename):\n os.remove(chunkfilename)\n \"\"\"\n if reading the last section, calculate correct\n chunk size.\n \"\"\"\n if x == self.__numchunks - 1:\n chunksz = fsize - total_bytes\n \n try:\n if self.__debug:\n print 'Writing file chunk: %s' % chunkfilename\n data = f.read(chunksz)\n total_bytes += len(data)\n chunkf = file(chunkfilename, 'wb')\n chunkf.write(data)\n chunkf.close()\n #self.__cat += self.__remotepath + \"/\" + chunkfilename + \" \"\n self.__flist.append(chunkfilename)\n self.writeLog(chunkfilename, self.md5(fileName=chunkfilename))\n except (OSError, IOError), e:\n print e\n continue\n except EOFError, e:\n print e\n break\n\n print '\\nSplit complete on file: %s into %d chunks\\n' % (self.__filename, self.__numchunks)\n self.__logfhandle.close()\n #self.__cat += \"> \" + self.__remotepath + \"/\" + self.__filename\n self.set_cat_statement()", "def split_single_file(self, filename):\n file_size = os.path.getsize(filename)\n chunk_size = (file_size + self.worker_num - 1) / self.worker_num\n file_handler = open(filename, \"r\")\n chunks = []\n pos = 0\n while pos < file_size:\n next_pos = min(pos + chunk_size, file_size)\n if pos == 0:\n chunks.append((filename, pos, self.find_next_newline(file_handler, next_pos)))\n else:\n chunks.append((filename, self.find_next_newline(file_handler, pos), self.find_next_newline(file_handler, next_pos)))\n pos = next_pos\n file_handler.close()\n return chunks", "def combine(self):\n\n import re\n \n print 'Creating file', self.__filename\n \n bname = (os.path.split(self.__filename))[1]\n bname2 = bname\n \n # bugfix: if file contains characters like +,.,[]\n # properly escape them, otherwise re will fail to match.\n for a, b in zip(['+', '.', '[', ']','$', '(', ')'],\n ['\\+','\\.','\\[','\\]','\\$', '\\(', '\\)']):\n bname2 = bname2.replace(a, b)\n \n chunkre = re.compile(bname2 + '-' + '[0-9]+')\n \n chunkfiles = []\n for f in os.listdir(\".\"):\n print f\n if chunkre.match(f):\n chunkfiles.append(f)\n\n\n print 'Number of chunks', len(chunkfiles), '\\n'\n chunkfiles.sort(self.sort_index)\n\n data=''\n for f in chunkfiles:\n\n try:\n print 'Appending chunk', os.path.join(\".\", f)\n data += open(f, 'rb').read()\n except (OSError, IOError, EOFError), e:\n print e\n continue\n\n try:\n f = open(bname, 'wb')\n f.write(data)\n f.close()\n except (OSError, IOError, EOFError), e:\n raise FileSplitterException, str(e)\n\n print 'Wrote file', bname", "def split(self):\n if(self.back == 'y'):\n files = open(self.file_path,'r').read().split('Splitting Text')\n names = [self.file_path + str(num) for num in range(len(files))]\n for num,file in enumerate(files):\n open(names[num],'w').write(file)\n self.file_count += 1\n backNames = [self.file_path + str(num) + 'b' for num in range(len(files))]\n for num,file in enumerate(files):\n open(backNames[num],'w').write(file)\n else:\n files = open(self.file_path,'r').read().split('Splitting Text')\n names = [self.file_path + str(num) for num in range(len(files))]\n for num,file in enumerate(files):\n open(names[num],'w').write(file)\n self.file_count += 1", "def splitter(file_name: str, MAX_SIZE: int = 7):\n\n # convertion to MB\n MAX_SIZE = MAX_SIZE * 1024 * 1024\n\n # index go throught the bit stream\n start_index: int = 0\n\n # harvested data\n data: bytes = None\n\n created_files: int = 0\n\n with open(file_name, \"rb\") as input_stream:\n # while we didn't go out the file\n while data != b'':\n # we place the cursor 
at start index\n input_stream.seek(start_index)\n # read a chunk of size MAX_SIZE bytes\n data = input_stream.read(MAX_SIZE)\n\n if data == b'':\n break\n # then we open an output file\n with open(str(start_index) + \"_\" + file_name, \"wb\") as ouput_stream:\n # A write the related chunk in it\n ouput_stream.write(data)\n\n created_files += 1\n\n # we translate the cursor\n start_index += MAX_SIZE\n\n print(\"Done! \", created_files, \" files created\")", "def split_file(self):\n # process lines into blocks with Parser until EOF triggers StopIteration\n while self.maf_lines:\n try:\n # rest counters and open new file at the top of the loop AFTER\n # the most recent yield\n if self._stop:\n self._yield(new_file=True)\n # try to get next block from Parser and write to current file\n block_string = self.parser.get_block(self.maf_lines).next()\n self.current_file.write(block_string)\n # update char count for the current file\n self.char_count += len(block_string)\n # if char count crosses limit, yield current file name start new file\n if self._stop:\n yield self.current_filename\n\n except StopIteration:\n self._yield(new_file=False)\n yield self.current_filename", "def split_file(self, input_file, buffer=1024) -> str:\n file_size = os.stat(input_file).st_size\n with create_pg(total=file_size, leave=False, unit='B', unit_scale=True, unit_divisor=1024,\n desc='Splitting file') as t:\n\n with open(input_file, 'rb') as src:\n while True:\n with tempfile.NamedTemporaryFile() as f:\n with open(f.name, 'wb') as dest:\n written = 0\n while written < self.max_size:\n data = src.read(buffer)\n if data:\n dest.write(data)\n written += buffer\n t.update(len(data))\n else:\n if written == 0:\n return # file has ended on split size - don't yield\n\n break\n\n yield f.name", "def join_chunks(self):\n if self.state == self.STATE_UPLOADING and self.total_chunks_uploaded == self.total_chunks:\n\n # create file and write chunks in the right order\n temp_file = open(self.full_path, \"wb\")\n for chunk in self.chunks.all():\n chunk_bytes = chunk.file.read()\n temp_file.write(chunk_bytes)\n temp_file.close()\n\n # set state as completed\n self.state = self.STATE_COMPLETED\n super(FlowFile, self).save()\n\n # delete chunks automatically if is activated in settings\n if FLOWJS_AUTO_DELETE_CHUNKS:\n self.chunks.all().delete()", "def split_decode_file():\n # split files by chromosome\n header = []\n current_chrom = 'chr1'\n # file_template = decode_folder + '/{}.deCODE_2019.GRCh38.txt'\n file_template = decode_folder + '/{}.deCODE_2019_hg19.txt'\n decode_file = decode_folder + '/aau1043_DataS3_hg19_liftOver.bed'\n w = open(file_template.format(current_chrom), 'a')\n print('NOTE: appending to map files, not overwriting. 
may cause duplicates')\n with open(decode_file, 'r') as f:\n for line in f:\n # save the header info\n if line.startswith('#'):\n header.append(line)\n # save the column labels\n elif line.startswith('Chr'):\n header.append('# ' + line)\n # write header to first file now\n w.write(''.join(header))\n # the remaining lines are data\n else:\n # get the chromosome for the current line\n ch = line.split()[0]\n # if the chromosome matches the open file, write to it\n if ch == current_chrom:\n w.write(line)\n # if a new chromosome arises, switch to a new writefile\n else:\n w.close()\n current_chrom = ch\n w = open(file_template.format(current_chrom), 'a')\n # write header to file\n w.write(''.join(header))\n w.write(line)\n\n # close the last open file\n w.close()", "def splitFile(f, rootdir=\"/tmp\", splitCmd=\"/usr/bin/split\", chunkSize=\"100m\"):\n d = str(uuid.uuid4())\n path = os.path.join(rootdir, d)\n # I want it to fail hard here\n os.makedirs(path)\n prefix = os.path.join(path, \"chunk-\")\n subprocess.check_call([splitCmd, \"-b\", chunkSize, \"-d\", \"-a\", \"5\", f, prefix])\n chunks = glob.glob(os.path.join(path, \"chunk-*\"))\n chunks.sort()\n return chunks", "def split_file(in_file, num_splits, split_dir, mut_file):\n\n # create the output directory if it does\n # not exist\n if not os.path.exists(split_dir):\n os.mkdir(split_dir)\n\n # open the info file\n f = open(in_file)\n pdb_header = f.readline()\n\n # open the mutation file\n m = open(mut_file)\n mut_header = m.readline()\n\n # read into a dictionary containing\n # structure ids as keys and lines pertaining\n # to it as values\n pdb_dict = read_file(f)\n mut_dict = read_file(m)\n\n # determine total num of ids in file\n total_ids = len(list(pdb_dict.keys()))\n print(total_ids)\n # determine num of ids to put in each split\n num_ids = int(total_ids/num_splits)\n\n # counters\n count_file = 0\n count_id = num_ids\n\n # randomize order of insertions\n keys = list(pdb_dict.keys())\n random.shuffle(keys)\n\n # iterate through dict and write to files\n #for key in sorted(pdb_dict):\n for key in keys:\n\n # check if we need a new file\n if (count_id == num_ids and count_file < num_splits):\n count_id = 0\n pdb_out = open(split_dir + \"/pdb_info_split_\" + str(count_file) + \".txt\", 'w')\n pdb_out.write(pdb_header)\n mut_out = open(split_dir + \"/mut_info_split_\" + str(count_file) + \".txt\", 'w')\n mut_out.write(mut_header)\n count_file += 1\n\n # write all lines pertaining to the structure id\n for line in pdb_dict[key]:\n pdb_out.write(line)\n if key in mut_dict:\n for line in mut_dict[key]:\n mut_out.write(line)\n\n count_id += 1", "def data_process(self):\n logging.info('Processing the data and split files')\n lines = Utility.file_len(self.fname)\n self.lines_to_be, self.split_files = Utility.split_files(self.fname, lines,\n cpu_count().real)", "def go(self):\n num_fofs = self.fofs['fofid'].max()\n fof_splits = split.get_splits(num_fofs, self['chunksize'])\n\n for isplit,fof_split in enumerate(fof_splits):\n logger.info('%s %s' % (isplit,fof_split))\n self._write_split(isplit, fof_split)", "def divide_chunks(audio_file_, chunksize):\n\n for j in range(0, len(audio_file_), self.chunksize):\n yield audio_file[j:j + chunksize]", "def split_file(self):\n title = \"row_id,x,y,accuracy,time,place_id\\n\"\n print \"splitting files into grid files...\"\n sub_folder = os.path.join(Setting.grid_path, str(self.xsplit)+\"_\"+str(self.ysplit))\n if not os.path.exists(sub_folder):\n os.mkdir(sub_folder)\n for m in range(self.xsplit):\n # 
to avoid open too many files (ysplit should less than 1000 here)\n print \"starting No.\", m, \" subprocess...\"\n train_writers = []\n for n in range(self.ysplit):\n xfolder = os.path.join(sub_folder, str(m))\n if not os.path.exists(xfolder):\n os.mkdir(xfolder)\n yfolder = os.path.join(xfolder, str(n))\n if not os.path.exists(yfolder):\n os.mkdir(yfolder)\n train_file = os.path.join(yfolder, \"train.csv\")\n train_writers.append(open(train_file, \"w\"))\n train_writers[-1].write(title)\n\n for record in read_record(self.train_path):\n place_id = record[-1]\n rec_str = \",\".join([str(x) for x in record])\n for n in range(self.ysplit):\n row_id = 1\n slot = m*self.ysplit + n\n if place_id in self.grid_place[slot]:\n train_writers[n].write(str(row_id) + \",\" + rec_str + \"\\n\")\n row_id += 1\n\n for writer in train_writers:\n writer.close()\n\n test_writers = []\n for n in range(self.ysplit):\n test_file = os.path.join(sub_folder, str(m), str(n), \"test.csv\")\n test_writers.append(open(test_file, \"w\"))\n test_writers[-1].write(title)\n\n for record in read_record(self.test_path):\n x_ind, y_ind = grid_cut(record[0], record[1], self.xsplit, self.ysplit)\n grid_slot = x_ind*self.ysplit + y_ind\n for n in range(self.ysplit):\n row_id = 1\n slot = m*self.ysplit + n\n if grid_slot == slot:\n rec_str = \",\".join([str(x) for x in record])\n test_writers[n].write(str(row_id) + \",\" + rec_str + \"\\n\")\n row_id += 1\n\n for writer in test_writers:\n writer.close()", "def split_start(infiles, outfiles):\n\n # split always runs exactly one job (unlike @subdivide)\n # So it implicitly combines all its inputs before running and generating multiple output\n # @originate generates multiple output so the input for @split is a list...\n infile = infiles[0]\n\n # clean up previous\n for f in outfiles:\n os.unlink(f)\n\n\n #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n #\n # Create more files than the previous invocation\n #\n #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n n_to_produce = len(outfiles) + 1\n for i in range(n_to_produce):\n f = '{}{}.split'.format(tempdir, i)\n open(f, 'a').close()", "def split_train_into_chunks(chunk_size):\n for syscall_type in SYSCALLS:\n syscalls_split_file = open(f\"{TEMP_DIR}/{syscall_type}-split.train\", \"w\")\n snd_train_path = f\"{FILE_PATH}/{syscall_type}/{syscall_type}.train\"\n with open(snd_train_path) as train_file:\n for syscall in train_file:\n # Generate all n-grams of the current syscall\n n_grams = extract_n_grams(syscall.strip(),chunk_size,unique=True)\n if len(n_grams)==0:\n continue\n # Write n-grams to syscall chunks file\n syscalls_split_file.writelines(n_grams)\n syscalls_split_file.close()", "def splitFileIntoShards(filename, shardsize):\n os.popen('split -a 4 -d --additional-suffix=_shard -l{} {}'.format(shardsize, filename))", "def split_file(file_path, chunk_output_dir, compress=True, encrypt_key=None):\n\n file_size = os.stat(file_path).st_size\n logger.debug('original file size is %s' % file_size)\n chunk_sizes = []\n data_stream = ''\n if encrypt_key:\n esalt = encrypt_key.binary_salt\n if esalt is None:\n esalt = ''\n assert isinstance(encrypt_key, AESKey)\n # account for iv\n # size should be same as key\n iv = os.urandom(16)\n assert len(iv) == len(encrypt_key.binary_key)\n encryptor = Cipher(\n CIPHER_MODE, encrypt_key.binary_key, iv, CIPHER_ENCODE\n )\n data_stream = data_stream + esalt + iv\n if not compress:\n chunk_sizes = calc_chunk_sizes(file_size + len(data_stream))\n logger.debug('splitting %s into %s' 
% (file_path, chunk_output_dir))\n logger.debug('compress: %s' % compress)\n if encrypt_key:\n logger.debug('encrypt: True')\n f = open(file_path, 'rb')\n chunks = []\n chunk_prefix = 'chunk'\n if compress:\n compressor = zlib.compressobj(9)\n chunk_prefix = 'tmp_chunk'\n # figure out the size of the first chunk\n if chunk_sizes:\n chunk_size = chunk_sizes.pop(0)\n else:\n chunk_size = CHUNK_SIZE_MAX\n\n def chunk_stream(data_stream, chunk_size, check_size=True):\n # check_size is for the last bit of data that is smaller than a\n # chunk when data is compressed and the data sizes are\n # unpredictable.\n min_size = chunk_size\n if not check_size:\n min_size = 1\n while len(data_stream) >= min_size:\n chunk_data = data_stream[:chunk_size]\n # If compressing, will have to create new chunks later.\n chunks.append(ClientChunk.create(chunk_data, chunk_output_dir,\n prefix=chunk_prefix))\n data_stream = data_stream[chunk_size:]\n if chunk_sizes:\n # next chunk size may be different\n chunk_size = chunk_sizes.pop(0)\n return (data_stream, chunk_size)\n\n while f.tell() < file_size:\n data = f.read(CHUNK_SIZE_MAX)\n if compress:\n data = compressor.compress(data)\n if encrypt_key:\n data = encryptor.update(data)\n assert not encryptor.final()\n data_stream += data\n data_stream_len = len(data_stream)\n logger.debug('data stream length: %s' % data_stream_len)\n (data_stream, chunk_size) = chunk_stream(data_stream, chunk_size)\n # process data not chunked yet\n logger.debug('%s bytes left over' % len(data_stream))\n if compress:\n # may have compressed data left.\n flushed_data = compressor.flush()\n if flushed_data:\n logger.debug(\n 'another %s bytes of flushed data' % len(flushed_data))\n if encrypt_key:\n flushed_data = encryptor.update(flushed_data)\n assert not encryptor.final()\n data_stream += flushed_data\n if data_stream:\n (data_stream, chunk_size) = chunk_stream(data_stream, chunk_size,\n False)\n assert not chunk_sizes\n f.close()\n # finished initial data chunking.\n new_size = sum((c.size - 4) for c in chunks)\n if not compress:\n emsg = ('original size was %s. 
expected new size to be '\n '%s, but it is %s')\n expected_size = file_size\n if encrypt_key:\n expected_size = file_size + len(esalt) + len(iv)\n emsg = emsg % (file_size, expected_size, new_size)\n assert expected_size == new_size, emsg\n else:\n # must reorganize the chunks.\n new_chunks = []\n chunk_sizes = calc_chunk_sizes(new_size)\n # just replace the old chunk with the new one.\n data_stream = ''\n for chunk_size in chunk_sizes:\n # read the old chunks until there is enough to write.\n while len(data_stream) < chunk_size:\n old_chunk = chunks.pop(0)\n data_stream += old_chunk.read(raw=True)[:-4]\n # free up the space\n os.unlink(old_chunk.file_path)\n # small files will not fill a chunk\n if not chunks:\n break\n chunk_data = data_stream[:chunk_size]\n new_chunks.append(ClientChunk.create(chunk_data, chunk_output_dir))\n data_stream = data_stream[chunk_size:]\n chunks = new_chunks\n # There should not be anything left over.\n assert not data_stream\n # size for comparison\n size_ratio = 1.0 * new_size / file_size\n logger.debug('new size (combined chunks) is %s bytes' % new_size)\n logger.debug('size ratio is %f' % size_ratio)\n logger.debug('split file into %s chunks' % len(chunks))\n return chunks", "def mergeAndSaveFile(dumpMetaFile, chunkSizeFile, outFile):\n dump = open (dumpMetaFile, \"r\")\n chunk = open (chunkSizeFile, \"r\")\n out = open (outFile, \"w\")\n \n cline = \"\"\n cline = chunk.readline()\n cline = cline.rstrip(\"\\n\")\n\n while dump:\n dline = dump.readline()\n if not dline:\n break\n dline = dline.rstrip(\"\\n\")\n \n # Split line parts \n dlineParts = dline.split(' ')\n \n # Read lines from chunkSize\n numEntries = int(dlineParts[2])\n \n entries = []\n for i in range(numEntries):\n entries.append([dlineParts[i*3 + 3], dlineParts[i*3 + 4], dlineParts[i*3 + 5], 0])\n #entries[i][0] = dlineParts[i*3 + 3]\n #entries[i][1] = dlineParts[i*3 + 4]\n #entries[i][2] = dlineParts[i*3 + 5]\n #entries[i][3] = 0\n\n while True:\n clineParts = cline.split(' ')\n if ((dlineParts[0] == clineParts[0]) and (dlineParts[1] == clineParts[1])):\n for i in range(numEntries):\n if ((entries[i][0] == clineParts[3]) and (entries[i][1] == clineParts[4])):\n entries[i][3] = clineParts[2]\n else:\n break\n cline = chunk.readline()\n cline = cline.rstrip(\"\\n\")\n if not cline:\n break\n\n # Print output\n out.write(dlineParts[0]+\" \"+dlineParts[1]+\" \"+dlineParts[2]+\" \")\n for i in range(numEntries):\n out.write(str(entries[i][3])+\" \"+entries[i][0]+\" \"+entries[i][1]+\" \"+entries[i][2]+\" \")\n out.write(\"\\n\")\n out.close()", "def split(self):\n overall_chunks = []\n for filename in self.get_all_files():\n file_chunks = self.split_single_file(filename)\n overall_chunks.extend(file_chunks)\n return overall_chunks", "def split(self):\n\n # FIXME: user should be able to change the default behavior of\n # this function (for instance user may require one filter not\n # to split the content of the input file and the same input \n # to be used by the next filter.\n \n utils.split_file(self.files['hit_ids'],\n self.files['input'],\n self.files['filtered_reads'],\n self.files['survived_reads'])", "def chunk_input(self, input_files, chunksize):\n part_lists = [] # Lists of partial files\n known_nlines = None\n part_suffix = \"\"\n chunk_nlines = chunksize * 2\n\n for input_file in input_files:\n # Count number of lines in the file\n nlines = int(command.execute_with_output(\"wc -l %s\" % input_file)\n .strip().split()[0])\n # Number of lines should be the same in paired files\n if 
known_nlines is not None:\n msg = \"Mismatched line counts in supposedly paired files: {}\".format(\n input_files)\n assert nlines == known_nlines, msg\n known_nlines = nlines\n\n # Set number of pieces and names\n numparts = (nlines + chunk_nlines - 1) // chunk_nlines\n ndigits = len(str(numparts - 1))\n part_suffix = \"-chunksize-%d-numparts-%d-part-\" % (chunksize, numparts)\n out_prefix_base = os.path.basename(input_file) + part_suffix\n out_prefix = os.path.join(self.chunks_result_dir_local, out_prefix_base)\n\n # Split large file into smaller named pieces\n command.execute(\"split -a %d --numeric-suffixes -l %d %s %s\" %\n (ndigits, chunk_nlines, input_file, out_prefix))\n command.execute_with_retries(f\"aws s3 sync --only-show-errors {self.chunks_result_dir_local}/ {self.chunks_result_dir_s3}/ --exclude '*' --include '{out_prefix_base}*'\")\n\n # Get the partial file names\n partial_files = []\n paths = command.execute_with_output(\"ls %s*\" % out_prefix).rstrip().split(\"\\n\")\n for pf in paths:\n partial_files.append(os.path.basename(pf))\n\n # Check that the partial files match our expected chunking pattern\n pattern = \"{:0%dd}\" % ndigits\n expected_partial_files = [(out_prefix_base + pattern.format(i))\n for i in range(numparts)]\n msg = \"something went wrong with chunking: {} != {}\".format(\n partial_files, expected_partial_files)\n assert expected_partial_files == partial_files, msg\n part_lists.append(partial_files)\n\n # Ex: [[\"input_R1.fasta-part-1\", \"input_R2.fasta-part-1\"],\n # [\"input_R1.fasta-part-2\", \"input_R2.fasta-part-2\"],\n # [\"input_R1.fasta-part-3\", \"input_R2.fasta-part-3\"],...]\n input_chunks = [list(part) for part in zip(*part_lists)]\n return part_suffix, input_chunks", "def splitting():\n n = 1\n with open('numbers.txt', 'r+') as f:\n f.readline()\n seek_2 = f.tell()\n seek_1 = 0\n\n while seek_1 != seek_2:\n print(n)\n n += 1\n with open('numbers.txt', 'r+') as f, open('numbers.txt', 'r+') as f_2:\n f.seek(seek_1)\n f_2.seek(seek_2)\n seek_1, seek_2 = merge(f, f_2)\n\n make_result_file(seek_1)", "def splitFileContents(f, delimiter, BLOCKSIZE=8192):\n remainder = StringIO()\n while True:\n block = f.read(BLOCKSIZE)\n if not block:\n break\n parts = block.split(delimiter)\n remainder.write(parts[0])\n for part in parts[1:]:\n yield remainder.getvalue()\n remainder = StringIO()\n remainder.write(part)\n yield remainder.getvalue()", "def split_data(raw_data, output_pref):\n train_data = output_pref + \".train\"\n test_data = output_pref + \".test\"\n random.shuffle(raw_data)\n with open(train_data, \"w\", encoding=\"utf8\") as fw1:\n with open(test_data, \"w\", encoding=\"utf8\") as fw2:\n with open(train_data + \".raw\", \"w\", encoding=\"utf8\") as fw3:\n with open(test_data + \".raw\", \"w\", encoding=\"utf8\") as fw4:\n for idx, (line, item) in enumerate(raw_data):\n if idx < 1000:\n fw2.write(line + \"\\n\")\n fw4.write(\"\\t\".join([str(i) for i in item]) + \"\\n\")\n else:\n fw1.write(line + \"\\n\")\n fw3.write(\"\\t\".join([str(i) for i in item]) + \"\\n\")", "def _split_and_write(\n path: str,\n saved_model: saved_model_pb2.SavedModel,\n max_size: int,\n export_files: Sequence[str],\n):\n constants.debug_set_max_size(max_size)\n\n if \"pbtxt\" in export_files:\n output_path = f\"{path}.pbtxt\"\n file_io.write_string_to_file(output_path, str(saved_model))\n logging.info(\" %s written\", output_path)\n if \"pb\" in export_files:\n output_path = f\"{path}.pb\"\n file_io.write_string_to_file(output_path, saved_model.SerializeToString())\n 
logging.info(\" %s written\", output_path)\n if \"cpb\" in export_files:\n splitter = split_saved_model.SavedModelSplitter(saved_model)\n splitter.write(path)\n chunks, _ = splitter.split()\n if len(chunks) > 1:\n logging.info(\" %s.cpb written\", path)\n else:\n raise RuntimeError(\n \"For some reason this graph was not chunked, so a .cpb file was not\"\n \" produced. Raising an error since this should not be the case.\"\n )", "def split (input_file, output_format):\n \n input = pyPdf.PdfFileReader( open(input_file, \"rb\") )\n \n for i in range(input.numPages):\n output = pyPdf.PdfFileWriter()\n output.addPage(input.getPage(i))\n \n with open(output_format % (i + 1), \"wb\") as outputStream:\n output.write(outputStream)\n\n return True", "def iterate_chunks(file, chunk_size):\n chunk = file.read(chunk_size)\n while chunk:\n yield chunk\n chunk = file.read(chunk_size)", "def build_chunks(read_bytes, file_size, chunk_size):\n\n chunks = []\n\n index = 0\n start = 0\n\n while start < file_size:\n end = min(start + chunk_size, file_size)\n size = end - start\n\n chunk = FileChunk(index, size, partial(read_bytes, start, size))\n chunks.append(chunk)\n\n index += 1\n start += chunk_size\n\n return chunks", "def split_file(filename):\n xmlnum = []\n end_pos = 0\n start_pos = 0\n current_pos = 0\n file = open(filename, 'r')\n for number, content in enumerate(file):\n if content.startswith(\"<?xml\"):\n xmlnum.append(number)\n\n for i in range(len(xmlnum)):\n splitfile = \"{}-{}\".format(PATENTS, i)\n start_pos = end_pos\n current_pos = end_pos\n newfile = open(splitfile, 'w+')\n print i+1\n with open(PATENTS, 'r') as f:\n for line in f:\n if (i + 1) == len(xmlnum):\n current_pos += 1\n if current_pos >= start_pos and current_pos <= len(open(PATENTS, 'r') .readlines()):\n newfile.writelines(line)\n else:\n current_pos += 1\n end_pos = xmlnum[i + 1]\n if current_pos >= start_pos and current_pos <= end_pos:\n newfile.writelines(line)", "def get_chunks(self,file_size):\n chunk_start = 0\n chunk_size = 0xA00000 # 10485760 bytes, default max ssl buffer size\n while chunk_start + chunk_size <= file_size:\n yield(chunk_start, chunk_size)\n chunk_start += chunk_size\n final_chunk_size = file_size - chunk_start\n yield(chunk_start, final_chunk_size)", "def writeChunk(chunk):", "def read_and_save(self, filename, headers, socket=0):\n \n ret, total_lines, max_lines = self.newportxps._xps.GatheringCurrentNumberGet(socket)\n nchunks = self.determine_num_chunks(total_lines, socket)\n print('Number of chunks to split up gather file: ' + str(nchunks))\n lines_per_chunk = int(total_lines/nchunks)\n remaining_lines = total_lines - lines_per_chunk*nchunks\n with open(filename, 'w') as f:\n for header in headers:\n f.write(\"## \" + header + \"\\n\")\n for i in range(nchunks):\n start = lines_per_chunk*i \n ret, buffer = self.newportxps._xps.GatheringDataMultipleLinesGet(socket, start, lines_per_chunk)\n f.write(buffer)\n start = lines_per_chunk * nchunks\n ret, buffer = self.newportxps._xps.GatheringDataMultipleLinesGet(socket, start, remaining_lines)\n f.write(buffer)\n print('Successfully saved gather file at: ' + str(filename))\n return", "def split_file(filename):\n \n \n#tree = ElementTree.ElementTree()\n#root = ElementTree.Element(\"root\")\n#a = ElementTree.Element(\"a\")\n#a.text = \"1\"\n#root.append(a)\n#tree._setroot(root)\n#tree.write(\"sample.xml\" \n\n \n find_counter = 0\n check_counter = 0 \n tree_file = files()\n #outfile = next(tree_file)\n \n \n with open(filename,mode =\"r\") as file :\n \n for 
line in file :\n \n if line.startswith(\"<?xml\"):\n outfile = next(tree_file)\n outfile.write(line)", "def saveToFile(fileName):\n outfile = open (fileName, \"w\")\n chunkInfoKeys = gChunkMap.keys()\n chunkInfoKeys.sort()\n\n for chunkInfo in chunkInfoKeys:\n c = gChunkMap[chunkInfo]\n outfile.write(c.printChunkInfo())\n outfile.write(\"\\n\");", "def split_large_osm_file(filename):\n command = \"java -Xmx4000M -jar ./lib/splitter.jar --output=xml --output-dir=data --max-nodes=15000 \" + filename + \" > splitter.log\"\n os.system(command)", "def splitFile(filename, n):\n in_file = open(filename)\n line = in_file.readline()\n count = 0\n while line <> \"\":\n if count < 10: num = \"0\"+str(count)\n else: num = str(count)\n f = open(\"output/\"+filename+\"-\"+num,\"w\")\n for i in range(n):\n if line == \"\": break\n f.write(line)\n line = in_file.readline()\n f.close()\n count += 1\n return count", "def split_input(self):\n namenode = self.runner.namenode\n splitter = Splitter(RECORDS_PER_BLOCK)\n results = []\n input_files = []\n for fname in self.inputs:\n input_files.append(RecordFile(fname, namenode))\n\n taskid = 0\n for block in splitter.split(input_files):\n fname = map_input(self.id, taskid)\n taskid += 1\n namenode.create_file(fname)\n\n bytes_written = 0\n for record in block:\n bytes_written += namenode.write_file(fname, bytes_written,\n record)\n\n namenode.close_file(fname)\n results.append(fname)\n self.open_files.append(fname)\n\n for file_ in input_files:\n file_.close()\n\n return results", "def split_data_corpus(filename):\n\n fid = 1\n with open(filename, 'r') as infile:\n f = open('%s-%s.txt' % (filename.strip('.txt'), fid), 'wb')\n for line, doc in enumerate(infile):\n f.write(doc)\n if not line % 1000 and line > 1:\n f.close()\n fid += 1\n f = open('%s-%s.txt' % (filename.strip('.txt'), fid),\n 'wb')\n f.close()", "def split_text_file(data_file, model_dir, eval_fraction):\n with io.open(data_file, 'r', encoding='utf-8') as fp:\n data = fp.readlines()\n\n random.shuffle(data)\n\n root, ext = os.path.splitext(data_file)\n train_file = os.path.join(model_dir, \"{}-train{}\".format(root, ext))\n eval_file = os.path.join(model_dir,\"{}-eval{}\".format(root, ext))\n train_offset = int(len(data)*(1-eval_fraction))\n\n if not os.path.exists(train_file) or not os.path.exists(eval_file):\n tf.logging.info('Splitting into train and test datasets..')\n with io.open(train_file, 'w', encoding='utf-8') as tfp,\\\n io.open(eval_file, 'w', encoding='utf-8') as efp:\n\n for i, line in enumerate(data):\n if i < train_offset:\n tfp.write(line)\n else:\n efp.write(line)\n\n return train_file, eval_file", "def split_files(filepath, max_size):\n for filename in os.listdir(filepath):\n abspath = f\"{filepath}/{filename}\"\n if os.stat(abspath).st_size >= max_size * MEGABYTE:\n csvheader = None\n split_files = []\n with open(abspath, \"r\") as fhandle:\n csvreader = csv.reader(fhandle)\n csvheader = next(csvreader)\n LOG.debug(f\"Header: {csvheader}\")\n\n part = 1\n while True:\n newfile, eof = write_part(\n abspath, csvreader, csvheader, num=part, size=(max_size * MEGABYTE))\n split_files.append(newfile)\n part += 1\n if eof or part >= MAX_SPLITS:\n break\n\n os.remove(abspath)\n\n # return the list of split files to stdout\n LOG.info(f\"Split files: {split_files}\")", "def write_chunk(chunk, token):\n dest = rem_dir('grab')\n # input(dest)\n file_name = '{}_{}'.format('cpf_temp', token)\n dest_file_name = os.path.join(os.path.abspath(dest), file_name)\n # input(dest_file_name)\n 
WRITE_STREAM = open(dest_file_name, 'wb')\n WRITE_STREAM.write(chunk)\n WRITE_STREAM.close()\n\n return True", "def chunkify(self, size=1024*1024*5):\n with open(self.file_name_raw, 'rb') as file:\n chunk_end = file.tell()\n while True:\n chunk_start = chunk_end\n file.seek(size, 1)\n file.readline()\n chunk_end = file.tell()\n\n if chunk_end > self.file_end:\n chunk_end = self.file_end\n yield chunk_start, chunk_end - chunk_start\n break\n else:\n yield chunk_start, chunk_end - chunk_start", "def chunk(f, n, data):\n\n\t# Chunk ID\n\tf.write(number(2, n))\n\t# Chunk length\n\tf.write(number(4, len(data)))\n\t# Data\n\tf.write(data)", "def divide_fasta_like_file(input_file, output_dir, ext=''):\n with open(input_file, 'r') as file:\n body = ''\n p_id = ''\n for line in file:\n if line[0] == '>':\n if len(p_id) > 0:\n with open(output_dir + p_id.replace(':', '_') + '.' + ext, \"w\") as out_file:\n out_file.write('>' + p_id.replace(':', '_') + '\\n' + body + '\\n')\n body = ''\n p_id = line.strip()[1:]\n else:\n body += line.strip()\n with open(output_dir + p_id.replace(':', '_') + '.' + ext, \"w\") as out_file:\n out_file.write('>' + p_id.replace(':', '_') + '\\n' + body + '\\n')", "def slice_file(filepath, partsize):\n filesize = os_path_getsize(filepath)\n startpos = 0 # file pointer start position, defaults to 0\n buffersize = 1024 * 1024 # file pointer move forward buffersize each time\n md5OfWholeFile = get_md5(filepath, md5())\n md5OfFileSlices = md5()\n metaData = {}\n # slice file into partfiles\n with open(filepath, 'rb') as reader:\n while startpos < filesize:\n # We use partsize num from 1 just for convenience\n for num in xrange(1, filesize / partsize + 2):\n partfile = '{filepath}_{num:02d}'.format(\\\n filepath=filepath,\\\n num=num)\n # endpos of each file slice\n # move file pointer to endpos for reading data chunk\n if partsize * num > filesize:\n endpos = filesize\n else:\n endpos = partsize * num\n md5sumOfSlice = md5() # store md5sum of each slice\n with open(partfile, 'wb') as writer:\n while startpos < endpos:\n if startpos + buffersize > endpos:\n readsize = endpos - startpos\n else:\n readsize = buffersize\n data = reader.read(readsize)\n writer.write(data)\n md5sumOfSlice.update(data)\n startpos += readsize\n # store necessary data\n md5OfFileSlices = get_md5(partfile, md5OfFileSlices)\n partfilename = os_path_basename(partfile)\n metaDataOfSlice = (partfilename, md5sumOfSlice.hexdigest())\n metaData.setdefault('parts', []).append(metaDataOfSlice)\n #\n # compare md5sum of the whole file with md5sum of file slices\n # if both are the same then succeed to complete\n # otherwise failed\n if md5OfWholeFile.hexdigest() == md5OfFileSlices.hexdigest():\n md5sumOfWholeFile = md5OfWholeFile.hexdigest()\n filename = os_path_basename(filepath)\n metaData['whole'] = {'name': filename, 'md5sum': md5sumOfWholeFile}\n # pickle dump meta data into index file\n indexfile = filepath + '.idx'\n with open(indexfile, 'wb') as fp:\n pickle_dump(metaData, fp, protocol=2)\n print('Succeed! 
MD5 is EQUAL: {md5}'.format(md5=md5sumOfWholeFile))\n else:\n print('MD5 is NOT EQUAL, {wholefile} != {fileslices}'.format(\\\n wholefile=md5OfWholeFile.hexdigest(),\\\n fileslices=md5OfFileSlices.hexdigest()))", "def test_chunks(year, day, part_number):\n chunks = []\n chunk_index = -1\n data_file_lines(part_number).each do |line|\n if line[0] == '#'\n chunk_index += 1\n chunks[chunk_index] = [line[1..-1].strip, []]\n elsif chunk_index >= 0\n chunks[chunk_index][1] << line\n end\n end\n chunks", "def save(self, file: Union[str, BinaryIO]=None) -> bytes:\n # Store all the chunks data as zlib compressed nbt data\n chunks_data = []\n for chunk in self.chunks:\n if chunk is None:\n chunks_data.append(None)\n continue\n chunk_data = BytesIO()\n if isinstance(chunk, Chunk):\n nbt_data = nbt.NBTFile()\n nbt_data.tags.append(nbt.TAG_Int(name='DataVersion', value=chunk.version))\n nbt_data.tags.append(chunk.data)\n else:\n nbt_data = chunk.save()\n nbt_data.write_file(buffer=chunk_data)\n chunk_data.seek(0)\n chunk_data = zlib.compress(chunk_data.read())\n chunks_data.append(chunk_data)\n\n # This is what is added after the location and timestamp header\n chunks_bytes = bytes()\n offsets = []\n for chunk in chunks_data:\n if chunk is None:\n offsets.append(None)\n continue\n # 4 bytes are for length, b'\\x02' is the compression type which is 2 since its using zlib\n to_add = (len(chunk)+1).to_bytes(4, 'big') + b'\\x02' + chunk\n\n # offset in 4KiB sectors\n sector_offset = len(chunks_bytes) // 4096\n sector_count = math.ceil(len(to_add) / 4096)\n offsets.append((sector_offset, sector_count))\n\n # Padding to be a multiple of 4KiB long\n to_add += bytes(4096 - (len(to_add) % 4096))\n chunks_bytes += to_add\n\n locations_header = bytes()\n for offset in offsets:\n # None means the chunk is not an actual chunk in the region\n # and will be 4 null bytes, which represents non-generated chunks to minecraft\n if offset is None:\n locations_header += bytes(4)\n else:\n # offset is (sector offset, sector count)\n locations_header += (offset[0] + 2).to_bytes(3, 'big') + offset[1].to_bytes(1, 'big')\n\n # Set them all as 0\n timestamps_header = bytes(4096)\n\n final = locations_header + timestamps_header + chunks_bytes\n\n # Pad file to be a multiple of 4KiB in size\n # as Minecraft only accepts region files that are like that\n final += bytes(4096 - (len(final) % 4096))\n assert len(final) % 4096 == 0 # just in case\n\n # Save to a file if it was given\n if file:\n if isinstance(file, str):\n with open(file, 'wb') as f:\n f.write(final)\n else:\n file.write(final)\n return final", "def split_per(folderin, folderout, split_col='ECO_ID', colNms=['i_h100','i_cd',\n 'doy','i_wflen','i_acqdate','b1','vcf','ECO_NAME','ECO_ID','BIOME','geometry']):\n\n split_files = glob.glob(folderin + '*.shp')\n\n for filename in split_files:\n print(filename)\n basename = os.path.splitext(os.path.basename(filename))[0]\n dfa = gpd.read_file(filename)\n df = dfa.astype({split_col: 'int32'}) \n ecoNames = list(np.unique(df[split_col]))#get list of unique ecoregions \n \n for eco in ecoNames:\n #create new df with just columns I want\n df2 = gpd.GeoDataFrame(df, columns=colNms)\n ID = str(eco)\n df_eco = df.loc[df2[split_col]==eco, colNms]\n df_eco.to_file(folderout + '/{}_eco_{}.shp'.format(basename, ID))", "def decompose(self, file_name):\n print(\"[+] Decompose started...\")\n with open(file_name, \"rb\") as image_file:\n\n # We check if the directory chunks doesn't exist, then, we create it\n if not 
path.exists(\"./chunks/\"):\n makedirs(\"chunks\")\n \n to_print = b64.b64encode(image_file.read()).decode('utf-8')\n size = len(to_print)\n re_size = self.verify_size_content(self.divide(size))\n content = \"\"\n i = 0\n\n print(\"[+] FILENAME: \" + str(file_name))\n print(\"[+] \" + str(re_size))\n print(\"[+] SIZE: \" + str(size))\n \n while to_print:\n content = to_print[:re_size['chunck']]\n title = md5(content[:300].encode()).hexdigest()\n self.map[i] = title\n self.chunk_array.append({title: content})\n print(\"> chunck: \" + title)\n\n system(\"mkdir ../chunks/\")\n # Optionnal, to saved the chunks\n with open(\"../chunks/\" + title, \"w+\") as file:\n file.write(content)\n # Optionnal, to saved the chunks\n to_print = to_print[re_size['chunck']:]\n i += 1\n print(\"[+] Decompose done.\")\n print(\"-------\")", "def go(self):\n\n self._write_master()\n num_fofs = self.fofs['fofid'].max()\n fof_splits = split.get_splits(num_fofs, self['chunksize'])\n\n njobs=0\n fobj=None\n\n icondor=0\n for isplit,fof_split in enumerate(fof_splits):\n if njobs % self['jobs_per_sub']==0:\n if fobj is not None:\n fobj.close()\n fobj = self._open_condor_script(icondor)\n icondor += 1\n\n self._write_split(fobj, isplit, fof_split)\n\n njobs += 1", "def main(opts):\n\n # split the file\n split_file(opts['in_file'], opts['num_splits'], opts['split_dir'], opts['mut_file'])", "def chunk_data(path, chunksize):\n reader = pandas.read_table(path, chunksize=chunksize, skiprows=0)\n\n start = 0\n for chunk in reader:\n stop = start + len(chunk) - 1\n dataframe_to_csv(chunk, file=get_chunk_file_name(path, (start, stop)))\n start = stop + 1\n\n return alphabetize_chunk_files(os.path.basename(path))", "def splitter(fasta_file, output, limit, large_handling=False):\n file_ = open(fasta_file, 'r')\n file_count = 1\n outfile = open(output.rstrip(\"/\")+\"/%s_%05d.fa\"%(\n fasta_file.split('/')[-1].split('.')[0],file_count),'w')\n nt_count = 0\n for seq in SeqIO.parse(fasta_file, 'fasta'):\n if large_handling == True and len(str(seq.seq)) >= int(limit):\n file_count += 1\n largefile = open(output.rstrip(\"/\")+\"/%s_%05d_XL.fa\"%(\n fasta_file.split('/')[-1].split('.')[0],file_count),'w')\n largefile.write(\">\"+str(seq.description)+\"\\n\"+\"\\n\".join(\n str(seq.seq)[i:i+50]for i in range(0,len(seq.seq),50))+\"\\n\")\n largefile.close()\n else:\n nt_count += len(str(seq.seq))\n outfile.write(\">\"+str(seq.description)+\"\\n\"+\"\\n\".join(\n str(seq.seq)[i:i+50]for i in range(0,len(seq.seq),50))+\"\\n\") \n if nt_count >= int(limit):\n outfile.close()\n file_count += 1\n nt_count = 0\n outfile = open(output.rstrip(\"/\")+\"/%s_%05d.fa\"%(\n fasta_file.split('/')[-1].split('.')[0],file_count),'w')\n outfile.close()", "def split_standard(path: str, export_files: Sequence[str]):\n fn1 = [100, 100, 100]\n fn2 = [100, 500]\n fn3 = [100]\n fn4 = [100, 100]\n\n max_size = 500\n constants.debug_set_max_size(max_size)\n\n graph_def = test_util.make_graph_def_with_constant_nodes(\n STANDARD_SIZES, fn1=fn1, fn2=fn2, fn3=fn3, fn4=fn4\n )\n proto = saved_model_pb2.SavedModel()\n proto.meta_graphs.add().graph_def.CopyFrom(graph_def)\n\n _split_and_write(path, proto, max_size, export_files)", "def _split_chunk(self, collection_name: str, key: int):\n def split_command():\n self._mongo_client.admin.command('split', collection_name, middle={SHARD_KEY: key})\n self._try_until_done(split_command)\n self._chunks[collection_name][key] = MAIN_MONGO_SHARD_NAME\n logging.info(f\"MongoAgent: Split chunk of {collection_name} at {key}\")", 
"def test_write_read_fif_split_file():\n bids_root = _TempDir()\n tmp_dir = _TempDir()\n bids_path = _bids_path.copy().update(root=bids_root, datatype='meg')\n raw = _read_raw_fif(raw_fname, verbose=False)\n n_channels = len(raw.ch_names)\n n_times = int(2.2e9 / (n_channels * 4)) # enough to produce a split\n data = np.empty((n_channels, n_times), dtype=np.float32)\n raw = mne.io.RawArray(data, raw.info)\n big_fif_fname = pathlib.Path(tmp_dir) / 'test_raw.fif'\n raw.save(big_fif_fname)\n raw = _read_raw_fif(big_fif_fname, verbose=False)\n write_raw_bids(raw, bids_path, verbose=False)\n\n raw1 = read_raw_bids(bids_path=bids_path)\n assert 'split-01' in str(bids_path.fpath)\n\n bids_path.update(split='01')\n raw2 = read_raw_bids(bids_path=bids_path)\n bids_path.update(split='02')\n raw3 = read_raw_bids(bids_path=bids_path)\n assert len(raw) == len(raw1)\n assert len(raw) == len(raw2)\n assert len(raw) > len(raw3)", "def create_chunks(file_names):\n\n\tnew_chunks = []\n\n\tfor name in file_names:\n\n\t\t# Find the .inf file and read the details stored within\n\t\ttry:\n\t\t\tdetails = open(name + suffix + 'inf', 'r').readline()\n\t\texcept IOError:\n\n\t\t\ttry:\n\t\t\t\tdetails = open(name + suffix + 'INF', 'r').readline()\n\t\t\texcept IOError:\n\t\t\t\tprint(\"Couldn't open information file, %s\" % name+suffix+'inf')\n\t\t\t\tsys.exit()\n\n\t\t# Parse the details\n\t\tdetails = [string.rstrip(details)]\n\n\t\tsplitters = [' ', '\\011']\n\n\t\t# Split the details up where certain whitespace characters occur\n\t\tfor s in splitters:\n\n\t\t\tnew_details = []\n\n\t\t\t# Split up each substring (list entry)\n\t\t\tfor d in details:\n\n\t\t\t\tnew_details = new_details + string.split(d, s)\n\n\t\t\tdetails = new_details\n\n\t\t# We should have details about the load and execution addresses\n\n\t\t# Open the file\n\t\ttry:\n\t\t\tin_file = open(name, 'rb')\n\t\texcept IOError:\n\t\t\tprint(\"Couldn't open file, %s\" % name)\n\t\t\tsys.exit()\n\n\t\t# Find the length of the file (don't rely on the .inf file)\n\t\tin_file.seek(0, 2)\n\t\tlength = in_file.tell()\n\t\tin_file.seek(0, 0)\n\n\t\t# Examine the name entry and take the load and execution addresses\n\t\tdot_at = string.find(details[0], '.')\n\t\tif dot_at != -1:\n\t\t\treal_name = details[0][dot_at+1:]\n\t\t\tload, exe = details[1], details[2]\n\t\telse:\n\t\t\treal_name = get_leafname(name)\n\t\t\tload, exe = details[0], details[1]\n\n\t\tload = hex2num(load)\n\t\texe = hex2num(exe)\n\n\t\tif load == None or exe == None:\n\t\t\tprint('Problem with %s: information is possibly incorrect.' 
% name+suffix+'inf')\n\t\t\tsys.exit()\n\n\t\t# Reset the block number to zero\n\t\tblock_number = 0\n\n\t\t# Long gap\n\t\tgap = 1\n\t\n\t\t# Write block details\n\t\twhile True:\n\t\t\tblock, last = write_block(in_file, real_name, load, exe, length, block_number)\n\n\t\t\tif gap == 1:\n\t\t\t\tnew_chunks.append((0x110, number(2,0x05dc)))\n\t\t\t\tgap = 0\n\t\t\telse:\n\t\t\t\tnew_chunks.append((0x110, number(2,0x0258)))\n\n\t\t\t# Write the block to the list of new chunks\n\n\t\t\t# For old versions, just write the block\n\t\t\tif UEF_major == 0 and UEF_minor < 9:\n\t\t\t\tnew_chunks.append((0x100, block))\n\t\t\telse:\n\t\t\t\tnew_chunks.append((0x100, block))\n\n\t\t\tif last == 1:\n\t\t\t\tbreak\n\n\t\t\t# Increment the block number\n\t\t\tblock_number = block_number + 1\n\n\t\t# Close the input file\n\t\tin_file.close()\n\n\t# Write some finishing bytes to the list of new chunks\n#\tnew_chunks.append((0x110, number(2,0x0258)))\n#\tnew_chunks.append((0x112, number(2,0x0258)))\n\n\t# Return the list of new chunks\n\treturn new_chunks", "def get_file_chunks(file, model, workspace, header, user):\n uri = (\"https://api.anaplan.com/1/3/workspaces/{}/models/{}/\"\n \"files/{}/chunks/\").format(workspace, model, file)\n response = requests.get(uri, headers = header)\n return json.loads(response.text.encode(\"utf-8\"))", "def _write_split(self, fobj, isplit, fof_split):\n\n start, end = fof_split\n\n output_file = files.get_split_output(\n self['run'],\n start,\n end,\n ext='fits',\n )\n log_file = files.get_split_output(\n self['run'],\n start,\n end,\n ext='log',\n )\n\n d={}\n d['seed'] = self._get_seed()\n d['output_file'] = os.path.abspath(output_file)\n d['fit_config'] = self['fit_config']\n d['fof_file'] = self['fof_file']\n d['start'] = start\n d['end'] = end\n d['logfile'] = os.path.abspath(log_file)\n d['job_name']='%s-%06d-%06d' % (self['run'], start, end)\n\n job = _condor_job_template % d\n\n fobj.write(job)", "def Seperate(f_read, f_write_name):\n lines = f_read.readlines()\n line_s = [line.split() for line in lines]\n\n for i in range(6, 13):\n nbytes = pow(2,i)\n f_write = f_write_name + str(nbytes) + \"b.txt\"\n f = open(f_write, \"w+\")\n\n for line in line_s:\n if line[3] == str(nbytes):\n f.write(\" \".join(line))\n f.write(\"\\n\")\n f.close()", "def handle_uploaded_file(f, fname):\n with open(fname, 'wb+') as destination:\n for chunk in f.chunks():\n destination.write(chunk)", "def split_chunk(list, chunk_size):\n for i in range(0, len(list), chunk_size):\n yield list[i:i + chunk_size]", "def split_by_contigs(self, output_dir: Path = None) -> None:\n if output_dir is None:\n output_dir = (\n Path(self._input_file.parent) / \"split_\" + self._input_file.name\n )\n else:\n output_dir = Path(output_dir)\n output_dir.mkdir(parents=True, exist_ok=True)\n contigs = pyfastx.Fasta(\n self.file_path.as_posix(), build_index=False, full_name=True\n )\n for contig_name, seq in contigs:\n output_file = (\n output_dir / f\"{contig_name.split(' ')[0]}{self._input_file.suffix}\"\n )\n with open(output_file, \"w+\", encoding=\"UTF-8\") as outfile:\n outfile.write(f\">{contig_name}\\n\")\n outfile.write(seq + \"\\n\")", "def save_chunks(chunk_sound, out_path, video_id):\n chunk_start_ms = int(chunk_sound.get_start_time()*1000)\n chunk_end_ms = int(chunk_sound.get_end_time()*1000)\n chunk_duration = chunk_end_ms - chunk_start_ms\n\n chunk_fn = '{0}_{1}_{2}.wav'.format(video_id, chunk_start_ms, chunk_end_ms)\n chunk_file_path = path.join(out_path, chunk_fn)\n 
chunk_sound.save(chunk_file_path, 'WAV')\n\n return {'filename': chunk_fn, 'video_id': video_id, 'start_time': chunk_start_ms, 'end_time': chunk_end_ms, 'duration': chunk_duration}", "def _chunks(filename, start):\n with open(filename, 'r') as f:\n buffer = []\n for line in f:\n if line.startswith(start):\n if buffer:\n yield buffer\n buffer = []\n else:\n buffer.append(line.strip())", "def partition_files(list_of_files, number_of_parts):\n return np.array_split(list_of_files, number_of_parts)", "def read_in_chunks(self):\n chunksize = 10 ** 3\n lines_number = sum(1 for line in open(self.filepath))\n self.progressMaximum.emit(lines_number // chunksize)\n dfList = []\n\n # self.df = traja.read_file(\n # str(filepath),\n # index_col=\"time_stamps_vec\",\n # parse_dates=[\"time_stamps_vec\"],\n # )\n\n TextFileReader = pd.read_csv(\n self.filepath,\n index_col=\"time_stamps_vec\",\n parse_dates=[\"time_stamps_vec\"],\n chunksize=chunksize,\n )\n for idx, df in enumerate(TextFileReader):\n df.index = pd.to_datetime(df.index, format=\"%Y-%m-%d %H:%M:%S:%f\")\n dfList.append(df)\n self.intReady.emit(idx)\n self.completed.emit(dfList)\n self.finished.emit()", "def transform_split(cfg, file_name, xlmr, label2id):\n def is_not_none(e):\n \"\"\"\n Helper function to filter nulls.\n \"\"\"\n return e is not None\n\n def generate_groups():\n \"\"\"\n Generates groups for serialization.\n \"\"\"\n groups = group_elements(\n generate_examples(file_name),\n cfg.tfrecord_size)\n\n # pairing groups to unique numbers and \n # filtering nulls from zip_longest\n groups = (\n list(filter(is_not_none, group))\n for group in groups\n )\n\n yield from groups\n\n split_name = splitext(basename(file_name))[0]\n\n tfrecord_name = join(\n cfg.data_dir,\n split_name + '.{}.tfrecord')\n\n tfrecord_name = abspath(tfrecord_name)\n\n encode_fn = functools.partial(\n encode_example,\n xlmr=xlmr,\n label2id=label2id)\n\n def generate_results():\n \"\"\"\n Performs serialization and generates\n the resulting file names and sizes.\n \"\"\"\n for idx, examples in enumerate(generate_groups()):\n # converting iterators to list so resources\n # are not shared in concurrent workers\n yield write_tfrecord(\n examples=examples,\n encode_fn=encode_fn,\n file_name=tfrecord_name.format(idx))\n\n # generates split sizes and filenames \n # of the tfrecords\n tfrecord_paths, sizes = zip(*generate_results())\n\n return tfrecord_paths, sum(sizes)", "def split_multiple_recordings_file(file_path, min_silence_duration=0.25, noise_threshold=150):\n print(file_path)\n rate, audio = scipy.io.wavfile.read(file_path)\n split_recordings = split_multiple_recordings(audio, min_silence_duration=min_silence_duration,\n noise_threshold=noise_threshold, sample_rate_hz=rate)\n\n if file_path.count('.') != 1:\n raise Exception('File_path must contain exactly one period, usually in extension. 
IE: /home/test.wav')\n\n for idx, recording in enumerate(split_recordings):\n print(\"spliting \" + file_path)\n new_file_path = file_path.split('.')[0] + '_' + str(idx) + \".wav\"\n scipy.io.wavfile.write(new_file_path, rate, recording)", "def do_process_user_file_chunks(\n page_size: int, error_handler: ErrorHandler, position: int, participant: Participant\n):\n \n # FIXME: this is a gross hack to force some time related safety, which is only ever used deep\n # inside of data processing.\n common_constants.LATEST_POSSIBLE_DATA_TIMESTAMP = \\\n int(time.mktime((timezone.now() + timedelta(days=90)).timetuple()))\n \n # Declare a defaultdict of a tuple of 2 lists\n all_binified_data = defaultdict(lambda: ([], []))\n ftps_to_remove = set()\n # The ThreadPool enables downloading multiple files simultaneously from the network, and continuing\n # to download files as other files are being processed, making the code as a whole run faster.\n # In principle we could make a global pool that is free-memory aware.\n pool = ThreadPool(CONCURRENT_NETWORK_OPS)\n survey_id_dict = {}\n \n # A Django query with a slice (e.g. .all()[x:y]) makes a LIMIT query, so it\n # only gets from the database those FTPs that are in the slice.\n # print(participant.as_dict())\n print(\"Number Files To Process:\", participant.files_to_process.exclude(deleted=True).count())\n print(f\"will process {page_size} files.\")\n print(\"current count processing within this run:\", position)\n \n # TODO: investigate, comment. ordering by path results in files grouped by type and\n # chronological order, which is perfect for download efficiency... right? would it break anthing?\n files_to_process = participant.files_to_process \\\n .exclude(deleted=True) #.order_by(\"s3_file_path\", \"created_on\")\n \n # This pool pulls in data for each FileForProcessing on a background thread and instantiates it.\n # Instantiating a FileForProcessing object queries S3 for the File's data. (network request))\n files_for_processing = pool.map(\n FileForProcessing, files_to_process[position: position + page_size], chunksize=1\n )\n \n for file_for_processing in files_for_processing:\n with error_handler:\n process_one_file(\n file_for_processing, survey_id_dict, all_binified_data, ftps_to_remove\n )\n pool.close()\n pool.terminate()\n \n # there are several failure modes and success modes, information for what to do with different\n # files percolates back to here. 
Delete various database objects accordingly.\n more_ftps_to_remove, number_bad_files, earliest_time_bin, latest_time_bin = upload_binified_data(\n all_binified_data, error_handler, survey_id_dict, participant\n )\n ftps_to_remove.update(more_ftps_to_remove)\n \n # Update the data quantity stats, if it actually processed any files\n if len(files_to_process) > 0:\n calculate_data_quantity_stats(participant,\n earliest_time_bin_number=earliest_time_bin,\n latest_time_bin_number=latest_time_bin)\n \n # Actually delete the processed FTPs from the database\n FileToProcess.objects.filter(pk__in=ftps_to_remove).delete()\n return number_bad_files", "def read_in_chunks(self, file_object, chunk_size=10240):\n\n while True:\n data = file_object.read(chunk_size)\n if not data:\n break\n self.log.debug(\"Transimitting a %s Chunk \", len(data))\n yield data", "def iter_chunks(file: io.BytesIO, chunk_size: int = 64 * 1024):\n while True:\n data = file.read(chunk_size)\n if not data:\n break\n yield data", "def split(self, start, nsamps, filename=None, gulp=1024,\n back_compatible=True, **kwargs):\n if filename is None:\n filename = f\"{self.header.basename}_{start:d}_{start+nsamps:d}.fil\"\n new_tstart = self.header.tstart + ((self.header.tsamp * start) / 86400.0)\n out_file = self.header.prepOutfile(\n filename, updates={'tstart': new_tstart}, nbits=self.header.nbits\n )\n for _count, _ii, data in self.readPlan(\n gulp, start=start, nsamps=nsamps, **kwargs,\n ):\n out_file.cwrite(data)\n out_file.close()\n return out_file.name", "def split_sff(sff_file_handles, map_file_handle, outdir=\"/tmp/\"):\r\n\r\n try:\r\n (flowgrams, header) = cat_sff_files(sff_file_handles)\r\n except ValueError:\r\n # reading in the binary sff usually shows up as ValueError\r\n raise FileFormatError('Wrong flogram file format. Make sure you pass the sff.txt format ' +\r\n 'produced by sffinfo. 
The binary .sff will not work here.')\r\n\r\n (inverse_map, map_count) = build_inverse_barcode_map(\r\n parse_fasta(map_file_handle))\r\n\r\n filenames = []\r\n # we might have many barcodes and reach python open file limit\r\n # therefor we go the slow way and open and close files each time\r\n # First set up all files with the headers only\r\n for barcode_id in map_count.keys():\r\n fh = open(outdir + barcode_id, \"w\")\r\n write_sff_header(header, fh, map_count[barcode_id])\r\n fh.close()\r\n filenames.append(outdir + barcode_id)\r\n # Then direct each flowgram into its barcode file\r\n for f in flowgrams:\r\n if f.Name in inverse_map:\r\n barcode_id = inverse_map[f.Name]\r\n fh = open(outdir + barcode_id, \"a\")\r\n fh.write(f.createFlowHeader() + \"\\n\")\r\n return filenames", "def read_and_split_sets():\n gen_train_test_sets(\"Data_Sent_Embds/en_sent.pkl\", \"Data_Sent_Embd_Splitted/en_train.pkl\",\n \"Data_Sent_Embd_Splitted/en_test.pkl\")\n gen_train_test_sets(\"Data_Sent_Embds/es_sent.pkl\", \"Data_Sent_Embd_Splitted/es_train.pkl\",\n \"Data_Sent_Embd_Splitted/es_test.pkl\")\n gen_train_test_sets(\"Data_Sent_Embds/pr_sent.pkl\", \"Data_Sent_Embd_Splitted/pr_train.pkl\",\n \"Data_Sent_Embd_Splitted/pr_test.pkl\")", "def zi_spliting(info_list, path, mode='train'):\r\n new_info_list = list()\r\n for infos in info_list:\r\n if mode == 'train':\r\n [user, age, gender, education, querys] = infos\r\n elif mode == 'test':\r\n [user, querys] = infos\r\n new_querys = list()\r\n for query in querys:\r\n new_query = list()\r\n for zi in query:\r\n new_query.append(zi)\r\n new_querys.append(new_query)\r\n if mode == 'train' :\r\n new_info_list.append((user, age, gender, education, new_querys))\r\n elif mode == 'test' :\r\n new_info_list.append((user, new_querys))\r\n # write in the disk\r\n with open(path, 'w') as fw:\r\n for info in new_info_list:\r\n if mode == 'train' :\r\n user, age, gender, education, querys = info\r\n query_str = '\\t'.join([' '.join(query) for query in querys])\r\n fw.writelines((user + '\\t' + age + '\\t' + gender + '\\t' + education +\\\r\n '\\t' + query_str + '\\n').encode('gb18030'))\r\n elif mode == 'test' :\r\n user, querys = info\r\n query_str = '\\t'.join([' '.join(query) for query in querys])\r\n fw.writelines((user + '\\t' + query_str + '\\n').encode('gb18030'))", "def packFiles(source, filesPerBlock, dest):\n\tfileCount = 1\n\t\n\ttmpFileName = \"tmp.h5\"\t\n\n\n\toutFile = createBlockFile(tmpFileName)\t\n\tfor dirname, subdirs, files in os.walk(source):\t\n\t print 'Scanning ' + dirname + '...'\t\n\t for f in files:\t\n\t if f.endswith('.h5'):\t\n\t inFile = h5py.File(os.path.join(dirname, f), 'r')\t\n\t outFile.copy(inFile, outFile['songs'], f)\t\n\t inFile.close()\t\n\t fileCount = fileCount + 1\t\n\t if(fileCount > filesPerBlock):\t\n\t outFile.close()\t\n\t upload(tmpFileName, bucket)\t\n\t fileCount = 1\t\n\t outFile = createBlockFile(tmpFileName)\t\n\n \toutFile.close()\n \tif fileCount > 1:\n\t \tupload(tmpFileName, bucket)\n\n\tos.remove(tmpFileName)", "def file_sync_write_chunks(radosobject, chunksize, offset, chunks, size=None):\n padding = 0\n cursize = chunksize * offset\n radosobject.seek(cursize)\n for chunk in chunks:\n if padding:\n radosobject.sync_write(buffer(zeros(chunksize), 0, padding))\n if size is not None and cursize + chunksize >= size:\n chunk = chunk[:chunksize - (cursize - size)]\n radosobject.sync_write(chunk)\n cursize += len(chunk)\n break\n radosobject.sync_write(chunk)\n padding = chunksize - len(chunk)\n\n padding = size - 
cursize if size is not None else 0\n if padding <= 0:\n return\n\n q, r = divmod(padding, chunksize)\n for x in xrange(q):\n radosobject.sunc_write(zeros(chunksize))\n radosobject.sync_write(buffer(zeros(chunksize), 0, r))", "def store_file(filename1, filename2):\n print 'Splitting ', filename1, ' into encoded comments for keys'\n file_list = read_file_into_list(filename1)\n output_file = open(filename2, 'w')\n counter_length = len(file_list)\n counter = 0\n for chunk in file_list:\n print 'Creating key ', counter, ' of ', counter_length\n counter = counter + 1\n key_id = create_key(chunk)\n output_file.write(send_key(key_id)+'\\n')\n print '--> key has been created and uploaded'\n print 'File has been successfully uploaded to ', KEYSERVER", "def _read_in_chunks(self, file_object, blocksize=4096, chunks=-1,\n shard_index=None):\n i = 0\n while chunks:\n data = file_object.read(blocksize)\n if not data:\n break\n yield data\n i += 1\n\n chunks -= 1", "def __do_write(filestream, seq, header=None):\n if header is not None:\n filestream.write(header + '\\n') # double check newlines\n try:\n for line in chunks(seq, 70):\n filestream.write(line + '\\n')\n except Exception as e:\n print(e)", "def combine_chunks(chunks, output_path, decompress=False, encrypt_key=None):\n\n msg = 'combining %s chunks' % len(chunks)\n logger.info(msg)\n\n salt_length = 0\n key_length = 0\n if encrypt_key:\n if encrypt_key.binary_salt:\n salt_length = len(encrypt_key.binary_salt)\n assert salt_length == 16, salt_length\n key_length = len(encrypt_key.binary_key)\n assert key_length == 16, key_length\n f = open(output_path, 'wb')\n if decompress:\n decompressor = zlib.decompressobj()\n if encrypt_key:\n # salt, then iv\n iv = chunks[0].read(byte_range=[salt_length, salt_length + key_length])\n decryptor = Cipher(\n CIPHER_MODE, encrypt_key.binary_key, iv, CIPHER_DECODE\n )\n # sort out any parity chunks\n parity_chunks = []\n while chunks[-1].parity:\n parity_chunks.insert(0, chunks.pop())\n if parity_chunks and has_par2():\n cwd1 = os.getcwd()\n os.chdir(os.path.dirname(parity_chunks[0].file_path))\n # The files have to end in .par2 to work.\n for parity_chunk in parity_chunks:\n new_name = '%s.%s' % (parity_chunk.filename, 'par2')\n os.rename(parity_chunk.filename, new_name)\n parity_chunk.file_path = os.path.join(\n os.path.dirname(parity_chunk.file_path), new_name)\n # It won't recognize them by name, so put them on the command line.\n par2_repair([p.filename for p in parity_chunks])\n os.chdir(cwd1)\n for (i, chunk) in enumerate(chunks):\n chunk.verify_checksum()\n if i == 0 and encrypt_key:\n chunk_size = chunk.size\n if encrypt_key.binary_salt:\n # strip salt and IV\n data = chunk.read(\n byte_range=[len(encrypt_key.binary_key) + \\\n len(encrypt_key.binary_salt),\n chunk.size-4]\n )\n else:\n # skip the IV\n data = chunk.read(\n byte_range=[len(encrypt_key.binary_key), chunk.size-4]\n )\n else:\n data = chunk.read()\n if encrypt_key:\n data = decryptor.update(data)\n assert not decryptor.final()\n if decompress:\n data = decompressor.decompress(data)\n assert not decompressor.unused_data\n f.write(data)\n f.close()\n logger.debug('file size is %s' % os.stat(output_path).st_size)", "def _wiki_dump_to_many_dumps( env_dict ):\n wiki_file = env_dict[\"wiki\"][\"big_xml\"]\n if not os.path.exists(wiki_file):\n logger.warning(u\"Wiki [%s] does not exists!\", wiki_file)\n return\n\n chunk_size = env_dict[\"wiki\"][\"wikis_file_buffer\"]\n buffer_size = chunk_size\n file_limit = 
env_dict[\"wiki\"][\"wikis_file_limit\"]\n\n pos = 0\n buf_leftover = \"\"\n\n def should_end( b ):\n if b == \"\":\n raise IOError(\"end reached\")\n\n wiki_file_out_templ = wiki_file + u\".part%s.xml\"\n\n with open(wiki_file, 'rb') as f_wiki:\n buf = f_wiki.read(chunk_size)\n to_find = \">\"\n first_page = buf.find(to_find)\n header = buf[:first_page + len(to_find)]\n footer = \"\\n</mediawiki>\"\n\n page_end = \"</page>\"\n first_time = True\n try:\n with open(wiki_file, 'rb', buffer_size) as f_wiki:\n while buf != \"\":\n read = 0\n pos += 1\n wiki_file_out = unicode(wiki_file_out_templ % pos)\n with open(wiki_file_out, 'wb+') as f_out:\n logger.info(\"Working on [%s]\", wiki_file_out)\n if not first_time:\n f_out.write(header)\n else:\n first_time = False\n while read < file_limit:\n buf = buf_leftover + f_wiki.read(chunk_size)\n buf_leftover = \"\"\n should_end(buf)\n read += len(buf)\n f_out.write(buf)\n # find page\n buf = f_wiki.read(chunk_size)\n if buf != \"\":\n page_end_pos = buf.find(page_end)\n assert page_end_pos >= 0, \"something fishy happened\"\n page_end_pos += len(page_end)\n f_out.write(buf[:page_end_pos])\n buf_leftover = buf[page_end_pos:]\n f_out.write(footer)\n except IOError:\n pass", "def splice_a_chunk_in_a_file(self, file_data, glitch_num):\n start_point, end_point = self.get_random_start_and_end_points_in_file(file_data)\n section = file_data[start_point:end_point]\n repeated = ''\n\n for i in range(1, glitch_num):\n repeated += section\n\n new_start_point, new_end_point = self.get_random_start_and_end_points_in_file(file_data)\n file_data = file_data[:new_start_point] + repeated + file_data[new_end_point:]\n return file_data", "def write(self, file):\n #write header\n for variable in self.variables:\n variable.write(file)\n for subchunk in self.subchunks:\n subchunk.write(file)", "def chunk_motifs(file_type, motifs, num_workers=4, min_ic=None):\n\n temp_dir = tempfile.gettempdir()\n\n if min_ic is not None:\n motifs = list(itertools.compress(motifs, [m.information_content >= min_ic for m in motifs]))\n\n if num_workers == 1:\n file_name = os.path.join(temp_dir, \"chunk1.mchunk\")\n file_type.write(file_name, motifs)\n return [file_name]\n\n chunk_size = math.ceil(len(motifs) / num_workers)\n\n files = []\n\n for i in range(num_workers):\n file_name = os.path.join(temp_dir, \"chunk\" + str(i) + \".mchunk\")\n file_type.write(file_name, motifs[i * chunk_size:min((i + 1) * chunk_size, len(motifs))])\n files.append(file_name)\n\n return files", "def extract_chunks(the_files, the_bands=None):\n ds_config = {}\n gdal_ptrs = []\n datatypes = []\n for the_file in the_files:\n g = gdal.Open(the_file)\n gdal_ptrs.append(gdal.Open(the_file))\n datatypes.append(GDAL2NUMPY[g.GetRasterBand(1).DataType])\n\n block_size = g.GetRasterBand(1).GetBlockSize()\n nx = g.RasterXSize\n ny = g.RasterYSize\n if the_bands is None:\n the_bands = np.arange(g.RasterCount) + 1\n proj = g.GetProjectionRef()\n geoT = g.GetGeoTransform()\n ds_config['nx'] = nx\n ds_config['ny'] = ny\n ds_config['nb'] = g.RasterCount\n ds_config['geoT'] = geoT\n ds_config['proj'] = proj\n block_size = [block_size[0]*2, block_size[1]*2]\n print(\"Blocksize is (%d,%d)\" % (block_size[0], block_size[1]))\n # block_size = [ 256, 256 ]\n # store these numbers in variables that may change later\n nx_valid = block_size[0]\n ny_valid = block_size[1]\n # find total x and y blocks to be read\n nx_blocks = (int)((nx + block_size[0] - 1) / block_size[0])\n ny_blocks = (int)((ny + block_size[1] - 1) / block_size[1])\n 
buf_size = block_size[0] * block_size[1]\n ################################################################\n # start looping through blocks of data\n ################################################################\n # loop through X-lines\n for X in range(nx_blocks):\n # change the block size of the final piece\n if X == nx_blocks - 1:\n nx_valid = nx - X * block_size[0]\n buf_size = nx_valid * ny_valid\n\n # find X offset\n this_X = X * block_size[0]\n\n # reset buffer size for start of Y loop\n ny_valid = block_size[1]\n buf_size = nx_valid * ny_valid\n\n # loop through Y lines\n for Y in range(ny_blocks):\n # change the block size of the final piece\n if Y == ny_blocks - 1:\n ny_valid = ny - Y * block_size[1]\n buf_size = nx_valid * ny_valid\n\n # find Y offset\n this_Y = Y * block_size[1]\n data_in = []\n for ig, ptr in enumerate(gdal_ptrs):\n buf = ptr.ReadRaster(this_X, this_Y, nx_valid, ny_valid,\n buf_xsize=nx_valid, buf_ysize=ny_valid,\n band_list=the_bands)\n a = np.frombuffer(buf, dtype=datatypes[ig])\n data_in.append(a.reshape((\n len(the_bands), ny_valid, nx_valid)).squeeze())\n\n yield (ds_config, this_X, this_Y, nx_valid, ny_valid,\n data_in)", "def splitMerge(self):\n\t\tpath_merge = self.aug_merge_path\n\t\tpath_train = self.aug_train_path\n\t\tpath_label = self.aug_label_path\n\t\tfor i in range(self.slices):\n\t\t\tpath = path_merge + \"/\" + str(i)\n\t\t\ttrain_imgs = glob.glob(path+\"/*.\"+self.img_type)\n\t\t\tsavedir = path_train + \"/\" + str(i)\n\t\t\tif not os.path.lexists(savedir):\n\t\t\t\tos.mkdir(savedir)\n\t\t\tsavedir = path_label + \"/\" + str(i)\n\t\t\tif not os.path.lexists(savedir):\n\t\t\t\tos.mkdir(savedir)", "def WriteBlobToFile(self, request):\n # First chunk truncates the file, later chunks append.\n if request.offset == 0:\n mode = \"w+b\"\n else:\n mode = \"r+b\"\n\n temp_file = tempfiles.CreateGRRTempFile(\n filename=request.write_path, mode=mode)\n with temp_file:\n path = temp_file.name\n temp_file.seek(0, 2)\n if temp_file.tell() != request.offset:\n raise IOError(\"Chunks out of order Error.\")\n\n # Write the new chunk.\n temp_file.write(request.executable.data)\n\n return path", "def split_data(path_to_data, path_to_save_train,\n path_to_save_val, split_size = 0.1):\n \n folders = os.listdir(path_to_data)\n\n # get the data and split it\n for folder in folders:\n full_path = os.path.join(path_to_data, folder)\n images_paths = glob.glob(os.path.join(full_path, '*.png')) # image paths\n\n # split the data\n x_train, x_val = train_test_split(images_paths, test_size = split_size)\n\n for x in x_train:\n path_to_folder = os.path.join(path_to_save_train, folder)\n\n if not os.path.isdir(path_to_folder): # if the dir. not exist\n os.makedirs(path_to_folder) # create the directory\n \n print(\"Copying \", x, \" to \", path_to_folder)\n shutil.copy(x, path_to_folder)\n\n for x in x_val:\n path_to_folder = os.path.join(path_to_save_val, folder)\n\n if not os.path.isdir(path_to_folder): # if the dir. 
not exist\n os.makedirs(path_to_folder) # create the directory\n \n print(\"Copying \", x, \" to \", path_to_folder)\n shutil.copy(x, path_to_folder)", "def write_to_master_file(\n self, all_files=[], filename=sys.argv[2], separator=sys.argv[3]\n ) -> None:\n if filename == \"\":\n raise EnvironmentError(\"No filename provided!\")\n\n first_file = all_files[0]\n\n with open(filename, \"w+\") as master:\n with open(first_file, \"r+\") as initial_write:\n for line in initial_write:\n master.write(line)\n\n if len(all_files) > 1:\n for i in range(1, len(all_files)):\n master.write(separator)\n with open(all_files[i], \"r+\") as file_to_append:\n for line in file_to_append:\n master.write(line)", "def data_split(path):\n path = Path(path)\n with open(path / 'metadata.csv', encoding='utf-8') as f:\n lines = [line.strip() for line in f]\n n = len(lines)\n index = np.random.RandomState(313).permutation(n)\n split = dict(train=[lines[i] for i in index[:-int(0.1 * n)]],\n valid=[lines[i] for i in index[-int(0.1 * n):]])\n for key in ['train', 'valid']:\n with open(path / f'metadata_{key}.csv', 'w') as writer:\n writer.write('\\n'.join(split[key]))", "def temp_split(filename):\n filename, ext = filename.rsplit('.')\n data = np.load(filename + \".\" + ext)\n # define basic constants from parent\n A = data['a']\n A_SIZE = A.shape[0]\n A_SHAPE = A.shape\n ORIGINAL_SIZE = data['original_size']\n B = data['b']\n # basics\n ki, kj, m = np.sum(A, 1), np.sum(A, 0), np.sum(np.sum(A, 1))\n # eval & evec\n eval, evec = linalg.eigh(B)\n # split\n g1_order, g1_arrays, g2_order, g2_arrays = create_g(A, evec)\n g1, g2 = create_g_matrix(g1_order, g1_arrays), create_g_matrix(g2_order, g2_arrays)\n # threshold (q)\n q1 = create_q(A_SIZE, B, g1_order, m)\n q2 = create_q(A_SIZE, B, g2_order, m)\n # B of G\n b1 = create_b_of_g(B, g1_order)\n b2 = create_b_of_g(B, g2_order)\n # a_elems\n a1_elems = []\n a2_elems = []\n original_elems = data['a_elems']\n for i in g1_order:\n a1_elems.append(original_elems[i])\n for i in g2_order:\n a2_elems.append(original_elems[i])\n return Part(filename + ',1', ext, q1, g1.shape[0], ','.join([str(x) for x in a1_elems])), \\\n Part(filename + ',2', ext, q2, g2.shape[0], ','.join([str(x) for x in a2_elems]))", "def split(filepath, nsamples):\n start = np.cumsum([0] + list(nsamples[:-1]))\n if filepath[-10:] == 'analog.brw':\n filename = filepath[:-10]\n analog = read_3brain_analog(filepath)\n for i, (s,n) in enumerate(zip(start, nsamples)):\n name = f\"{filename}_part_{i}_analog.npz\"\n print(f\"Saving {name}\")\n sampling_rate = glia.sampling_rate(filepath)\n np.savez(name, analog=analog[s:s+n],\n sampling_rate=sampling_rate)\n elif filepath[-4:] == \".bxr\":\n filename = filepath[:-4]\n # split spike-sorted data\n with h5py.File(filepath, 'r') as h5:\n # shared setup for the concatenated arrays\n sampling_rate = float(h5[\"3BRecInfo\"][\"3BRecVars\"][\"SamplingRate\"][0])\n channel_map = h5[\"3BRecInfo\"][\"3BMeaStreams\"][\"Raw\"][\"Chs\"][()]\n \n # map 3brain unit num\n # numbers typically from -4 to 9000\n # where negative numbers appear across multiple channels\n # and thus are presumably bad units...?\n # positive-numbered units appear on one channel\n unit_id_2_num = {}\n\n n_unit_nums = 0\n if \"SpikeUnits\" in h5[\"3BResults\"][\"3BChEvents\"]:\n for chunk in iter_chunks(h5['3BResults/3BChEvents/SpikeUnits'], 10000):\n n_unit_nums = max(n_unit_nums, chunk.max())\n \n unit_map = {}\n channel_unit_count = {}\n\n\n # operate on each of the concatenated arrays, one at a time\n for 
i, (s,n) in enumerate(zip(start, nsamples)):\n startTime = s / sampling_rate\n first_idx = None\n for chunk in iter_chunks(h5['3BResults/3BChEvents/SpikeTimes'], 10000):\n valid_idxs = np.argwhere(h5[\"3BResults/3BChEvents/SpikeTimes\"] > s)\n if len(valid_idxs) > 0:\n first_idx = valid_idxs[0][0]\n break\n assert not first_idx is None\n print(f\"identified start idx of {first_idx}.\")\n\n # for simplicity, we just iterate again, could have faster implementation\n last_idx = len(h5['3BResults/3BChEvents/SpikeTimes'])\n chunk_size = 10000\n for j, chunk in enumerate(iter_chunks(h5['3BResults/3BChEvents/SpikeTimes'], chunk_size)):\n invalid_idxs = np.argwhere(chunk > s + n)\n if len(invalid_idxs) > 0:\n last_idx = invalid_idxs[0][0] + j*chunk_size\n break\n print(f\"identified stop idx of {last_idx}.\")\n \n spike_channel_ids = h5[\"3BResults\"][\"3BChEvents\"][\"SpikeChIDs\"][first_idx:last_idx]\n spike_unit_ids = h5[\"3BResults\"][\"3BChEvents\"][\"SpikeUnits\"][first_idx:last_idx]\n # poorly named; time is in units of 1/sampling_rate\n # aka sample number\n # subtract to adjust start time\n spike_times = h5[\"3BResults\"][\"3BChEvents\"][\"SpikeTimes\"][first_idx:last_idx] - s\n \n\n \n csv_name = f'{filename}_part_{i}_spikes.csv'\n spikes = zip(spike_channel_ids, spike_unit_ids, spike_times)\n tot_spikes = spike_times.shape[0]\n print(f\"creating {csv_name} ...\")\n with open(csv_name, 'w', newline='') as csvfile:\n fieldnames = ['channel_i', 'channel_j', 'unit', \"spike_time\"]\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n\n writer.writeheader()\n for channel, unit_id, spike_time in tqdm(spikes,\n total=tot_spikes):\n c = channel_map[channel]\n # convert to tuple\n # account for 1-indexing\n c = (c[0]-1,c[1]-1)\n \n # count num units on channel\n # first check if we've seen this channel before\n if not c in channel_unit_count:\n # if not, initialize channel_unit_count for the channel\n channel_unit_count[c] = 1\n unit_num = 0\n # add unit\n unit_id_2_num[unit_id] = unit_num\n else:\n \n # then check if we've seen this unit before\n if not unit_id in unit_id_2_num:\n # if not, assign unit_num for this new unit\n unit_num = channel_unit_count[c]\n unit_id_2_num[unit_id] = unit_num\n channel_unit_count[c] += 1\n else:\n # otherwise, look it up\n unit_num = unit_id_2_num[unit_id]\n \n \n t = spike_time / sampling_rate\n writer.writerow({\"channel_i\": c[0],\n \"channel_j\": c[1],\n \"unit\": unit_num,\n \"spike_time\": t})\n \n np.save(f\"{filename}_channel_map.npy\", channel_map)", "def chunks(sequence, chunk_size):\r\n\r\n # YOUR CODE HERE\r" ]
[ "0.73668265", "0.70914865", "0.7090537", "0.7067825", "0.7001469", "0.68641955", "0.67107993", "0.65979683", "0.65870893", "0.6580552", "0.6572284", "0.65702844", "0.6551708", "0.64280057", "0.6383532", "0.6352524", "0.63487375", "0.629163", "0.62073165", "0.6201502", "0.61509836", "0.6134791", "0.61319137", "0.612877", "0.6123829", "0.6093117", "0.6089306", "0.6063661", "0.60080594", "0.60020274", "0.599134", "0.59675574", "0.5965684", "0.59588504", "0.59507984", "0.5946695", "0.59277713", "0.59020627", "0.5897366", "0.5881426", "0.5872857", "0.58690405", "0.58437407", "0.5840009", "0.58389646", "0.5829432", "0.5788848", "0.57760197", "0.5730165", "0.5729749", "0.57204604", "0.56978476", "0.56950253", "0.5683508", "0.5679406", "0.5664924", "0.56531113", "0.56367964", "0.56304955", "0.5608512", "0.5601443", "0.5576275", "0.5570728", "0.55607986", "0.55410457", "0.5537618", "0.55106014", "0.55063707", "0.5500217", "0.54955727", "0.54872805", "0.5486411", "0.54803133", "0.5471687", "0.54598975", "0.5456179", "0.54366285", "0.5434683", "0.54345804", "0.54343444", "0.5431285", "0.5419533", "0.5415466", "0.54017574", "0.53996336", "0.53981113", "0.5392366", "0.53880495", "0.5387268", "0.53780377", "0.5370984", "0.535077", "0.5331447", "0.532952", "0.53209215", "0.5312861", "0.53120404", "0.5308736", "0.5291105", "0.52866036" ]
0.745905
0
Combine existing chunks to recreate the file. The chunks must be present in the cwd. The new file will be written to cwd.
def combine(self): import re print 'Creating file', self.__filename bname = (os.path.split(self.__filename))[1] bname2 = bname # bugfix: if file contains characters like +,.,[] # properly escape them, otherwise re will fail to match. for a, b in zip(['+', '.', '[', ']','$', '(', ')'], ['\+','\.','\[','\]','\$', '\(', '\)']): bname2 = bname2.replace(a, b) chunkre = re.compile(bname2 + '-' + '[0-9]+') chunkfiles = [] for f in os.listdir("."): print f if chunkre.match(f): chunkfiles.append(f) print 'Number of chunks', len(chunkfiles), '\n' chunkfiles.sort(self.sort_index) data='' for f in chunkfiles: try: print 'Appending chunk', os.path.join(".", f) data += open(f, 'rb').read() except (OSError, IOError, EOFError), e: print e continue try: f = open(bname, 'wb') f.write(data) f.close() except (OSError, IOError, EOFError), e: raise FileSplitterException, str(e) print 'Wrote file', bname
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def merge_vcf_chunks(out_dir, path_name, path_size, chunks, overwrite):\n vcf_path = os.path.join(out_dir, path_name + \".vcf\")\n if overwrite or not os.path.isfile(vcf_path):\n first = True\n for chunk_i, chunk in enumerate(chunks):\n clip_path = chunk_base_name(path_name, out_dir, chunk_i, \"_clip.vcf\")\n if os.path.isfile(clip_path):\n if first is True:\n # copy everything including the header\n run(\"cat {} > {}\".format(clip_path, vcf_path))\n first = False\n else:\n # add on everythin but header\n run(\"grep -v \\\"^#\\\" {} >> {}\".format(clip_path, vcf_path), check=False)\n \n # add a compressed indexed version\n if overwrite or not os.path.isfile(vcf_path + \".gz\"):\n run(\"bgzip -c {} > {}\".format(vcf_path, vcf_path + \".gz\"))\n run(\"tabix -f -p vcf {}\".format(vcf_path + \".gz\"))", "def join_chunks(self):\n if self.state == self.STATE_UPLOADING and self.total_chunks_uploaded == self.total_chunks:\n\n # create file and write chunks in the right order\n temp_file = open(self.full_path, \"wb\")\n for chunk in self.chunks.all():\n chunk_bytes = chunk.file.read()\n temp_file.write(chunk_bytes)\n temp_file.close()\n\n # set state as completed\n self.state = self.STATE_COMPLETED\n super(FlowFile, self).save()\n\n # delete chunks automatically if is activated in settings\n if FLOWJS_AUTO_DELETE_CHUNKS:\n self.chunks.all().delete()", "def write_chunks(file, chunks):\n\n\tfor c in chunks:\n\n\t\tchunk(file, c[0], c[1])", "def _concatenate_inner(self, chunks, direction):\n tmp_bucket = []\n source_chunks = chunks if direction else chunks[::-1]\n target_chunks = ChunkList()\n for chunk in source_chunks:\n if (\n # if the chunk has matched dependency, do concatenation.\n chunk.dependency == direction or\n # if the chunk is SPACE, concatenate to the previous chunk.\n (direction == False and chunk.is_space())\n ):\n tmp_bucket.append(chunk)\n continue\n tmp_bucket.append(chunk)\n if not direction: tmp_bucket = tmp_bucket[::-1]\n new_word = ''.join([tmp_chunk.word for tmp_chunk in tmp_bucket])\n chunk.update_word(new_word)\n target_chunks.append(chunk)\n tmp_bucket = []\n if tmp_bucket: target_chunks += tmp_bucket\n return target_chunks if direction else target_chunks[::-1]", "def _concatenate_parts_to_file_for_pipe(self,\n outfile,\n image_parts,\n source_dir,\n debug=False):\n close_all_fds([outfile])\n part_count = len(image_parts)\n part_file = None\n try:\n for part in image_parts:\n self.log.debug(\"Concatenating Part:\" + str(part.filename))\n sha1sum = hashlib.sha1()\n part_file_path = source_dir + \"/\" + part.filename\n with open(part_file_path) as part_file:\n data = part_file.read(euca2ools.bundle.pipes._BUFSIZE)\n while data:\n sha1sum.update(data)\n outfile.write(data)\n outfile.flush()\n data = part_file.read(euca2ools.bundle.pipes._BUFSIZE)\n part_digest = sha1sum.hexdigest()\n self.log.debug(\n \"PART NUMBER:\" + str(image_parts.index(part) + 1) +\n \"/\" + str(part_count))\n self.log.debug('Part sha1sum:' + str(part_digest))\n self.log.debug('Expected sum:' + str(part.hexdigest))\n if part_digest != part.hexdigest:\n raise ValueError('Input part file may be corrupt:{0} '\n .format(part.filename),\n '(expected digest: {0}, actual: {1})'\n .format(part.hexdigest, part_digest))\n except IOError as ioe:\n # HACK\n self.log.debug('Error in _concatenate_parts_to_file_for_pipe.' 
+\n str(ioe))\n if not debug:\n return\n raise ioe\n finally:\n if part_file:\n part_file.close()\n self.log.debug('Concatentate done')\n self.log.debug('Closing write end of pipe after writing')\n outfile.close()", "def _copy(self):\n for d in self._current_chunk:\n self.out.write(d)", "def merge_chunks(self, data):\r\n fn = \"speech_%s_%s.mp3\" % (\r\n data[\"lang\"], data[\"datetime\"].strftime(\"%Y%m%d-%H%M%S\"))\r\n filename_main = unique_path(fn)\r\n with open(filename_main, \"wb\") as f:\r\n # MP3s can be simply concatenated together, result is legible.\r\n for i, filename in enumerate(data[\"filenames\"]):\r\n f.write(open(filename, \"rb\").read())\r\n # Add more silence for separators like commas and periods.\r\n silence_count = 0\r\n if data[\"chunks\"][i][-1] in [\".\",\"?\",\"!\"]:\r\n silence_count = conf.SilenceCountLong\r\n elif data[\"chunks\"][i][-1] in [\",\",\":\",\";\",\"(\",\")\"]:\r\n silence_count = conf.SilenceCountShort\r\n f.write(base64.decodestring(conf.Silence) * silence_count)\r\n for filename in data[\"filenames\"]:\r\n try:\r\n os.unlink(filename)\r\n except Exception: pass\r\n data.update(filenames=[filename_main], current=filename_main, count=1)", "def packFiles(source, filesPerBlock, dest):\n\tfileCount = 1\n\t\n\ttmpFileName = \"tmp.h5\"\t\n\n\n\toutFile = createBlockFile(tmpFileName)\t\n\tfor dirname, subdirs, files in os.walk(source):\t\n\t print 'Scanning ' + dirname + '...'\t\n\t for f in files:\t\n\t if f.endswith('.h5'):\t\n\t inFile = h5py.File(os.path.join(dirname, f), 'r')\t\n\t outFile.copy(inFile, outFile['songs'], f)\t\n\t inFile.close()\t\n\t fileCount = fileCount + 1\t\n\t if(fileCount > filesPerBlock):\t\n\t outFile.close()\t\n\t upload(tmpFileName, bucket)\t\n\t fileCount = 1\t\n\t outFile = createBlockFile(tmpFileName)\t\n\n \toutFile.close()\n \tif fileCount > 1:\n\t \tupload(tmpFileName, bucket)\n\n\tos.remove(tmpFileName)", "def hadd(new_name, files, chunk_size=900):\n \n if len(files) <= chunk_size:\n return hadd_ex(new_name, files)\n \n files = files[:]\n new_files = []\n while files:\n these = files[:chunk_size]\n files = files[chunk_size:]\n\n this_fn = new_name + '_%i' % len(new_files)\n new_files.append(this_fn)\n\n if not hadd_ex(this_fn, these):\n print '\\033[36;7m PROBLEM hadding \\033[m', new_name, 'in chunks of', chunk_size, 'on', this_fn\n return False\n\n assert len(new_files) < chunk_size\n\n ok = hadd_ex(new_name, new_files)\n if not ok:\n print '\\033[36;7m PROBLEM hadding', new_name, 'in chunks of', chunk_size, 'assembling final file'\n return False\n\n for fn in new_files:\n os.remove(fn)\n\n return True", "def combine_chunks(chunks, output_path, decompress=False, encrypt_key=None):\n\n msg = 'combining %s chunks' % len(chunks)\n logger.info(msg)\n\n salt_length = 0\n key_length = 0\n if encrypt_key:\n if encrypt_key.binary_salt:\n salt_length = len(encrypt_key.binary_salt)\n assert salt_length == 16, salt_length\n key_length = len(encrypt_key.binary_key)\n assert key_length == 16, key_length\n f = open(output_path, 'wb')\n if decompress:\n decompressor = zlib.decompressobj()\n if encrypt_key:\n # salt, then iv\n iv = chunks[0].read(byte_range=[salt_length, salt_length + key_length])\n decryptor = Cipher(\n CIPHER_MODE, encrypt_key.binary_key, iv, CIPHER_DECODE\n )\n # sort out any parity chunks\n parity_chunks = []\n while chunks[-1].parity:\n parity_chunks.insert(0, chunks.pop())\n if parity_chunks and has_par2():\n cwd1 = os.getcwd()\n os.chdir(os.path.dirname(parity_chunks[0].file_path))\n # The files have to 
end in .par2 to work.\n for parity_chunk in parity_chunks:\n new_name = '%s.%s' % (parity_chunk.filename, 'par2')\n os.rename(parity_chunk.filename, new_name)\n parity_chunk.file_path = os.path.join(\n os.path.dirname(parity_chunk.file_path), new_name)\n # It won't recognize them by name, so put them on the command line.\n par2_repair([p.filename for p in parity_chunks])\n os.chdir(cwd1)\n for (i, chunk) in enumerate(chunks):\n chunk.verify_checksum()\n if i == 0 and encrypt_key:\n chunk_size = chunk.size\n if encrypt_key.binary_salt:\n # strip salt and IV\n data = chunk.read(\n byte_range=[len(encrypt_key.binary_key) + \\\n len(encrypt_key.binary_salt),\n chunk.size-4]\n )\n else:\n # skip the IV\n data = chunk.read(\n byte_range=[len(encrypt_key.binary_key), chunk.size-4]\n )\n else:\n data = chunk.read()\n if encrypt_key:\n data = decryptor.update(data)\n assert not decryptor.final()\n if decompress:\n data = decompressor.decompress(data)\n assert not decompressor.unused_data\n f.write(data)\n f.close()\n logger.debug('file size is %s' % os.stat(output_path).st_size)", "def merge_files():\n # abs path of data folder\n work_folder = os.path.join(CURRENT_FOLDER, \"..\\\\Data\\\\weather_data\\\\KORD\")\n file_list = os.listdir(work_folder)\n with open(os.path.join(work_folder, \"..\\\\merged_history_KORD.csv\"), \"w\") as outfile:\n for line in open(os.path.join(work_folder, file_list[0])):\n outfile.write(line)\n print \"write the first line\"\n for i in range(1, len(file_list)):\n with open(os.path.join(work_folder, file_list[i])) as infile:\n infile.next()\n for line in infile:\n outfile.write(line)", "def mergeAndSaveFile(dumpMetaFile, chunkSizeFile, outFile):\n dump = open (dumpMetaFile, \"r\")\n chunk = open (chunkSizeFile, \"r\")\n out = open (outFile, \"w\")\n \n cline = \"\"\n cline = chunk.readline()\n cline = cline.rstrip(\"\\n\")\n\n while dump:\n dline = dump.readline()\n if not dline:\n break\n dline = dline.rstrip(\"\\n\")\n \n # Split line parts \n dlineParts = dline.split(' ')\n \n # Read lines from chunkSize\n numEntries = int(dlineParts[2])\n \n entries = []\n for i in range(numEntries):\n entries.append([dlineParts[i*3 + 3], dlineParts[i*3 + 4], dlineParts[i*3 + 5], 0])\n #entries[i][0] = dlineParts[i*3 + 3]\n #entries[i][1] = dlineParts[i*3 + 4]\n #entries[i][2] = dlineParts[i*3 + 5]\n #entries[i][3] = 0\n\n while True:\n clineParts = cline.split(' ')\n if ((dlineParts[0] == clineParts[0]) and (dlineParts[1] == clineParts[1])):\n for i in range(numEntries):\n if ((entries[i][0] == clineParts[3]) and (entries[i][1] == clineParts[4])):\n entries[i][3] = clineParts[2]\n else:\n break\n cline = chunk.readline()\n cline = cline.rstrip(\"\\n\")\n if not cline:\n break\n\n # Print output\n out.write(dlineParts[0]+\" \"+dlineParts[1]+\" \"+dlineParts[2]+\" \")\n for i in range(numEntries):\n out.write(str(entries[i][3])+\" \"+entries[i][0]+\" \"+entries[i][1]+\" \"+entries[i][2]+\" \")\n out.write(\"\\n\")\n out.close()", "def _chunks_merge(chunks):\n chunks_ = []\n while chunks:\n chunk, chunks = chunks\n chunks_.append(chunk)\n return chunks_[0][:0].join(reversed(chunks_)) if chunks_ else b\"\"", "def merge_root_files(self, force=False):\n self.OutFilePath.parent.mkdir(exist_ok=True)\n cmd = f'hadd{\" -f\" if force else \"\"} {self.proteus_raw_file_path()} {self.Raw.OutFilePath} {self.Ref.OutFilePath} {self.Adc2Vcal.OutFilePath}'\n pinfo(cmd)\n check_call(cmd, shell=True)", "def __concatonate_files(self, new_file_name, parent_folder):\n\n # make the output directory\n 
output_file = self.save_directory + \"/\" + new_file_name\n\n # check if save_directory exists\n if not os.path.exists(self.save_directory):\n try:\n # make the directory\n os.makedirs(self.save_directory)\n except PermissionError:\n # if the user is unable to write to this directory, we should not continue\n print(\"You do not have the correct permissions for creating a directory here. Please try again.\")\n exit(-1)\n\n barcode_files = []\n for root, directory, files in os.walk(parent_folder):\n # we need to know where each file is in the barcode folder so we can read data from it\n for name in files:\n barcode_files.append( os.path.join(root, name) )\n\n with open(output_file, 'w') as writer:\n for name in barcode_files:\n with open(name, 'r') as reader:\n for line in reader:\n writer.write(line)", "def file_sync_write_chunks(radosobject, chunksize, offset, chunks, size=None):\n padding = 0\n cursize = chunksize * offset\n radosobject.seek(cursize)\n for chunk in chunks:\n if padding:\n radosobject.sync_write(buffer(zeros(chunksize), 0, padding))\n if size is not None and cursize + chunksize >= size:\n chunk = chunk[:chunksize - (cursize - size)]\n radosobject.sync_write(chunk)\n cursize += len(chunk)\n break\n radosobject.sync_write(chunk)\n padding = chunksize - len(chunk)\n\n padding = size - cursize if size is not None else 0\n if padding <= 0:\n return\n\n q, r = divmod(padding, chunksize)\n for x in xrange(q):\n radosobject.sunc_write(zeros(chunksize))\n radosobject.sync_write(buffer(zeros(chunksize), 0, r))", "def append(self, file, idx):\n\n # print \"append %s %d\" % (file, idx)\n src = \"%s/%s\" % (self._dir, file)\n dst = \"%s/.%d.new\" % (self._tempdir, idx)\n copyfile(src, dst)\n result = self._run(\"%s --%d --block-size %d --bits %d --quiet --threads %d %s --mode %s --rehash %s %s\" %\n (self._ishakesumd, self._mode, self._block_size, self._output_bits, self._threads,\n self._profile, self._alg, self._hash, self._tempdir))\n os.remove(dst)\n return result", "def create_initial_file():\n\n merge_file = tempfile.NamedTemporaryFile()\n\n # spin the sources for the base file\n for source in sort_sources(\n recursive_glob(settings[\"datapath\"], settings[\"hostfilename\"])\n ):\n\n start = \"# Start {}\\n\\n\".format(os.path.basename(os.path.dirname(source)))\n end = \"\\n# End {}\\n\\n\".format(os.path.basename(os.path.dirname(source)))\n\n with open(source, \"r\", encoding=\"UTF-8\") as curFile:\n write_data(merge_file, start + curFile.read() + end)\n\n # spin the sources for extensions to the base file\n for source in settings[\"extensions\"]:\n for filename in sort_sources(\n recursive_glob(\n path_join_robust(settings[\"extensionspath\"], source),\n settings[\"hostfilename\"],\n )\n ):\n with open(filename, \"r\") as curFile:\n write_data(merge_file, curFile.read())\n\n maybe_copy_example_file(settings[\"blacklistfile\"])\n\n if os.path.isfile(settings[\"blacklistfile\"]):\n with open(settings[\"blacklistfile\"], \"r\") as curFile:\n write_data(merge_file, curFile.read())\n\n return merge_file", "def concat_chunks(file_list: list, output_path: str, verbose_level=0) -> str:\n temp_file_name = 'temp_' + str(len(file_list)) + \\\n str(int(round(time.time() * 1000))) + '.wav'\n files_str = ' '.join(file_list)\n if str(verbose_level) == '2':\n print('sox -V%s %s %s' % (verbose_level, files_str, output_path +\n os.sep + temp_file_name))\n os.system('sox -V%s %s %s' % (verbose_level, files_str, output_path +\n os.sep + temp_file_name))\n return temp_file_name", "def 
_rechunking(self, compressor, parallel=False, replace=False):\n target_path = tempfile.TemporaryDirectory()\n source_sf = strax.DataDirectory(self.path)\n st= self.st\n st.set_context_config(dict(allow_rechunk=False,\n n_chunks=10))\n st.storage = [source_sf]\n run_id = '0'\n st.make(run_id, self.target)\n assert st.is_stored(run_id, self.target)\n assert strax.utils.dir_size_mb(self.path) > 0\n original_n_files = len(glob.glob(os.path.join(self.path, '*', '*')))\n assert original_n_files > 3 # At least two files + metadata\n _, backend_key = source_sf.find(st.key_for(run_id, self.target))\n strax.rechunker(source_directory=backend_key,\n dest_directory=target_path.name if not replace else None,\n replace=True,\n compressor=compressor,\n target_size_mb=strax.default_chunk_size_mb * 2,\n parallel=parallel,\n max_workers=4,\n _timeout=5,\n )\n assert st.is_stored(run_id, self.target)\n # Should be empty, we just replaced the source\n assert strax.utils.dir_size_mb(target_path.name) == 0\n new_n_files = len(glob.glob(os.path.join(self.path, '*', '*',)))\n assert original_n_files > new_n_files\n st.set_context_config(dict(forbid_creation_of='*'))\n st.get_array(run_id, self.target)\n target_path.cleanup()", "def write_chunk(chunk, token):\n dest = rem_dir('grab')\n # input(dest)\n file_name = '{}_{}'.format('cpf_temp', token)\n dest_file_name = os.path.join(os.path.abspath(dest), file_name)\n # input(dest_file_name)\n WRITE_STREAM = open(dest_file_name, 'wb')\n WRITE_STREAM.write(chunk)\n WRITE_STREAM.close()\n\n return True", "def _concatenate_group(group, first_elem):\n target_file_name = re.sub(pattern_lane_num, r\"\\1\", os.path.basename(first_elem))\n target_path = os.path.join(tempfile.gettempdir(), target_file_name)\n\n # Overwriting all files by default\n with open(target_path, \"wb\") as outf:\n for fname in group:\n with open(fname, \"rb\") as inf:\n # TODO: check for newline at the end of file first?\n shutil.copyfileobj(inf, outf)\n return target_path", "def write_to_master_file(\n self, all_files=[], filename=sys.argv[2], separator=sys.argv[3]\n ) -> None:\n if filename == \"\":\n raise EnvironmentError(\"No filename provided!\")\n\n first_file = all_files[0]\n\n with open(filename, \"w+\") as master:\n with open(first_file, \"r+\") as initial_write:\n for line in initial_write:\n master.write(line)\n\n if len(all_files) > 1:\n for i in range(1, len(all_files)):\n master.write(separator)\n with open(all_files[i], \"r+\") as file_to_append:\n for line in file_to_append:\n master.write(line)", "def add_merge_job(dax, final_name, chunk, level, job_number, final):\n j = Job(name=\"merge.sh\")\n out_file_name = final_name + \"-%d-%d.tar.gz\" %(level, job_number)\n out_file = File(out_file_name)\n if final:\n out_file_name = final_name\n out_file = File(final_name)\n j.uses(out_file, link=Link.OUTPUT, transfer=final)\n j.addArguments(out_file)\n for f in chunk:\n flfn = File(f)\n j.uses(flfn, link=Link.INPUT)\n j.addArguments(flfn)\n j.addProfile(Profile(Namespace.CONDOR, 'request_disk', '100 GB'))\n dax.addJob(j)\n return out_file_name", "def writeto(sourcePaths, targetPath, eosDownload=False):\n\n LOG.info('merge %s -> %s', sourcePaths, targetPath)\n\n target = ROOT.TFile.Open(targetPath, 'recreate')\n # This is critical (and safe) - see https://root-forum.cern.ch/t/tfile-close-slow/24179\n ROOT.gROOT.GetListOfFiles().Remove(target)\n\n _nadd = 0\n\n for path in sourcePaths:\n pathOrig = path\n pathReal = os.path.realpath(pathOrig)\n if eosDownload and pathReal.startswith('/eos'):\n 
for _ in range(5):\n with tempfile.NamedTemporaryFile(suffix='.root', delete=False) as tmp:\n pass\n proc = subprocess.Popen(['xrdcp', '-f', 'root://eoscms.cern.ch/' + pathReal, tmp.name])\n proc.communicate()\n if proc.returncode == 0:\n path = tmp.name\n break\n else:\n try:\n os.unlink(tmp.name)\n except:\n pass\n time.sleep(5)\n else:\n raise RuntimeError('Failed to download ' + pathOrig)\n\n start = time.time()\n source = ROOT.TFile.Open(path)\n ROOT.gROOT.GetListOfFiles().Remove(source)\n\n nnew, nadd = merge(source, target)\n\n source.Close()\n target.Close() # closing target at each iteration to flush out in-memory objects\n\n LOG.info('%s -> %s: %d new, %d merged (%.1f s)', pathOrig, targetPath, nnew, nadd, time.time() - start)\n\n _nadd += nadd\n if pathOrig != sourcePaths[-1]:\n if _nadd > 1000000:\n # purge duplicate keys by compressing\n os.rename(targetPath, targetPath + '.tmp')\n writeto([targetPath + '.tmp'], targetPath)\n os.unlink(targetPath + '.tmp')\n _nadd = 0\n \n target = ROOT.TFile.Open(targetPath, 'update')\n ROOT.gROOT.GetListOfFiles().Remove(target)\n\n if eosDownload and pathReal.startswith('/eos'):\n try:\n os.unlink(path)\n except:\n pass", "def assemble_file(names):\n md5 = hashlib.md5()\n filename = ''.join([name.split('-')[-1] for name in names])\n fpath = os.path.join(FILES_DIR, filename)\n with open(fpath, \"wb\") as dst:\n for name in names:\n for chunk in chunked_reader(os.path.join(DATA_DIR, name)):\n md5.update(chunk)\n dst.write(chunk)\n\n return fpath, md5.digest().hex()", "def combine_tokens(tokens):\n partial_ipcdumps = []\n for start_index in range(0, len(tokens), TOKENS_PER_IPCDUMP):\n end_index = min(start_index + TOKENS_PER_IPCDUMP, len(tokens))\n current_tokens = tokens[start_index:end_index]\n partial_ipcdumps.append(\n create_partial_ipc_dump(current_tokens, file_path))\n\n combined_file_path = None\n if len(partial_ipcdumps) > 1:\n combined_file_path = combine_ipc_dumps(partial_ipcdumps, file_path)\n elif len(partial_ipcdumps) == 1:\n combined_file_path = partial_ipcdumps[0]\n\n if not combined_file_path:\n # This can happen in the case of a timeout or other error. 
The actual\n # error should already be logged, so no need to do it again here.\n return b''\n\n # TODO(mbarbella): Allow token combining functions to write files directly.\n handle = open(combined_file_path, 'rb')\n result = handle.read()\n handle.close()\n\n shell.remove_file(combined_file_path)\n return result", "def run(self):\n first_index, last_index = \\\n self.get_initial_blocks()\n while last_index - first_index > self.block_size:\n first_index, last_index = \\\n self.join_blocks(first_index, last_index)\n self.merge_blocks(self.output_file_name, first_index, last_index)", "def mergeFile():\n with open(\"output.txt\",'w') as o:\n o.write(data1)\n o.write(data2)\n o.write(data3)", "def merge_songs(in_dir, output_path):\n\twith open(output_path, 'a') as out_file:\n\t\tfor file in os.listdir(in_dir):\n\t\t\twith open(in_dir + file, 'r') as lyrics_file:\n\t\t\t\tout_file.write(lyrics_file.read())", "def genRST(path):\n \n cplrRST = open(path, \"w\")\n for path in files:\n appendFile(path, cplrRST)\n cplrRST.close()", "def copy_chunks(chunk, token):\n # Open a thread to write this chunk\n thread = copy(chunk, token)\n thread.start()\n thread.join()", "def combineAllGraphFiles(chroms, final_out):\n outfile = open(final_out,'w');\n outfile.close();\n \n for chrom in chroms:\n graph_file = chrom + \".graph\";\n try:\n if os.system('%s %s >> %s' %\n (cat, graph_file, final_out)): raise\n except: sys.stderr.write(\"cat failed at %s\\n\" % chrom)", "def do_merge_all():\n for rawd, merged in TOMERGE:\n mylogger.info(\"cleaning \" + merged)\n ensure_dir(merged)\n cleandir(merged)\n mylogger.info(\"merging \" + rawd + \" to \" + merged)\n build_merged_dir(build_sensor_file_map(rawd), merged)\n\n # add timestamp file\n\tf = open(TIMESTAMP_FILE,\"w\")\n\tf.write(str(datetime.datetime.now()))\n\tf.close()", "def reMake(self, final_path, map_, chunk_path, delete_residuals=False):\n map_ = {int(k): v for k, v in map_.items()}\n print(\"[+] Remake started...\")\n try:\n file_content_string = \"\"\n for i in range(0, len(map_)):\n file_content_string += open(chunk_path + map_[i], \"r\").read()\n if delete_residuals == True: remove(chunk_path + map_[i])\n file_content = b64.b64decode(file_content_string)\n with open(final_path, \"wb\") as f:\n f.write(file_content)\n print(\"[+] Remake done.\")\n except Exception as e:\n print(e)\n print(\"[+] Remake went wrong.\")", "def generate_overlayfs_stacking(self, working_file_name):\n\n # Reopenthe working file\n working_file = open(working_file_name, \"a\")\n\n\n working_file.write(\"generate_overlayfs_stacking\\n\")\n\n # We are done here, now close the file\n working_file.close()", "def concat(file_list: list, output_dir: str, chunk_size: int=40,\n num_workers: int=None, name: str=None, rate=None,\n trim_silence_threshold: float=0, min_duration: int=None,\n exist_ok=False, verbose_level=0):\n # Todo: remove trim silence feature from this function to a new function\n if len(file_list) == 0:\n raise ValueError('Not possible to process an empty list of files.')\n\n os.makedirs(output_dir, exist_ok=True)\n file_list = update_list_fixing(file_list, target_rate=rate, channels=1,\n min_duration=min_duration,\n verbose_level=verbose_level,\n num_workers=num_workers)\n print('[INFO] creating data set [%s]' % output_dir)\n\n # Temp files (will be removed after concatenation process)\n temp_files = set()\n\n # Concat all files\n print('[INFO] concatenating chunks of size %d' % chunk_size)\n while len(file_list) > 1: # Will concat chunk of files until lasts one left\n 
print('[remaining files to process: %d]' % len(file_list))\n # Create chunks\n chunks = [file_list[i:i + chunk_size]\n for i in range(0, len(file_list), chunk_size)]\n # Reset file list\n file_list = []\n\n if str(verbose_level) == '2' and workers == 1:\n # This code is duplicated for debugging purposes\n for chunk in chunks:\n temp_file = concat_chunks(file_list=chunk,\n output_path=output_dir + os.sep,\n verbose_level=verbose_level)\n if os.path.isfile(output_dir + os.sep + temp_file):\n # Add file to temp_files:\n temp_files.add(output_dir + os.sep + temp_file)\n # Add file to file_list to process again\n file_list.append(output_dir + os.sep + temp_file)\n else:\n # Make parallel calls to concat chunks\n with concurrent.futures.ProcessPoolExecutor(num_workers) \\\n as executor:\n futures = [\n executor.submit(fn=concat_chunks,\n file_list=chunk,\n output_path=output_dir + os.sep,\n verbose_level=verbose_level)\n for chunk in chunks]\n\n kwargs = {\n 'total': len(futures),\n 'unit': 'chunks',\n 'unit_scale': True,\n 'leave': True\n }\n\n for f in tqdm(concurrent.futures.as_completed(futures),\n **kwargs):\n pass\n\n for f in futures:\n if os.path.isfile(output_dir + os.sep + f.result()):\n # Add file to temp_files:\n temp_files.add(output_dir + os.sep + f.result())\n # Add file to file_list to process again\n file_list.append(output_dir + os.sep + f.result())\n # Remove temporary files:\n if len(temp_files) == 0:\n print(\"[FATAL ERROR]: the concatenated file is missing. You might want \"\n \"to run again with the chunk_size=2, workers=1, and the \"\n \"verbosity_level=2 parameters for debugging purposes.\")\n exit(-1)\n final_file = file_list[0]\n temp_files.remove(final_file)\n for file in temp_files:\n try:\n os.remove(file)\n except FileNotFoundError:\n print('[WARN] File not found:', file)\n\n if trim_silence_threshold is not None and trim_silence_threshold > 0:\n temp_file = final_file + '_temp_trs.wav'\n cmd = 'sox -V' + str(verbose_level) + ' ' + final_file + ' ' + \\\n temp_file + ' silence 1 0.1 {}% -1 0.1 {}%'.\\\n format(trim_silence_threshold, trim_silence_threshold)\n os.system(cmd)\n os.remove(final_file)\n os.rename(temp_file, final_file)\n\n if name is not None:\n if os.path.isfile(output_dir + os.sep + name + '.wav'):\n if exist_ok:\n os.remove(output_dir + os.sep + name + '.wav')\n else:\n raise FileExistsError\n os.rename(file_list[0], output_dir + os.sep + name + '.wav')\n\n rmtree(temp_folder)", "def merge_test_files():\n for syscall_type in SYSCALLS:\n self_file = open(f\"{TEMP_DIR}/{syscall_type}-self-split.test\")\n nonself_file = open(f\"{TEMP_DIR}/{syscall_type}-nonself-split.test\")\n merged_file = open(f\"{TEMP_DIR}/{syscall_type}-merged-split.test\", \"w\")\n merged_lines = self_file.readlines()\n merged_lines.extend(nonself_file.readlines())\n merged_file.writelines(merged_lines)\n self_file.close()\n nonself_file.close()\n merged_file.close()", "def run_chunked(self):\n files, empty_dirs = self._list_files()\n dest = self.opts[\"dest\"]\n gzip = self.opts[\"gzip\"]\n tgt = self.opts[\"tgt\"]\n timeout = self.opts[\"timeout\"]\n selected_target_option = self.opts.get(\"selected_target_option\")\n\n dest_is_dir = (\n bool(empty_dirs) or len(files) > 1 or bool(re.search(r\"[\\\\/]$\", dest))\n )\n\n reader = (\n salt.utils.gzip_util.compress_file\n if gzip\n else salt.utils.itertools.read_file\n )\n\n _res = salt.utils.minions.CkMinions(self.opts).check_minions(\n tgt, tgt_type=selected_target_option or \"glob\"\n )\n minions = _res[\"minions\"]\n\n def 
_get_remote_path(fn_):\n if fn_ in self.opts[\"src\"]:\n # This was a filename explicitly passed on the CLI\n return (\n os.path.join(dest, os.path.basename(fn_)) if dest_is_dir else dest\n )\n else:\n for path in self.opts[\"src\"]:\n relpath = os.path.relpath(fn_, path + os.sep)\n if relpath.startswith(parent):\n # File is not within this dir\n continue\n return os.path.join(dest, os.path.basename(path), relpath)\n else: # pylint: disable=useless-else-on-loop\n # Should not happen\n log.error(\"Failed to find remote path for %s\", fn_)\n return None\n\n ret = {}\n parent = \"..\" + os.sep\n\n with salt.client.get_local_client(self.opts[\"conf_file\"]) as local:\n for fn_, mode in files.items():\n remote_path = _get_remote_path(fn_)\n\n index = 1\n failed = {}\n for chunk in reader(fn_, chunk_size=self.opts[\"salt_cp_chunk_size\"]):\n chunk = base64.b64encode(salt.utils.stringutils.to_bytes(chunk))\n append = index > 1\n log.debug(\n \"Copying %s to %starget '%s' as %s%s\",\n fn_,\n \"{} \".format(selected_target_option)\n if selected_target_option\n else \"\",\n tgt,\n remote_path,\n \" (chunk #{})\".format(index) if append else \"\",\n )\n args = [\n tgt,\n \"cp.recv_chunked\",\n [remote_path, chunk, append, gzip, mode],\n timeout,\n ]\n if selected_target_option is not None:\n args.append(selected_target_option)\n\n result = local.cmd(*args)\n\n if not result:\n # Publish failed\n msg = (\n \"Publish failed.{} It may be necessary to \"\n \"decrease salt_cp_chunk_size (current value: \"\n \"{})\".format(\n \" File partially transferred.\" if index > 1 else \"\",\n self.opts[\"salt_cp_chunk_size\"],\n )\n )\n for minion in minions:\n ret.setdefault(minion, {})[remote_path] = msg\n break\n\n for minion_id, minion_ret in result.items():\n ret.setdefault(minion_id, {})[remote_path] = minion_ret\n # Catch first error message for a given minion, we will\n # rewrite the results after we're done iterating through\n # the chunks.\n if minion_ret is not True and minion_id not in failed:\n failed[minion_id] = minion_ret\n\n index += 1\n\n for minion_id, msg in failed.items():\n ret[minion_id][remote_path] = msg\n\n for dirname in empty_dirs:\n remote_path = _get_remote_path(dirname)\n log.debug(\n \"Creating empty dir %s on %starget '%s'\",\n dirname,\n \"{} \".format(\n selected_target_option\n ) # pylint: disable=str-format-in-logging\n if selected_target_option\n else \"\",\n tgt,\n )\n args = [tgt, \"cp.recv_chunked\", [remote_path, None], timeout]\n if selected_target_option is not None:\n args.append(selected_target_option)\n\n for minion_id, minion_ret in local.cmd(*args).items():\n ret.setdefault(minion_id, {})[remote_path] = minion_ret\n\n return ret", "def split(self):\n \n spl = self.which('split')\n if spl:\n self.__tmp = \"/tmp\"\n self.__tmpout = \"/tmp/output\"\n if not os.path.exists(self.__tmpout):\n os.makedirs(self.__tmpout)\n #os.chdir(\"/tmp\")\n '''\n assume split prog overwrites existing files if\n there is a conflict in file names\n '''\n #thecommand = \"%s -a 3 -b 500k %s %s/%s\" % (spl, self.__filename, self.__tmpout, self.__filename + self.__postfix)\n thecommand = \"%s -a 3 -b 10m %s %s/%s\" % (spl, self.__filename, self.__tmpout, self.__filename + self.__postfix)\n os.system(thecommand)\n dirList=os.listdir(self.__tmpout)\n #self.constructCat(dirList)\n for chunkfilename in dirList:\n #print chunkfilename \n #self.__cat += self.__remotepath + \"/\" + chunkfilename + \" \"\n #print self.__cat\n self.__flist.append(self.__tmpout + \"/\" + chunkfilename)\n #print 
self.__flist\n self.writeLog(chunkfilename, self.md5(fileName=self.__tmpout + \"/\" + chunkfilename))\n self.__numchunks = len([item for item in os.listdir(self.__tmpout) if os.path.isfile(self.__tmpout + \"/\" + item)])\n else:\n try:\n f = open(self.__filename, 'rb')\n except (OSError, IOError), e:\n raise FileSplitterException, str(e)\n \n bname = (os.path.split(self.__filename))[1]\n # Get the file size\n fsize = os.path.getsize(self.__filename)\n # dynamically calculate number of chunks\n strfsize = str(fsize)\n '''\n in MB's\n 8 - teens\n 9 - hundreds\n 10 - gigabytes\n '''\n if len(strfsize) == 8:\n #self.__numchunks = fsize/100000\n self.__numchunks = fsize/50000\n elif len(strfsize) == 9:\n #self.__numchunks = fsize/1000000\n self.__numchunks = fsize/500000\n elif len(strfsize) == 10:\n #self.__numchunks = fsize/10000000\n self.__numchunks = fsize/5000000\n #print '\\nSplitting file %s into %d chunks' % (self.__filename, self.__numchunks)\n # Get size of each chunk\n self.__chunksize = int(float(fsize)/float(self.__numchunks))\n \n chunksz = self.__chunksize\n total_bytes = 0\n \n for x in range(self.__numchunks):\n #chunkfilename = bname + '-' + str(x+1) + self.__postfix\n chunkfilename = bname + ('-%03d' % (x+1)) + self.__postfix\n # kill residual file if it exists\n if os.path.exists(chunkfilename):\n os.remove(chunkfilename)\n \"\"\"\n if reading the last section, calculate correct\n chunk size.\n \"\"\"\n if x == self.__numchunks - 1:\n chunksz = fsize - total_bytes\n \n try:\n if self.__debug:\n print 'Writing file chunk: %s' % chunkfilename\n data = f.read(chunksz)\n total_bytes += len(data)\n chunkf = file(chunkfilename, 'wb')\n chunkf.write(data)\n chunkf.close()\n #self.__cat += self.__remotepath + \"/\" + chunkfilename + \" \"\n self.__flist.append(chunkfilename)\n self.writeLog(chunkfilename, self.md5(fileName=chunkfilename))\n except (OSError, IOError), e:\n print e\n continue\n except EOFError, e:\n print e\n break\n\n print '\\nSplit complete on file: %s into %d chunks\\n' % (self.__filename, self.__numchunks)\n self.__logfhandle.close()\n #self.__cat += \"> \" + self.__remotepath + \"/\" + self.__filename\n self.set_cat_statement()", "def merge_blocks(self, fname, first_index, last_index):\n number_generators = [self.numbers_from_file(\n join(self.tempdir.name, BLOCK_FILE_NAME.format(index)))\n for index in range(first_index, last_index)]\n numbers = [next(number_generator)\n for number_generator in number_generators]\n writer = self.numbers_to_file(fname)\n next(writer)\n while True:\n minvalue, minindex = self.argmin(numbers)\n if minindex is None:\n break\n writer.send(minvalue)\n try:\n numbers[minindex] = next(number_generators[minindex])\n except StopIteration:\n numbers[minindex] = inf\n writer.close()", "def copy_file(input, output):\n for f in input:\n while True:\n chunk = f.read(1024)\n if not chunk:\n break\n output.write(chunk)\n output.flush()", "def chunk_input(self, input_files, chunksize):\n part_lists = [] # Lists of partial files\n known_nlines = None\n part_suffix = \"\"\n chunk_nlines = chunksize * 2\n\n for input_file in input_files:\n # Count number of lines in the file\n nlines = int(command.execute_with_output(\"wc -l %s\" % input_file)\n .strip().split()[0])\n # Number of lines should be the same in paired files\n if known_nlines is not None:\n msg = \"Mismatched line counts in supposedly paired files: {}\".format(\n input_files)\n assert nlines == known_nlines, msg\n known_nlines = nlines\n\n # Set number of pieces and names\n numparts = 
(nlines + chunk_nlines - 1) // chunk_nlines\n ndigits = len(str(numparts - 1))\n part_suffix = \"-chunksize-%d-numparts-%d-part-\" % (chunksize, numparts)\n out_prefix_base = os.path.basename(input_file) + part_suffix\n out_prefix = os.path.join(self.chunks_result_dir_local, out_prefix_base)\n\n # Split large file into smaller named pieces\n command.execute(\"split -a %d --numeric-suffixes -l %d %s %s\" %\n (ndigits, chunk_nlines, input_file, out_prefix))\n command.execute_with_retries(f\"aws s3 sync --only-show-errors {self.chunks_result_dir_local}/ {self.chunks_result_dir_s3}/ --exclude '*' --include '{out_prefix_base}*'\")\n\n # Get the partial file names\n partial_files = []\n paths = command.execute_with_output(\"ls %s*\" % out_prefix).rstrip().split(\"\\n\")\n for pf in paths:\n partial_files.append(os.path.basename(pf))\n\n # Check that the partial files match our expected chunking pattern\n pattern = \"{:0%dd}\" % ndigits\n expected_partial_files = [(out_prefix_base + pattern.format(i))\n for i in range(numparts)]\n msg = \"something went wrong with chunking: {} != {}\".format(\n partial_files, expected_partial_files)\n assert expected_partial_files == partial_files, msg\n part_lists.append(partial_files)\n\n # Ex: [[\"input_R1.fasta-part-1\", \"input_R2.fasta-part-1\"],\n # [\"input_R1.fasta-part-2\", \"input_R2.fasta-part-2\"],\n # [\"input_R1.fasta-part-3\", \"input_R2.fasta-part-3\"],...]\n input_chunks = [list(part) for part in zip(*part_lists)]\n return part_suffix, input_chunks", "def get(self):\n # calculate the total size\n file_size = self.num_chunk * self.chunk_size\n\n if self.verbose:\n print 'Estimated download size: %d' % file_size\n\n # iterate through entire calculated file size with the specified chunk\n # size, create new threads to process the download in parallel\n for location in range(0, file_size, self.chunk_size):\n count = len(self.threads)\n\n if self.verbose:\n print 'Creating download thread %d' % count\n\n # create thread arguments and new thread with function to target\n # for processing and being processing\n thread_args = (location, count)\n thread = threading.Thread(target=self._download, args=thread_args)\n self.threads.append(thread)\n thread.start()\n\n # wait until all active threads are complete\n while threading.active_count() > 1:\n time.sleep(0.1)\n\n # create final file path that all downloads will merge into\n filepath = os.path.join(os.getcwd(), self.filename)\n\n if self.verbose:\n print 'Downloads complete, file merging at %s' % filepath\n\n # iterate through all temp files and write to final file\n with open(filepath, 'wb') as open_file:\n for i in range(self.num_chunk):\n\n # recreate the temporary file path to get chunk from\n temp_name = self.filename + '_part_%d' % i\n temp_path = os.path.join(os.getcwd(), temp_name)\n\n # check if temp file exists before trying to write it\n if not os.path.isfile(temp_path):\n continue\n\n # copy the temporary file into the final files\n # delete the temporary file once completed\n shutil.copyfileobj(open(temp_path, 'rb'), open_file)\n os.remove(temp_path)\n\n # if no file was written then remove\n if os.path.getsize(filepath) < 1:\n os.remove(filepath)\n\n if self.verbose:\n print 'No data to write to file for %s' % self.filename\n\n if self.verbose:\n print 'Files merged and deleted. 
File saved at %s' % filepath", "def _merge_files(files: List[str], output: str, delete: bool = True) -> None:\r\n\r\n if not files:\r\n return\r\n\r\n first = True\r\n\r\n ## Open the single concatenated output file\r\n with open(output, 'w') as outfl:\r\n\r\n ## Loop through input files...\r\n for fpath in files:\r\n\r\n ## Read each input file and format line x line\r\n with open(fpath, 'r') as infl:\r\n\r\n if not first:\r\n ## Skip the header\r\n next(infl)\r\n else:\r\n first = False\r\n\r\n outfl.write(infl.read())\r\n\r\n ## Remove the file once we're done\r\n if delete:\r\n Path(fpath).unlink()", "def join_blocks(self, first_index, last_index):\n index = last_index\n for i in range(first_index, last_index, self.block_size):\n if i + self.block_size <= last_index:\n self.merge_blocks(\n join(self.tempdir.name, BLOCK_FILE_NAME.format(index)),\n i, i+self.block_size)\n else:\n self.merge_blocks(\n join(self.tempdir.name, BLOCK_FILE_NAME.format(index)),\n i, last_index)\n index += 1\n return last_index, index", "def combine_host():\n lines = []\n for path in [google_hosts, my_hosts]:\n with open(path, 'r') as f:\n lines += f.readlines()\n with open(output_hosts, 'w') as f:\n f.writelines(line for line in lines)", "def append(in_file, out_file):\n return subprocess.call(['ncks', '-h', '-A', in_file, out_file])", "def WriteBlobToFile(self, request):\n # First chunk truncates the file, later chunks append.\n if request.offset == 0:\n mode = \"w+b\"\n else:\n mode = \"r+b\"\n\n temp_file = tempfiles.CreateGRRTempFile(\n filename=request.write_path, mode=mode)\n with temp_file:\n path = temp_file.name\n temp_file.seek(0, 2)\n if temp_file.tell() != request.offset:\n raise IOError(\"Chunks out of order Error.\")\n\n # Write the new chunk.\n temp_file.write(request.executable.data)\n\n return path", "def write_zip_vanilla(zip, to_leave_vanilla):\n for from_file in to_leave_vanilla:\n with open(from_file) as file:\n contents = file.read()\n zip.writestr(os.path.join('data/minecraft/', from_file), contents)", "def merge(parent_folder):\n parent_folder = Path(parent_folder)\n\n address_csv_files = sorted(parent_folder.glob('*_step_*.csv'))\n\n frames = []\n\n #: read all csv's delimiter='|', quoting=csv.QUOTE_MINIMAL\n for address_csv_file in address_csv_files:\n temp = pd.read_csv(\n address_csv_file, sep='|', encoding='utf-8', names=['type', 'id', 'county', 'senate', 'house', 'census']\n )\n\n frames.append(temp)\n\n #: merge all csv's\n merged = pd.concat(frames)\n merged.to_csv(parent_folder / 'all.csv', sep='|', header=False, index=False, encoding='utf-8')", "def merge(self, output_file: Path = None, prepend_file_name: bool = False) -> None:\n if output_file is None:\n output_file = self.input_dir / \"merged.fasta\"\n else:\n output_file = Path(output_file)\n logger.info(\"Merging FASTA files in input directory\")\n cmd_str = f\"printf '%s\\\\0' * | xargs -0 cat > {output_file}\"\n if prepend_file_name:\n with tempfile.TemporaryDirectory() as tempdir:\n self.prepend_filename_to_record_names(output_dir=tempdir)\n utils.terminal_execute(cmd_str, work_dir=tempdir)\n else:\n utils.terminal_execute(cmd_str, work_dir=self.input_dir)\n logging.shutdown()", "def expand(self, sourcefile):\n with open(sourcefile, 'rb') as src_file: # Öffne die zu expandierende Datei\n if src_file.read(3) == b'rl3': # Wenn sie eine RL3 Datei ist\n extension_counter = src_file.read(1) # Lese die Anzahl der Bytes der Endung aus\n extension_orig = src_file.read(\n int.from_bytes(extension_counter, 'big')) # Lese die 
Endung auf Basis der Anzahl aus\n outputfile = os.path.splitext(sourcefile)[0] # Splitte den Dateinamen vom Pfad\n if os.path.isfile(\n outputfile + \".\" + extension_orig.decode(\"utf-8\")): # Überprüfe ob die Datei existiert\n number = 1 # Setz Dateinummer auf eins\n while os.path.isfile(outputfile + str(number) + \".\" + extension_orig.decode(\n \"utf-8\")): # Wiederhohle solange bis die Datei nicht existiert\n number += 1 # Erhöhe die Dateinummer\n outputfile += str(number) # Füge dem Dateiname die Nummer hinzu\n outputfile += \".\" + extension_orig.decode(\"utf-8\") # Füge dem Dateinamen die Endung hinzu\n with open(outputfile, 'wb') as dest_file: # Öffne die Zieldatei\n chunk = src_file.read(self.chunk_size) # Lese die Bytes aus\n counter = False # Aktuelles Byte ist keine Zähler\n value = False # Aktuelles Byte ist nicht der Wert\n count = 0 # Null Wiederhohlungen vom Wert\n while chunk: # Solange Bytes da sind\n for byte in chunk: # Gehe durch jedes Byte\n if byte == ord(\n self.MARKER) and not counter and not value: # Wenn das Byte ein Markierungszeichen ist und Zähler und Wert nicht aktiv sind\n counter = True # Aktiviere den Zähler\n elif counter: # Wenn der Zähler aktiv ist\n if byte == 0: # Wenn das aktuelle Byte null ist\n dest_file.write(ord(self.MARKER).to_bytes(1, 'big')) # Schreibe den Marker\n counter = False # Desktiviere den Zähler\n else: # Sonst\n count = byte # Setze die Anzahl auf den Wert des Bytes\n counter = False # Deaktiviere den Zähler\n value = True # Aktiviere den Wert\n elif value: # Wenn der Wert aktiv ist\n for i in range(count + (self.MAXBYTES - 255)): # Für die Aazahl im Zähler\n dest_file.write(byte.to_bytes(1, 'big')) # Schreibe die Bytes\n value = False # Deaktiviere den Wert\n else: # Sonst\n dest_file.write(byte.to_bytes(1, 'big')) # Schreibe das Byte\n chunk = src_file.read(self.chunk_size) # Lese neue Bytes ein\n if counter: # Wenn der Zähler aktiv ist\n dest_file.write(ord(self.MARKER).to_bytes(1, 'big')) # Schreibe den Marker\n else: # Sonst\n raise RLedError # Werfe den RLedError", "def save(self, file: Union[str, BinaryIO]=None) -> bytes:\n # Store all the chunks data as zlib compressed nbt data\n chunks_data = []\n for chunk in self.chunks:\n if chunk is None:\n chunks_data.append(None)\n continue\n chunk_data = BytesIO()\n if isinstance(chunk, Chunk):\n nbt_data = nbt.NBTFile()\n nbt_data.tags.append(nbt.TAG_Int(name='DataVersion', value=chunk.version))\n nbt_data.tags.append(chunk.data)\n else:\n nbt_data = chunk.save()\n nbt_data.write_file(buffer=chunk_data)\n chunk_data.seek(0)\n chunk_data = zlib.compress(chunk_data.read())\n chunks_data.append(chunk_data)\n\n # This is what is added after the location and timestamp header\n chunks_bytes = bytes()\n offsets = []\n for chunk in chunks_data:\n if chunk is None:\n offsets.append(None)\n continue\n # 4 bytes are for length, b'\\x02' is the compression type which is 2 since its using zlib\n to_add = (len(chunk)+1).to_bytes(4, 'big') + b'\\x02' + chunk\n\n # offset in 4KiB sectors\n sector_offset = len(chunks_bytes) // 4096\n sector_count = math.ceil(len(to_add) / 4096)\n offsets.append((sector_offset, sector_count))\n\n # Padding to be a multiple of 4KiB long\n to_add += bytes(4096 - (len(to_add) % 4096))\n chunks_bytes += to_add\n\n locations_header = bytes()\n for offset in offsets:\n # None means the chunk is not an actual chunk in the region\n # and will be 4 null bytes, which represents non-generated chunks to minecraft\n if offset is None:\n locations_header += bytes(4)\n else:\n # 
offset is (sector offset, sector count)\n locations_header += (offset[0] + 2).to_bytes(3, 'big') + offset[1].to_bytes(1, 'big')\n\n # Set them all as 0\n timestamps_header = bytes(4096)\n\n final = locations_header + timestamps_header + chunks_bytes\n\n # Pad file to be a multiple of 4KiB in size\n # as Minecraft only accepts region files that are like that\n final += bytes(4096 - (len(final) % 4096))\n assert len(final) % 4096 == 0 # just in case\n\n # Save to a file if it was given\n if file:\n if isinstance(file, str):\n with open(file, 'wb') as f:\n f.write(final)\n else:\n file.write(final)\n return final", "def concatenate_files(file_one, file_contents, file_headers, output_file):\n with open(file_one, 'r') as input_file:\n with open(output_file, 'w') as output_file:\n for index, line in enumerate(input_file):\n line = line.strip()\n if index == 0:\n write_header(output_file, line, file_headers)\n else:\n if not write_gene_line(output_file, line, file_contents):\n write_zero_expression(output_file, file_contents, line)", "def writeChunk(chunk):", "def up(job, inputFileID1, inputFileID2, memory=sortMemory):\n with job.fileStore.writeGlobalFileStream() as (fileHandle, outputFileStoreID):\n with job.fileStore.readGlobalFileStream( inputFileID1 ) as inputFileHandle1:\n with job.fileStore.readGlobalFileStream( inputFileID2 ) as inputFileHandle2:\n merge(inputFileHandle1, inputFileHandle2, fileHandle)\n job.fileStore.logToMaster( \"Merging %s and %s to %s\"\n % (inputFileID1, inputFileID2, outputFileStoreID) )\n #Cleanup up the input files - these deletes will occur after the completion is successful. \n job.fileStore.deleteGlobalFile(inputFileID1)\n job.fileStore.deleteGlobalFile(inputFileID2)\n return outputFileStoreID", "def compress_stream(src, dst):\n with gzip.GzipFile(fileobj=dst, mode='wb') as gz:\n for block in iterfile(src):\n gz.write(block)", "def write_input_files(pst, pst_path=\".\"):\n par = pst.parameter_data.copy()\n par.index = par.index.str.lower()\n par.loc[:, \"parval1_trans\"] = (par.parval1 * par.scale) + par.offset\n pairs = np.array(list(zip(pst.template_files, pst.input_files)))\n num_tpl = len(pairs)\n chunk_len = 50\n num_chunk_floor = num_tpl // chunk_len\n main_chunks = (\n pairs[: num_chunk_floor * chunk_len].reshape([-1, chunk_len, 2]).tolist()\n ) # the list of files broken down into chunks\n remainder = pairs[num_chunk_floor * chunk_len :].tolist() # remaining files\n chunks = main_chunks + [remainder]\n # procs = []\n # for chunk in chunks:\n # # write_to_template(pst.parameter_data.parval1_trans,os.path.join(pst_path,tpl_file),\n # # os.path.join(pst_path,in_file))\n # p = mp.Process(\n # target=_write_chunk_to_template,\n # args=[chunk, pst.parameter_data.parval1_trans, pst_path],\n # )\n # p.start()\n # procs.append(p)\n # for p in procs:\n # p.join()\n pool = mp.Pool(processes=min(mp.cpu_count(), len(chunks), 60))\n x = [\n pool.apply_async(\n _write_chunk_to_template,\n args=(chunk, par.parval1_trans, pst_path),\n )\n for i, chunk in enumerate(chunks)\n ]\n [xx.get() for xx in x]\n pool.close()\n pool.join()", "def merge_recs(part_one, part_two, output):\n start_op_length = 28\n with open(part_one, 'rb') as a_handle, \\\n open(part_two, 'rb') as b_handle, \\\n open(output, 'wb') as merged:\n\n a_data = a_handle.read()\n b_data = b_handle.read()\n\n postgame_pos, _ = find_postgame(a_data, len(a_data))\n if postgame_pos:\n a_data_end = postgame_pos - LOOKAHEAD\n else:\n a_data_end = len(a_data)\n b_header_len, = struct.unpack('<I', b_data[:4])\n 
chapter = mgz.body.operation.build({\n 'type': 'action',\n 'op': 1,\n 'length': 2,\n 'action': {\n 'type': 'chapter',\n 'player_id': 0xff # our merge marker\n }\n })\n\n # part A with no postgame struct\n merged.write(a_data[:a_data_end])\n # chapter action\n merged.write(chapter)\n # offset to start of part B operations\n merged.write(struct.pack('<I', a_data_end + len(chapter) + b_header_len))\n # part B header (now a \"saved chapter\")\n merged.write(b_data[4:b_header_len])\n # part B operations with no start operation\n merged.write(b_data[b_header_len + start_op_length:])", "def _collapse(in_file):\n out_file = append_stem(in_file, \".trimming\").replace(\".gz\", \"\")\n if file_exists(out_file):\n return out_file\n seqs = collapse(in_file)\n write_output(out_file, seqs, minimum=1, size=16)\n return out_file", "def splitting():\n n = 1\n with open('numbers.txt', 'r+') as f:\n f.readline()\n seek_2 = f.tell()\n seek_1 = 0\n\n while seek_1 != seek_2:\n print(n)\n n += 1\n with open('numbers.txt', 'r+') as f, open('numbers.txt', 'r+') as f_2:\n f.seek(seek_1)\n f_2.seek(seek_2)\n seek_1, seek_2 = merge(f, f_2)\n\n make_result_file(seek_1)", "def baseline_rechunk(indir_path, outdir_path, O, I, R, file_format, addition, distributed, debug_mode=False, clean_out_dir=False, dont_write=False):\n\n print(f\"Setting arguments...\")\n global DEBUG_LOCAL\n global DONT_WRITE\n global tracker\n global outdirs_dict, outdir_index\n outdirs_dict = dict()\n outdir_index = 0\n tracker = Tracker()\n DEBUG_LOCAL = True if debug_mode else False\n DONT_WRITE = True if dont_write else False\n\n print(\"Addition mode:\", addition)\n print(\"DONT_WRITE: \", DONT_WRITE)\n\n O, I, R = tuple(O), tuple(I), tuple(R)\n\n file_manager = get_file_manager(file_format)\n\n infiles_partition = get_blocks_shape(R, I)\n infiles_volumes = get_named_volumes(infiles_partition, I)\n outfiles_partition = get_blocks_shape(R, O)\n outfiles_volumes = get_named_volumes(outfiles_partition, O)\n outfiles_volumes = outfiles_volumes.values()\n\n if distributed:\n repartition_dict = None\n \n json_filename = '/disk0/gtimothee/repartition_dict.json'\n if not os.path.isfile(json_filename):\n # print(\"cannot find association dict json file\")\n sys.exit(1)\n else:\n pass # print(f\"json file found\")\n\n try: \n with open(json_filename) as f:\n repartition_dict = json.load(f)\n except Exception as e: \n print(e)\n # print(\"error (1)\")\n sys.exit(1)\n\n if repartition_dict == None:\n # print(\"error (2)\")\n sys.exit(1)\n else:\n pass # print(f\"Found reparition dict: {repartition_dict}\")\n\n input_files = repartition_dict.values()\n else:\n input_files = file_manager.get_input_files(indir_path)\n\n t_read = 0\n t_write = 0\n\n vols_written = list()\n nb_infile_openings = 0\n nb_infile_seeks = 0\n nb_outfile_openings = 0\n nb_outfile_seeks = 0\n buffer_index = 1\n for input_file in input_files:\n print(f\"Treating buffer: {buffer_index}...\")\n buffer_index += 1\n nb_infile_openings += 1\n\n involume = get_volume(input_file, infiles_volumes, infiles_partition)\n t1 = time.time()\n if not DONT_WRITE:\n data = file_manager.read_data_from_fp(input_file, slices=None)\n else:\n data = None\n t1 = time.time() - t1\n t_read += t1\n \n for outvolume in outfiles_volumes:\n if hypercubes_overlap(involume, outvolume):\n shape, t2, nb_outfile_seeks_tmp = write_to_outfile(involume, outvolume, data, outfiles_partition, outdir_path, O, file_manager, addition, tracker)\n t_write += t2\n vols_written.append(shape)\n # nb_outfile_openings += 1 already 
included in nb_outfile_seeks\n nb_outfile_seeks += nb_outfile_seeks_tmp\n \n file_manager.close_infiles()\n\n if DONT_WRITE:\n assert tracker.is_complete(((0,0,0), R))\n\n # print(\"\\nShapes written:\")\n # for row in vols_written: \n # print(row)\n\n if clean_out_dir:\n print(\"Cleaning output directory\")\n file_manager.clean_directory(outdir_path)\n\n get_opened_files()\n\n return t_read, t_write, [nb_outfile_openings, nb_outfile_seeks, nb_infile_openings, nb_infile_seeks]", "def csv_files_in_folder_merger(file):\n stack = []\n for file_in_list in file:\n stack.append(file_to_generator(file_in_list))\n stacklijst = pd.concat(stack)\n\n return stacklijst", "def compress(src,dstfile):\n\tafile = zipfile.ZipFile(dstfile,\"w\",zipfile.ZIP_DEFLATED)\n\tfor root,dirs,files in os.walk(src):\n\t\tfor filename in files:\n\t\t\tabspath = osp.join(root,filename)\n\t\t\trelpath = osp.relpath(abspath,src)\n\t\t\tafile.write(abspath, relpath)\n\tafile.close();", "def join(frompackage, todir, ifclean=False):\r\n configobj = open(os.path.join(frompackage, 'config.in'), 'r')\r\n tofilename = configobj.readline()[:-1]\r\n buffersize = int(configobj.readline(), 10)\r\n tofileobj = open(os.path.join(todir, tofilename), 'wb')\r\n for line in configobj.readlines():\r\n partname = line[:-1]\r\n if not partname: break\r\n partfile = open(os.path.join(frompackage, partname), 'rb')\r\n buffer = partfile.read(buffersize)\r\n tofileobj.write(buffer)\r\n partfile.close()\r\n if ifclean: os.remove(os.path.join(frompackage, partname))\r\n print(partname, 'joined successfully!')\r\n tofileobj.close()\r\n configobj.close()", "def gatherfiles(self):\n\t\tfrom subprocess import Popen,PIPE\n\t\timport os\n\t\timport tarfile\n\t\timport glob\n\t\t\n\t\tprint \"=== \",self.nameID,\": Joining all the files in one\"\n\t\t# FIXME: Only there are 1 file, not needed the hadd\n\t\tfinalfile = os.path.join(\"Results\",self.outputfile)\n\t\t# FIXED BUG: just cp when there is only one file, otherwise\n\t\t# there are problems with the TTree\n\t\tif len(self.outputfiles) == 1:\n\t\t\t# Note that when there is only 1 file, always its #task=1\n\t\t\tcommand = [ 'cp', self.outputfiles[1], finalfile ]\n\t\telse:\n\t\t\tcommand = [ 'haddPlus', finalfile ]\n\t\t\tfor f in self.outputfiles.itervalues():\n\t\t\t\tcommand.append( f )\n\t\tp = Popen( command ,stdout=PIPE,stderr=PIPE ).communicate()\n\t\t# Checking if everything was allright\n\t\ttotalevts = self.getevents(finalfile,True)\n\t\tif totalevts != self.nevents:\n\t\t\tmessage = \"\\033[33;1mclustermanager.gatherfiles: WARNING\\033[0m the total file\"\n\t\t\tmessage += \"'\"+finalfile+\"' do not contain all the events:\\n\"\n\t\t\tmessage += \"Total events to be processed:\"+str(self.nevents)+\"\\n\"\n\t\t\tmessage += \"Total events in '\"+finalfile+\"':\"+str(totalevts)+\"\\n\"\n\t\t\tprint message\n\t\t\treturn \n\t\t# If everything was fine, deleting the files \n\t\t# and cleaning the directory\n\t\tfor f in self.outputfiles.itervalues():\n\t\t\tos.remove( f )\n\t\t# Taring and compressing\n\t\tfilestotar = glob.glob(\"./*.*\")\n\t\tfilestotar.append( \".storedmanager\")\n\t\ttar = tarfile.open(os.path.basename(self.cwd)+\".tar.gz\",\"w:gz\")\n\t\tfor f in filestotar:\n\t\t\ttar.add(f)\n\t\ttar.close()\n\t\t# if everything was fine, deleting the files\n\t\tif os.path.exists(os.path.basename(self.cwd)+\".tar.gz\"):\n\t\t\tfor f in filestotar:\n\t\t\t\tos.remove(f)\n\t\telse:\n\t\t\tmessage = \"\\033[33;1mclustermanager.gatherfiles: WARNING\\033[0m I can't 
manage\\n\"\n\t\t\tmessage += \"to create the backup .tar.gz file\\n\"\n\t\t\tprint message\n\n\t\tprint \"Created \"+finalfile\n\t\tprint \"========= Process Completed =========\"", "def duplicate_file():\n file = TEST_CONTENT_REPO / PACKS_DIR / \"Sample01\" / TEST_PLAYBOOKS_DIR / \"playbook-sample_test1.yml\"\n new_file = TEST_CONTENT_REPO / PACKS_DIR / \"Sample02\" / TEST_PLAYBOOKS_DIR / \"playbook-sample_test1.yml\"\n try:\n copyfile(file, new_file)\n yield\n finally:\n new_file.unlink()", "def combine_and_write(search_str, column_name='population'):\n comb = combine_dfs(find_dfs(search_str), column_name=column_name)\n ofl = search_str.replace('{sample}', 'combined')\n print('Writing to {}'.format(ofl))\n comb.to_pickle(ofl)\n return comb", "def _copy_chunk(self, last_pk):\n self.execute(self.commands.copy_chunk(\n self.name,\n self._join_cols(self.intersection.dest_columns),\n self._qualify(self.source.name, self.intersection.origin_columns),\n self.source.name,\n self.primary_key_column,\n last_pk,\n self.chunk_size\n ))\n self.commit()", "def write_chunk(self, outfile, tag, data):\n outfile.write(struct.pack(\"!i\", len(data)))\n outfile.write(tag)\n outfile.write(data)\n checksum = zlib.crc32(tag)\n checksum = zlib.crc32(data, checksum)\n outfile.write(struct.pack(\"!i\", checksum))", "def putchunk(self, *args, **kwargs):\n return _image.image_putchunk(self, *args, **kwargs)", "def _in_place(self, path, dialect, encoding):\n tmpfd, tmpfname = tempfile.mkstemp(prefix=\"clevercsv_\", suffix=\".csv\")\n tmpid = os.fdopen(tmpfd, \"w\", newline=\"\", encoding=encoding)\n self._write_to_stream(path, tmpid, dialect, encoding)\n tmpid.close()\n\n previous_sha1 = sha1sum(path)\n new_sha1 = sha1sum(tmpfname)\n if previous_sha1 == new_sha1:\n os.unlink(tmpfname)\n return 0\n\n shutil.move(tmpfname, path)\n return 2", "def mergeFiles(prefix_prev, prefix_new, input_folder_1, input_folder_2, output_folder, years, months):\n\n assert output_folder != input_folder_1\n assert output_folder != input_folder_2\n\n for c_year in years:\n for c_month in months:\n try:\n print(F\"Merging files for year {c_year} - {c_month}\")\n\n output_file = join(output_folder, F\"{prefix_new}_{c_year}_{c_month:02d}.nc\")\n if os.path.exists(output_file):\n print(F\"Removing file {output_file} before writing...\")\n os.remove(output_file)\n\n f1 = join(input_folder_1,F\"{prefix_prev}_{c_year}_{c_month:02d}.nc\")\n f2 = join(input_folder_2,F\"{prefix_new}_{c_year}_{c_month:02d}.nc\")\n\n ds1 = xr.open_dataset(f1)\n ds2 = xr.open_dataset(f2)\n\n dmerged = xr.concat([ds1, ds2], dim='obs')\n \n dmerged.to_netcdf(output_file)\n ds1.close()\n ds2.close()\n print(\"Done!\")\n\n except Exception as e:\n print(F\"Failed for {c_year}_{c_month}: {e}\", flush=True)", "def concatenate_fastq(path, isfastq, sample_name):\n \n r1 = []\n r2 = []\n filenames = get_filesnames_in_dir(path)\n \n for i in filenames:\n if \"fake_genome\" in i:\n continue\n elif \"R1\" in i:\n r1.append(i)\n elif \"R2\" in i:\n r2.append(i)\n if isfastq:\n nameR1 = sample_name + \"-R1.fastq\"\n nameR2 = sample_name + \"-R2.fastq\"\n else:\n nameR1 = sample_name + \"-R1.fasta\"\n nameR2 = sample_name + \"-R2.fasta\"\n\n #concatinate R1\n with open(path + nameR1, 'w') as outfile:\n for fname in sorted(r1):\n with open(path + fname) as infile:\n outfile.write(infile.read())\n outfile.write(\"\\n\")\n\n #concatinate R2\n with open(path + nameR2, 'w') as outfile:\n for fname in sorted(r2):\n with open(path + fname) as infile:\n outfile.write(infile.read())\n 
outfile.write(\"\\n\")\n\n \n for i in r1 + r2:\n os.remove(path + i)", "def make_up(self, base_path='./data/'):\n for csv_file_path in [f\"{base_path}{_}\" for _ in os.listdir(base_path)]:\n self.append_file(csv_file_path)", "def append_files(in_file1, character, in_file2, out_file):\n return_data = 0\n\n write_data = ''\n\n i = 0\n try:\n with open(in_file1, 'rt') as fi1:\n lines1 = fi1.readlines() # Read all the lines in fi1 as a tuple\n \n with open(in_file2, 'rt') as fi2:\n lines2 = fi2.readlines() # Read all the lines in fi2 as a tuple\n \n with open(out_file, 'at') as fo:\n fo.seek(0,2)\n while i < len(lines1):\n lines1[i] = lines1[i].rstrip('\\n')\n #lines1[i] = lines1[i].rstrip('\\r')\n fo.write(lines1[i] + character + lines2[i])\n i = i + 1\n print(write_data)\n except IOError:\n print(\"Error in reading/writing file.\")\n return_data = 2\n else:\n print('Operation completed successfully.')\n return_data = 1\n finally:\n fi2.close()\n fi1.close()\n fo.close()\n print(\"done\")\n return return_data", "def merge_rgb_geotiffs(dax, final_name, inputs, level):\n max_files = 60\n new_outputs = []\n\n input_chunks = [inputs[i:i + max_files] for i in xrange(0, len(inputs), max_files)]\n\n job_count = 0\n for chunk in input_chunks:\n job_count = job_count + 1\n f = add_merge_job(dax, final_name, chunk, level, job_count, False)\n new_outputs.append(f)\n\n # end condition - only one chunk\n if len(new_outputs) <= max_files:\n return add_merge_job(dax, final_name, new_outputs, level + 1, 1, True)\n\n return merge_rgb_geotiffs(dax, final_name, new_outputs, level + 1)", "def split_data_into_exchanges(source_path, destination_path):\n for subdir, dirs, files in os.walk(source_path):\n for file in files:\n source_full_file = os.path.join(subdir, file)\n print(source_full_file)\n df = pd.read_csv(source_full_file)\n for group_name, df in df.groupby(['Ticker', 'Exchange']):\n file_name = destination_path / str(df['Date'].iloc[0]) / convertTuple(group_name)\n utils.make_dir(file_name)\n with open(file_name, \"w+\") as f:\n df.to_csv(f, index=False)", "def cat_files(files, output):\n for file in files:\n with open(file, 'r') as fd:\n shutil.copyfileobj(fd, output)", "def cat_files(files, output):\n for file in files:\n with open(file, 'r') as fd:\n shutil.copyfileobj(fd, output)", "def concatRepeats2Bed(repeatPath, repeatfile):\n\n o = open(repeatfile, 'w')\n repdir = os.listdir(repeatPath)\n for chrom in repdir:\n if chrom.startswith('chr') and chrom.endswith('.fa.out'):\n for line in open(os.path.join(repeatPath,chrom)):\n if line.strip():\n try: #do this because there can be header lines that start with strings. 
And there can be empty lines\n t = line.strip().split()\n i = int(t[0])\n o.write('%s\\t%s\\t%s\\t%s\\n' %(t[4], t[5], t[6], t[8]))\n except ValueError:\n continue\n o.close()", "def moveNewFragmentsToTmpDir(options,nextTaskNum):\n for i in range(1,nextTaskNum):\n frag = getFragmentPath(options.tmpDir, options.fragBase, i)\n newfrag = getFragmentPath(\"%s%stmp\" % (options.tmpDir, os.sep), options.fragBase, i)\n os.rename(newfrag,frag)\n os.rmdir(\"%s%stmp\" % (options.tmpDir, os.sep))", "def step_parallel(in_csv_filename, terrestrial_data, marine_data, ancillary_path,\n out_csv_filename, from_gbif=True):\n csv_filename_pairs, header = get_chunk_files(\n in_csv_filename, out_csv_filename=out_csv_filename)\n\n# in_csv_fn, out_csv_fn = csv_filename_pairs[0]\n# intersect_csv_and_shapefiles(in_csv_fn, terrestrial_data,\n# marine_data, ancillary_path, out_csv_fn, False)\n\n with ProcessPoolExecutor() as executor:\n for in_csv_fn, out_csv_fn in csv_filename_pairs:\n executor.submit(\n intersect_csv_and_shapefiles, in_csv_fn, terrestrial_data,\n marine_data, ancillary_path, out_csv_fn, from_gbif)\n\n try:\n outf = open(out_csv_filename, 'w', encoding='utf-8')\n outf.write('{}'.format(header))\n smfile_linecount = 0\n for _, small_csv_fn in csv_filename_pairs:\n curr_linecount = get_line_count(small_csv_fn) - 1\n print('Appending {} records from {}'.format(\n curr_linecount, small_csv_fn))\n # Do not count header\n smfile_linecount += (curr_linecount)\n lineno = 0\n try:\n for line in open(small_csv_fn, 'r', encoding='utf-8'):\n # Skip header in each file\n if lineno == 0:\n pass\n else:\n outf.write('{}'.format(line))\n lineno += 1\n except Exception as inner_err:\n print('Failed to write {} to merged file; {}'.format(small_csv_fn, inner_err))\n except Exception as outer_err:\n print('Failed to write to {}; {}'.format(out_csv_filename, outer_err))\n finally:\n outf.close()\n\n lgfile_linecount = get_line_count(out_csv_filename) - 1\n print('Total {} of {} records written to {}'.format(\n lgfile_linecount, smfile_linecount, out_csv_filename))", "def dump_current_chunk(self, use_where):\n log.info(\"Dumping raw data onto local disk for further investigation\")\n log.info(\"Columns will be dumped in following order: \")\n log.info(\", \".join(self._pk_for_filter + self.checksum_column_list))\n for table_name in [self.table_name, self.new_table_name]:\n if table_name == self.new_table_name:\n # index for new schema can be any indexes that provides\n # uniqueness and covering old PK lookup\n idx_for_checksum = self.find_coverage_index()\n outfile = self._outfile_name(\n suffix=\".new\",\n chunk_id=0,\n # MySQL does create the file with the extension itself\n skip_compressed_extension=True,\n )\n else:\n # index for old schema should always be PK\n idx_for_checksum = \"PRIMARY\"\n outfile = self._outfile_name(\n suffix=\".old\",\n chunk_id=0,\n # MySQL does create the file with the extension itself\n skip_compressed_extension=True,\n )\n log.info(\"Dump offending chunk from {} into {}\".format(table_name, outfile))\n self.execute_sql(\n sql.dump_current_chunk(\n table_name,\n self.checksum_column_list,\n self._pk_for_filter,\n self.range_start_vars_array,\n self.select_chunk_size,\n idx_for_checksum,\n use_where,\n enable_outfile_compression=self.enable_outfile_compression,\n ),\n (outfile,),\n )", "def big_dedup_file(in_fname, out_fname, n_bins):\n filehandles = []\n for i in range(n_bins):\n filehandles.append(open(f'temp{i}.txt', 'w'))\n handle_iter = itertools.cycle(filehandles)\n with 
open(in_fname, 'r') as in_file:\n for line in in_file:\n next(handle_iter).write(line)\n for filehandle in filehandles:\n filehandle.close()\n\n with open(out_fname, 'w') as out_file:\n for i in range(n_bins):\n with open(f'temp{i}.txt', 'r') as tempfile:\n # deduplicate\n lines = list(set(tempfile.read().split('\\n')))\n random.shuffle(lines)\n out_file.write('\\n'.join(lines))\n logging.info(f'pseudodeduplicated {in_fname}, {out_fname} is also pseudorandomized')", "def render_merged(self, context):\r\n\r\n output, files, filter = self.resolve(context)\r\n\r\n # make paths absolute\r\n output_path = _abspath(output)\r\n source_paths = [_abspath(s) for s in files]\r\n\r\n # check if the asset should be (re)created\r\n if not os.path.exists(output_path):\r\n if not settings.ASSETS_AUTO_CREATE:\r\n # render the sources after all\r\n return self.render_sources(context)\r\n else:\r\n update_needed = True\r\n else:\r\n update_needed = get_updater()(output_path, source_paths)\r\n\r\n if update_needed:\r\n create_merged(source_paths, output_path, filter)\r\n last_modified = os.stat(output_path).st_mtime\r\n # TODO: do asset tracking here\r\n #get_tracker()()\r\n\r\n # modify the output url for expire header handling\r\n if settings.ASSETS_EXPIRE == 'querystring':\r\n outputfile = \"%s?%d\" % (output, last_modified)\r\n elif settings.ASSETS_EXPIRE == 'filename':\r\n name = output.rsplit('.', 1)\r\n if len(name) > 1: return \"%s.%d.%s\" % (name[0], last_modified, name[1])\r\n else: outputfile = \"%s.%d\" % (name, last_modified)\r\n elif not settings.ASSETS_EXPIRE:\r\n outputfile = output\r\n else:\r\n raise ValueError('Unknown value for ASSETS_EXPIRE option: %s' %\r\n settings.ASSETS_EXPIRE)\r\n\r\n context.update({'ASSET_URL': _absurl(outputfile)})\r\n try:\r\n result = self.childnodes.render(context)\r\n finally:\r\n context.pop()\r\n return result", "def rechunk_zarr(source, dest, nb_workers):\n nb_chunks = utils.chunk_dims(dest.shape, dest.chunks)\n\n args = []\n for chunk_idx in product(*tuple(range(n) for n in nb_chunks)):\n args.append((source, dest, chunk_idx))\n\n with multiprocessing.Pool(nb_workers) as pool:\n pool.starmap(fetch_and_write_chunk, args)", "def splice_a_chunk_in_a_file(self, file_data, glitch_num):\n start_point, end_point = self.get_random_start_and_end_points_in_file(file_data)\n section = file_data[start_point:end_point]\n repeated = ''\n\n for i in range(1, glitch_num):\n repeated += section\n\n new_start_point, new_end_point = self.get_random_start_and_end_points_in_file(file_data)\n file_data = file_data[:new_start_point] + repeated + file_data[new_end_point:]\n return file_data", "def assumpfile1():\n afile = tempfile.NamedTemporaryFile(suffix='.json', mode='a', delete=False)\n afile.write(ASSUMP_CONTENTS)\n afile.close()\n # must close and then yield for Windows platform\n yield afile\n if os.path.isfile(afile.name):\n try:\n os.remove(afile.name)\n except OSError:\n pass # sometimes we can't remove a generated temporary file", "def pack():\n clean_local()\n build()\n copy_json()\n optimize()\n tarball()", "def _subprocess_pack_charms(\n charms: Mapping[str, pathlib.Path],\n command_args: Collection[str],\n) -> Dict[str, pathlib.Path]:\n if charms:\n charm_str = humanize_list(charms.keys(), \"and\")\n emit.progress(f\"Packing charms: {charm_str}...\")\n cwd = pathlib.Path(os.getcwd()).resolve()\n generated_charms = {}\n with tempfile.TemporaryDirectory(prefix=\"charmcraft-bundle-\", dir=cwd) as temp_dir:\n temp_dir = pathlib.Path(temp_dir)\n try:\n # Put all the 
charms in this temporary directory.\n os.chdir(temp_dir)\n for charm, project_dir in charms.items():\n full_command = [*command_args, f\"--project-dir={project_dir}\"]\n with emit.open_stream(f\"Packing charm {charm}...\") as stream:\n subprocess.check_call(full_command, stdout=stream, stderr=stream)\n duplicate_charms = {}\n for charm_file in temp_dir.glob(\"*.charm\"):\n charm_name = charm_file.name.partition(\"_\")[0]\n if charm_name not in charms:\n emit.debug(f\"Unknown charm file generated: {charm_file.name}\")\n continue\n if charm_name in generated_charms:\n if charm_name not in duplicate_charms:\n duplicate_charms[charm_name] = temp_dir.glob(f\"{charm_name}_*.charm\")\n continue\n generated_charms[charm_name] = charm_file\n if duplicate_charms:\n raise errors.DuplicateCharmsError(duplicate_charms)\n for charm, charm_file in generated_charms.items():\n destination = cwd / charm_file.name\n destination.unlink(missing_ok=True)\n generated_charms[charm] = shutil.move(charm_file, destination)\n finally:\n os.chdir(cwd)\n return generated_charms", "def _write_files(files, prefix=None, clobber=False):\n [_write_file(infile, prefix, clobber) for infile in files]", "def generate():\n with open(remote_path, \"rb\") as f:\n for chunk in iter(lambda: f.read(buffer_size), b''):\n yield chunk", "def join_files(folder):\n\n first_ds = _get_first_source_dataset(folder)\n first_layer = first_ds.GetLayer(0)\n\n drv = ogr.GetDriverByName('GeoJSON')\n tmpfile = tempfile.mktemp(suffix=\".json\")\n out_dsn = drv.CreateDataSource(tmpfile)\n out_layer = out_dsn.CopyLayer(first_layer, new_name=first_layer.GetName())\n\n for source in os.listdir(folder)[1:]:\n logging.info('Joining file %s to %s' % (source, tmpfile)) \n dsn = ogr.Open(os.path.join(folder, source))\n layer = dsn.GetLayer()\n nfeatures = layer.GetFeatureCount()\n\n for i in range(nfeatures):\n feature = layer.GetNextFeature()\n out_layer.CreateFeature(feature.Clone())\n\n out_dsn.Destroy()\n\n return tmpfile", "def county_file_merger(folder_path):\n\n print(\"\\n*******************--- Starting File Merger for .csv files ---*******************\")\n with open(\"result.csv\",\"wb\") as outfile:\n for filename in os.listdir(folder_path):\n with open(filename,\"rb\") as infile:\n for line in infile:\n outfile.write(line)\n infile.close()\n outfile.close()\n print(\"\\nResult saved to -----> result.csv \")\n print(\"\\n*******************--- Finished File Merger for .csv files ---*******************\")", "def file_append(filepath,contents):\n with open(filepath, 'a') as f:\n f.write(contents+'\\n')", "def prepopulate_memo(self):\n existing = self.gi.libraries.show_library(self.library_id, contents=True)\n\n uploading_to = [x for x in existing if x['id'] == self.folder_id]\n if len(uploading_to) == 0:\n raise Exception(\"Unknown folder [%s] in library [%s]\" %\n (self.folder_id, self.library_id))\n else:\n uploading_to = uploading_to[0]\n\n for x in existing:\n # We only care if it's a subdirectory of where we're uploading to\n if not x['name'].startswith(uploading_to['name']):\n continue\n\n name_part = x['name'].split(uploading_to['name'], 1)[-1]\n if name_part.startswith('/'):\n name_part = name_part[1:]\n self.memo_path[name_part] = x['id']", "def fs_cat(self, src: str, chunk_size: int = 256) -> None:\n cmd = (\n \"with open('%s') as f:\\n while 1:\\n\"\n \" b=f.read(%u)\\n if not b:break\\n print(b,end='')\" % (src, chunk_size)\n )\n self.exec_(cmd, data_consumer=stdout_write_bytes)", "def _clone_defaults(self, source, dest, context):\n\n for 
base, dirs, files in os.walk(source):\n relative = os.path.relpath(base, source)\n\n for d in dirs:\n os.makedirs(os.path.join(dest, relative, d))\n\n for filename in files:\n\n if not filename.endswith(self.valid_extensions):\n continue\n\n with open(os.path.join(base, filename), 'r') as f:\n data = f.read()\n\n with open(os.path.join(dest, relative, filename), 'w') as f:\n data = jinja2.Template(data).render(**context)\n f.write(data)" ]
[ "0.65807515", "0.6522695", "0.5830749", "0.56331223", "0.5603233", "0.55737466", "0.5561251", "0.5557075", "0.5543502", "0.55282074", "0.5523479", "0.55110836", "0.547458", "0.54524946", "0.5427168", "0.54180014", "0.5275911", "0.5269838", "0.52052176", "0.5198629", "0.51861894", "0.5165819", "0.51608825", "0.5132295", "0.5116679", "0.51163363", "0.51034325", "0.5090486", "0.50695324", "0.5039642", "0.503592", "0.49816626", "0.49623114", "0.49559385", "0.49537668", "0.49377453", "0.4926784", "0.49210858", "0.48931688", "0.4886572", "0.487042", "0.48639572", "0.48616463", "0.4860644", "0.4838468", "0.48368865", "0.48304576", "0.48118684", "0.4791553", "0.47897306", "0.47839844", "0.47794148", "0.47731197", "0.4772401", "0.4768573", "0.47659737", "0.47611213", "0.47506675", "0.4741712", "0.4734002", "0.4724027", "0.47206688", "0.47163403", "0.47159886", "0.47108588", "0.4710442", "0.47032404", "0.47019994", "0.46937528", "0.4685105", "0.46821353", "0.4679697", "0.46791053", "0.46786717", "0.4676741", "0.467534", "0.46740997", "0.46695536", "0.46605727", "0.46591017", "0.46591017", "0.46539667", "0.46523887", "0.46352926", "0.46350855", "0.4634152", "0.46316344", "0.46279332", "0.46276686", "0.46234274", "0.46216223", "0.46125168", "0.4609892", "0.46069872", "0.4604878", "0.46010008", "0.45983535", "0.45980063", "0.4596769", "0.45959267" ]
0.6982434
0
Returns an OAuth2 authorization token or None in case of errors. This is the flow for nonweb clients.
def get_token(client_id, client_secret, username, password): try: if oauth2db.check_client(client_id, client_secret): if oauth2db.check_user(username, password): token, refresh = oauth2db.generate_token(client_id, username) res = { "token": token } except: res = { "error": "" } if 'token' in res: return res['token'] else: return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate_auth():\n try:\n token = oidc.get_access_token()\n except TypeError:\n # raised when the token isn't accessible to the oidc lib\n raise Unauthorized(\"missing auth token\")\n\n if not oidc.validate_token(token):\n terminate_session()\n raise Unauthorized(\"invalid auth token\")\n return token", "def accessToken(self):\n if session.token and 'expires' in session.token:\n expires = session.token['expires']\n # reuse token until expiration\n if expires == 0 or expires > time.time():\n return session.token['access_token']\n\n code = request.vars.code\n\n if code:\n data = dict(client_id=self.env.client_id,\n client_secret=self.env.client_secret,\n redirect_uri=session.redirect_uri,\n code=code,\n grant_type='authorization_code'\n )\n\n open_url = None\n opener = self.__build_url_opener(self.env.token_url)\n try:\n open_url = opener.open(self.env.token_url, urlencode(data),\n self.socket_timeout)\n except urllib2.HTTPError, e:\n tmp = e.read()\n raise Exception(tmp)\n finally:\n if session.code:\n del session.code\n if session.redirect_uri:\n del session.redirect_uri\n\n if open_url:\n try:\n data = open_url.read()\n resp_type = open_url.info().gettype()\n #: try json style first\n if not resp_type or resp_type[:16] == 'application/json':\n try:\n tokendata = json.loads(data)\n session.token = tokendata\n except Exception, e:\n raise Exception(\"Cannot parse oauth server response %s %s\" % (data, e))\n #: try with x-www-form-encoded\n else:\n tokendata = cgi.parse_qs(data)\n session.token = \\\n dict([(k, v[-1]) for k, v in tokendata.items()])\n #: we failed parsing\n if not tokendata:\n raise Exception(\"Cannot parse oauth server response %s\" % data)\n #: set expiration\n if 'expires_in' in session.token:\n exps = 'expires_in'\n elif 'expires' in session.token:\n exps = 'expires'\n else:\n exps = None\n session.token['expires'] = exps and \\\n int(session.token[exps]) + \\\n time.time()\n finally:\n opener.close()\n return session.token['access_token']\n\n session.token = None\n return None", "async def get_token(self, *args, **kwargs) -> Optional[OAuth2Token]:\n token_record = ...\n\n if token_record is not None:\n return OAuth2Token(\n access_token=token_record.access_token,\n refresh_token=token_record.refresh_token,\n scope=token_record.scope,\n issued_at=token_record.issued_at,\n expires_in=token_record.expires_in,\n client_id=token_record.client_id,\n token_type=token_record.token_type,\n revoked=token_record.revoked,\n )", "def getAccessToken(self):\r\n\r\n #lets see if we have an oauth code\r\n if self.oauthToken is None:\r\n self.oauthToken = self.createAccessToken\r\n\r\n if self.oauthToken.isExpired(): #check to see if its expired if so refresh it\r\n self.oauthToken = self.refreshAccessToken()\r\n\r\n return self.oauthToken #return out access token\r", "def get_auth_token(self):\n return self.do_rpc('get_authorization',\n username=self._username,\n password=self._password)", "def get_token(self):\n message = {\n \"request\": \"access_token\",\n \"account\": self.account,\n \"min_valid_period\": self.validity,\n \"application_hint\": \"orpy\",\n }\n try:\n self._sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n self._sock.connect(self.socket_path)\n self._sock.sendall(json.dumps(message).encode())\n\n data = \"\"\n while True:\n recv = self._sock.recv(16).decode()\n if recv:\n data += recv\n else:\n break\n except socket.error as err:\n raise exceptions.AuthExceptiob(\n err=\"Cannot communicate with the \" \"oidc-agent: %s\" % err\n )\n finally:\n 
self._sock.close()\n\n token = json.loads(data)\n if token.get(\"status\") == \"failure\":\n raise exceptions.AuthError(err=token.get(\"error\"))\n return token", "def get_token() -> str:\n try:\n bearer, authorization = request.headers['Authorization'].split()\n if 'bearer' not in bearer.lower():\n raise Forbidden('Invalid token. Please login!')\n return authorization\n\n except Exception:\n raise Forbidden('Token is required. Please login!')", "def createAccessTokenReplacement(self):\r\n\r\n url = self._config['OAUTH2ENDPOINT']['huddleAuthServer'] + \"request?response_type=code\" + \\\r\n \"&client_id=\" + self._config['OAUTH2']['clientID'] + \\\r\n \"&redirect_uri=\" + self._config['OAUTH2']['redirectUri']\r\n webbrowser.open_new(url)\r\n code = input('Please enter the code from your web browser:')\r\n\r\n response = self._oauth.obtainAccessTokenBy3LeggedOAuth(code)\r\n responseBody = json.loads(response['Body'])\r\n\r\n try:\r\n oauthToken = Token(responseBody)\r\n except TypeError as e:\r\n print (\"Bad response when requesting a token \" + str(response))\r\n sys.exit()\r\n\r\n return oauthToken", "def get_authorization_token(self):\n if not CentralStorageClient.is_authorized():\n self.authorize()\n\n return CentralStorageClient.token", "def __step2_get_oauth_request_token(self, oauth_id):\n\n c, r = http._post(\n self.auth_package.OAUTH+'auth/',\n data={\n 'action': 'accepted',\n 'oauth': oauth_id,\n 'login': self.auth_package.login,\n 'user_pwd': self.auth_package.password,\n 'account': 'r',\n 'credentials': 'r',\n\n },\n )\n data = r.read()\n c.close()\n\n if r.status == 302:\n location = r.getheader('location', '')\n if not location.startswith(self.auth_package.redirect_uri):\n raise Exception(\"Got an unexpected redirection to %s\"%location)\n query = urlparse.urlsplit(location).query\n query_dict = dict(urlparse.parse_qsl(query))\n if 'code' in query_dict:\n self._token = query_dict['code'] # Oauth Request Token\n else:\n raise Exception(\"Got unexpected http code %s (%s)\" % (r.status, r.reason))", "def get_oauth_token():\n return session.get('remote_oauth')", "def get_access_token(self):\n payload = {\n 'grant_type': 'client_credentials',\n 'client_id': self.client_id,\n 'client_secret': self.client_secret,\n 'resource': self.resource\n }\n res = requests.post(self.auth_url, data=payload)\n data = res.json()\n if res.status_code == 200:\n return data['access_token'], res\n\n return False, res", "def GetToken(self):\n if self.auth_token_:\n return self.auth_token_\n raise RuntimeError('ClientLoginAuthPolicy is not logged in.')", "async def oauth2_token(\n request: Request, oauth2_request=Depends(_oauth2_request)\n):", "def get_token():\n if g.current_user.is_anonymous or g.token_used:\n return unauthorized('Invalid credentials')\n return jsonify({'token': g.current_user.generate_auth_token(\n expiration=3600), 'expiration': 3600})", "def get_token():\n if g.current_user.is_anonymous or g.token_used:\n return unauthorized('Invalid credentials')\n return jsonify({'token': g.current_user.generate_auth_token(\n expiration=3600), 'expiration': 3600})", "def twitter_get_bearer_token(self):\n url = 'https://api.twitter.com/oauth2/token'\n headers = {'Authorization': 'Basic %s' % self.private_data['twitter']['bearer_credentials'],\n 'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8'}\n data = 'grant_type=client_credentials'\n resp = requests.post(url, headers=headers, data=data)\n\n if resp.status_code == 200:\n content = json.loads(resp.content)\n if 
content['token_type'] == 'bearer' and 'access_token' in content:\n return content['access_token']\n else:\n return None\n else:\n print('ERROR: failed to retreive bearer token')\n return None", "def get_token(self, legs=2):\n if legs == 2:\n\n headers = {}\n\n headers.update({ 'Content-Type' : 'application/x-www-form-urlencoded' })\n\n data = {}\n\n data.update({'client_id' : self.clientId})\n data.update({'client_secret' : self.clientSecret})\n data.update({'grant_type' : 'client_credentials'})\n data.update({'scope' : self.scopes})\n\n resp = self.http.post(self.webAddress, headers=headers, data=data)\n\n if resp.status_code == 200:\n cont = resp.json()\n return (cont['access_token'], cont['expires_in'])\n\n raise ConnectionError(\"Request failed with code {}\".format(resp.status_code) +\n \" and message : {}\".format(resp.content) +\n \" during authentication.\")\n else:\n raise NotImplementedError(\"3-legged authentication has not been implemented.\")", "def token(self):\n if self.is_auth_needed():\n self.authorize()\n\n return self.get_from_cache('token')", "def authorize(self, OAUTH_SETTINGS, consumer_key, consumer_secret, oauth_token, oauth_token_secret, oauth_verifier):\n\t\tconsumer = oauth2.Consumer(consumer_key, consumer_secret)\n\t\ttoken = oauth2.Token(oauth_token, oauth_token_secret)\n\t\tclient = oauth2.Client(consumer, token)\n\n\t\treq = oauth2.Request(method=\"GET\", url=OAUTH_SETTINGS['access_token_url'], parameters={\"oauth_verifier\": oauth_verifier})\n\t\tresp, content = client.request(req.to_url(), \"GET\")\n\t\tif resp['status'] != \"200\":\n\t\t\traise Exception(content)\n\n\t\tquery = urlparse.parse_qs(content)\n\t\treturn query['oauth_token'][0], query['oauth_token_secret'][0]", "def _get_access_token(self):\n if self._service_token:\n logger.info('Use service token: %s',\n 5 * '*' + self._service_token[50:])\n return self._service_token\n\n if not all([self.app_id, self._login, self._password]):\n raise ValueError(\n 'app_id=%s, login=%s password=%s (masked) must be given'\n % (self.app_id, self._login,\n '*' * len(self._password) if self._password else 'None'))\n\n logger.info(\"Getting access token for user '%s'\" % self._login)\n with self.http_session as s:\n if self._client_secret:\n url_query_params = self.do_direct_authorization(session=s)\n else:\n self.do_login(http_session=s)\n url_query_params = self.do_implicit_flow_authorization(session=s)\n logger.debug('url_query_params: %s', url_query_params)\n\n if 'access_token' in url_query_params:\n logger.info('Access token has been gotten')\n return url_query_params['access_token']\n else:\n raise VkAuthError('OAuth2 authorization error. 
Url params: %s'\n % url_query_params)", "def get_token(self) -> None:\n with self._lock:\n if not self._endpoint:\n raise AuthenticationTokenError(\n 'Token is invalid and endpoint (auth_endpoint) for obtaining is not set.')\n\n url = self._endpoint + '/app'\n data = {\n \"client_id\": self._client_id,\n \"client_secret\": self._client_secret,\n \"username\": self._username,\n \"password\": self._password\n }\n\n res = self.post(url, data)\n self._token_info.parse_token_result(res, 'Get token')", "def token(cls):\n if not (cls._consumer_key and cls._consumer_secret):\n raise NoCredentialsException\n if not cls._bearer_token:\n resp = requests.post(\n 'https://api.twitter.com/oauth2/token',\n auth=(os.getenv('CONSUMER_KEY'), os.getenv('CONSUMER_SECRET')),\n data={'grant_type': 'client_credentials'}\n )\n data = resp.json()\n token_type = data.get('token_type')\n if token_type != 'bearer':\n msg = (\n f'Expected token_type to equal \"bearer\", but got '\n f'{token_type} instead.'\n )\n raise AttributeError(msg)\n\n cls._bearer_token = OAuth2Bearer(data['access_token'])\n return cls._bearer_token", "def get_auth_token(cls, endpoint, headers):\n\n token = None\n scope = None\n resp = requests.post(endpoint, headers=headers)\n if resp.status_code == 200:\n auth_resp_json = resp.json()\n token = auth_resp_json[\"access_token\"]\n try:\n scope = auth_resp_json[\"scope\"]\n except KeyError:\n scope = None\n if resp.status_code == 401:\n token = \"BAD\"\n return token, scope", "def get_oauth_token(self, dev_cred):\n return self.request({\n \"method\": \"GET\",\n \"path\": \"/\" + UUID + \"/token/\" + str(dev_cred)\n })", "def getAuthObj(self):\n if self.accessToken is None:\n self.authenticate()\n\n return OAuth1(self.apiKey, client_secret = self.apiKeySecret,\n resource_owner_key = self.accessToken,\n resource_owner_secret = self.accessTokenSecret,\n signature_type = 'auth_header')", "def _get_access_token(self, url):\n if self.access_token:\n return self.access_token\n data = \"client_id=%s&client_secret=%s&grant_type=password&username=%s&password=%s&scope=write\" %\\\n (self.client_id, self.client_secret, self.username, self.password)\n\n parsed = urlparse(url)\n path = urlunparse(ParseResult(parsed.scheme, parsed.netloc, \"/oauth2/access_token\", None, None, None))\n\n auth_resp = urlopen(Request(path, data), timeout=10)\n if auth_resp.getcode() != 200:\n self.logger.error(\"Error with client credentials\")\n return self.access_token\n auth_resp_data = json.loads(auth_resp.read())\n\n if \"access_token\" in auth_resp_data:\n self.access_token = auth_resp_data[\"access_token\"]\n else:\n self.logger.error(\"Error with client credentials\")\n return self.access_token", "def fetch_oauth_access_token(consumer_token, request_token):\n url = get_oauth_access_token_url(consumer_token, request_token)\n request = urllib2.urlopen(url)\n token = _oauth_parse_response(request.read())\n request.close()\n return token", "def fetch_oauth_request_token(consumer_token):\n url = get_oauth_request_token_url(consumer_token)\n request = urllib2.urlopen(url)\n token = _oauth_parse_response(request.read())\n request.close()\n return token", "def get_access_token(self):\n logger.info('Try to get access token via OAuth')\n\n if self.user_login and not self.user_password:\n # Need user password\n pass\n\n if not self.user_login and self.user_password:\n # Need user login\n pass\n\n auth_session = requests.Session()\n\n login_form_response = auth_session.get(self.LOGIN_URL)\n\n login_form_action = re.findall(r'<form ?.* 
action=\"(.+)\"', login_form_response.text)\n if not login_form_action:\n raise VkAuthorizationError('vk.com changed login flow')\n\n # Login\n login_form_data = {\n 'email': self.user_login,\n 'pass': self.user_password,\n }\n\n response = auth_session.post(login_form_action[0], login_form_data)\n\n logger.info('Cookies %s', auth_session.cookies)\n logger.info('Login response url %s', response.url)\n\n if 'remixsid' in auth_session.cookies or 'remixsid6' in auth_session.cookies:\n pass\n elif 'sid=' in response.url:\n self.auth_captcha_is_needed(response.content, auth_session)\n elif 'act=authcheck' in response.url:\n self.auth_code_is_needed(response.content, auth_session)\n elif 'security_check' in response.url:\n self.phone_number_is_needed(response.content, auth_session)\n else:\n raise VkAuthorizationError('Authorization error (bad password)')\n\n # OAuth2\n oauth_data = {\n 'response_type': 'token',\n 'client_id': self.app_id,\n 'scope': self.scope,\n 'display': 'mobile',\n }\n response = auth_session.post('https://oauth.vk.com/authorize', oauth_data)\n logger.info('OAuth URL: %s %s', response.request.url, oauth_data)\n\n if 'access_token' not in response.url:\n form_action = re.findall(u'<form method=\"post\" action=\"(.+?)\">', response.text)\n if form_action:\n response = auth_session.get(form_action[0])\n else:\n try:\n json_data = response.json()\n except ValueError: # not json in response\n error_message = 'OAuth2 grant access error'\n else:\n error_message = 'VK error: [{0}] {1}'.format(\n json_data['error'],\n json_data['error_description']\n )\n auth_session.close()\n raise VkAuthorizationError(error_message)\n\n auth_session.close()\n\n parsed_url = urlparse(response.url)\n logger.info('Parsed URL: %s', parsed_url)\n\n token_dict = dict(parse_qsl(parsed_url.fragment))\n if 'access_token' in token_dict:\n self.access_token = token_dict['access_token']\n self.access_token_expires_in = token_dict['expires_in']\n else:\n raise VkAuthorizationError('OAuth2 authorization error')", "def get_oauth_token(\n self, token_url: str, *, client_secret: str, response_url: str, **kwargs\n ) -> dict:\n token = self.oauth.fetch_token(\n token_url,\n client_secret=client_secret,\n authorization_response=response_url,\n **kwargs,\n )\n return dict(token)", "def get_auth_token():\n \n form_fields = {\n \"client_id\": client_id,\n \"client_secret\":client_secret,\n \"code\": code,\n \"redirect_uri\": \"http://www.stackprinter.com\"\n }\n form_data = urllib.urlencode(form_fields)\n results = __gae_fetch(url = 'https://stackexchange.com/oauth/access_token',\n method = urlfetch.POST, \n payload = form_data,\n headers={'Content-Type': 'application/x-www-form-urlencoded'})\n response = results.content\n return response", "async def get_token(self):\n # TODO: turn this into a custom auth engine\n body = {\n \"applicationKey\": self.application_key,\n \"applicationSecret\": self.application_secret,\n }\n headers = {\n \"Content-Type\": \"application/json\",\n \"resourceOwnerId\": self.application_key,\n \"requestUId\": uuid.uuid4().hex,\n \"accept-language\": \"EN\",\n }\n\n auth_url = self.base_url / \"v1/oauth/token\"\n r = httpx.post(\n auth_url.url,\n json=body,\n headers=headers,\n # auth=(self.consumer_id, self.consumer_secret),\n cert=self.cert,\n )\n\n if r.status_code == 200:\n self.creds = SCBCredentialsResponse.parse_raw(r.content)\n return self.creds\n else:\n raise ConnectionError(r.json())", "def oauth2(self):\n from hubspot3.oauth2 import OAuth2Client\n\n return OAuth2Client(**self.auth, 
**self.options)", "def _GetAccessToken(self):\n\n # Encoding client authorization \n pair = \"{client_key}:{client_secret}\".format(client_key=self.client_key, client_secret=self.client_secret)\n authorization = 'MUthRmpVa1JUaVlxbDVUTElUYVFnOlRENmpYMTdGbmhPSzNodWdqWUZqVDU0YzVjWGNQeko3'\n\n # Getting the access token\n access_token_headers = { \"Authorization\": \"Basic {authorization}\".format(authorization=authorization) }\n request_endpoint = \"/oauth/token?grant_type=authorization_code&code={code}&redirect_uri=https://80a3bb863001.ngrok.io\".format(code=self.code)\n print(request_endpoint)\n self.conn.request(\"POST\", request_endpoint, headers=access_token_headers)\n res = self.conn.getresponse()\n response = json.loads(res.read().decode(\"utf-8\"))\n\n try:\n return response[\"access_token\"]\n except KeyError:\n print(\"Request for access token failed for the following reason: {reason}\".format(reason=response[\"reason\"]))", "def setup_oauth():\n # Request token\n oauth = OAuth1(CONSUMER_KEY, client_secret=CONSUMER_SECRET)\n r = requests.post(url=REQUEST_TOKEN_URL, auth=oauth)\n credentials = parse_qs(r.content)\n\n resource_owner_key = credentials.get('oauth_token')[0]\n resource_owner_secret = credentials.get('oauth_token_secret')[0]\n\n # Authorize\n authorize_url = AUTHORIZE_URL + resource_owner_key\n print 'Please go here and authorize: ' + authorize_url\n\n verifier = raw_input('Please input the verifier: ')\n oauth = OAuth1(CONSUMER_KEY,\n client_secret=CONSUMER_SECRET,\n resource_owner_key=resource_owner_key,\n resource_owner_secret=resource_owner_secret,\n verifier=verifier)\n\n # Finally, Obtain the Access Token\n r = requests.post(url=ACCESS_TOKEN_URL, auth=oauth)\n credentials = parse_qs(r.content)\n token = credentials.get('oauth_token')[0]\n secret = credentials.get('oauth_token_secret')[0]\n\n return token, secret", "def _request_token(self):\n params = {\n 'grant_type': 'client_credentials',\n 'client_id': self.client_id,\n 'client_secret': self.client_secret\n }\n\n response = self._http_request(\n method='POST',\n headers={'Content-Type': 'application/x-www-form-urlencoded'},\n full_url=self.auth_url,\n data=params\n )\n access_token = response.get('access_token')\n auth_header = {'Authorization': f'Bearer {access_token}'}\n return auth_header", "def get_access_token(request):\n user = request.user\n flow = _create_flow(request)\n\n flow.params['state'] = _build_state_value(request, user)\n credentials = StorageByKeyName(\n CredentialsNDBModel, user.user_id(), 'credentials').get()\n\n authorize_url = flow.step1_get_authorize_url()\n redirect_response_object = HttpResponseRedirect(authorize_url)\n if credentials is None or credentials.invalid:\n return redirect_response_object\n\n # Find out if credentials is expired\n refresh_failed = False\n if credentials.access_token is None or credentials.access_token_expired:\n try:\n credentials.refresh(httplib2.Http())\n except AccessTokenRefreshError:\n return redirect_response_object\n except Exception:\n refresh_failed = True\n\n port_value = _validate_port(request.GET.get('port'))\n if port_value is None:\n return HttpTextResponse('Access Token: %s' % (credentials.access_token,))\n\n # Send access token along to localhost client\n redirect_template_args = {'port': port_value}\n if refresh_failed:\n quoted_error = urllib.quote(OAUTH_DEFAULT_ERROR_MESSAGE)\n redirect_template_args['error'] = quoted_error\n client_uri = ACCESS_TOKEN_FAIL_REDIRECT_TEMPLATE % redirect_template_args\n else:\n quoted_access_token = 
urllib.quote(credentials.access_token)\n redirect_template_args['token'] = quoted_access_token\n client_uri = ACCESS_TOKEN_REDIRECT_TEMPLATE % redirect_template_args\n\n return HttpResponseRedirect(client_uri)", "def get_access_token(request):\n user = request.user\n flow = _create_flow(request)\n\n flow.params['state'] = _build_state_value(request, user)\n credentials = StorageByKeyName(\n CredentialsNDBModel, user.user_id(), 'credentials').get()\n\n authorize_url = flow.step1_get_authorize_url()\n redirect_response_object = HttpResponseRedirect(authorize_url)\n if credentials is None or credentials.invalid:\n return redirect_response_object\n\n # Find out if credentials is expired\n refresh_failed = False\n if credentials.access_token is None or credentials.access_token_expired:\n try:\n credentials.refresh(httplib2.Http())\n except AccessTokenRefreshError:\n return redirect_response_object\n except:\n refresh_failed = True\n\n port_value = _validate_port(request.GET.get('port'))\n if port_value is None:\n return HttpTextResponse('Access Token: %s' % (credentials.access_token,))\n\n # Send access token along to localhost client\n redirect_template_args = {'port': port_value}\n if refresh_failed:\n quoted_error = urllib.quote(OAUTH_DEFAULT_ERROR_MESSAGE)\n redirect_template_args['error'] = quoted_error\n client_uri = ACCESS_TOKEN_FAIL_REDIRECT_TEMPLATE % redirect_template_args\n else:\n quoted_access_token = urllib.quote(credentials.access_token)\n redirect_template_args['token'] = quoted_access_token\n client_uri = ACCESS_TOKEN_REDIRECT_TEMPLATE % redirect_template_args\n\n return HttpResponseRedirect(client_uri)", "def _get_token(self):\n if self._access_token is None or self._is_expired():\n self._refresh_token()\n return self._access_token", "def obtain_access_token():\n\tpost_data = {'grant_type': 'client_credentials',\n\t\t\t\t 'client_id': conos_config['client_id'],\n\t\t\t\t 'client_secret': conos_config['client_secret']}\n\n\ttry:\n\t\tresponse = requests.post(url=conos_config['sts_url'], data=post_data, timeout=60) # 60 seconds\n\t\tif response.ok:\n\t\t\treturn 'Bearer ' + response.json()['access_token']\n\t\telse:\n\t\t\tprint('\\nERROR: Can not obtain access token')\n\t\t\tprint('\\nResponse error: ', response.json())\n\t\t\tresponse.raise_for_status()\n\texcept requests.exceptions.RequestException as e:\n\t\t# All exceptions that Requests explicitly raises inherit from requests.exceptions.RequestException\n\t\tprint(\"Root cause: \", e)\n\t\tsys.exit(1)", "def get_token(self): # pragma: no cover\n\t\treturn (session.get(\"access_token\"), \"\")", "def get_token(*args, **kwargs):\n try:\n response = server.create_token_response()\n except (JWTError, JWTExpiredError) as e:\n # - in Authlib 0.11, create_token_response does not raise OAuth2Error\n # - fence.jwt.errors.JWTError: blacklisted refresh token\n # - JWTExpiredError (cdiserrors.AuthNError subclass): expired\n # refresh token\n # Returns code 400 per OAuth2 spec\n body = {\"error\": \"invalid_grant\", \"error_description\": e.message}\n response = flask.Response(\n json.dumps(body), mimetype=\"application/json\", status=400\n )\n return response", "def obtainAccessTokenBy3LeggedOAuth(self, auth_code):\r\n header = {'Accept': 'application/json', 'Content-Type': 'application/x-www-form-urlencoded'}\r\n url = self._config['OAUTH2ENDPOINT']['huddleAccessTokenServer']\r\n\r\n body = {\"grant_type\": \"authorization_code\",\r\n \"client_id\": self._config['OAUTH2']['clientID'],\r\n \"redirect_uri\": 
self._config['OAUTH2']['redirectUri'],\r\n \"code\": auth_code}\r\n\r\n return self._adapter.postRequest(url, header, parse.urlencode(body))", "def request_token(self, **kwargs):\n # type: (Any) -> Token\n token = self._request(\n self._client.fetch_token,\n self._token_endpoint,\n grant_type=self.GRANT_AUTHORIZATION_CODE,\n **kwargs\n )\n self.set_token(token)\n return token", "def get_auth_token():\n if CFG.auth_enabled:\n auth_token = get_keystone_token()\n else:\n auth_token = 'notrealtoken'\n\n return auth_token", "def get_oauth():\n\n # initial app authorization request - not tied to specific user\n request_token, request_token_secret = goodreads.get_request_token(header_auth=True)\n\n # assign request tokens to session for future use\n session['request_token'] = request_token\n session['request_token_secret'] = request_token_secret\n\n # url takes user to Goodreads and presents them with option to authorize readerboard\n authorize_url = goodreads.get_authorize_url(request_token)\n\n # send user to goodreads\n return redirect(authorize_url)", "def fetch_token(auth_url: str) -> Tuple[str, OAuth2Session, str, str]:\n # make sure our URL ends with a /\n if auth_url[-1] != '/':\n auth_url += '/'\n\n _logger.info(f\"starting procedure with auth_url {auth_url}\")\n exists = get_current_metadata(auth_url)\n\n if exists:\n _logger.info(\"token exists, restoring\")\n token, token_endpoint, auth_endpoint, api_url, display_name, support_contact, profile_id, con_type, country_id, _, _ = exists\n oauth = OAuth2Session(client_id=CLIENT_ID, token=token, auto_refresh_url=token_endpoint)\n api_url, token_endpoint, auth_endpoint = get_info(auth_url)\n else:\n _logger.info(\"fetching token\")\n api_url, token_endpoint, auth_endpoint = get_info(auth_url)\n oauth = oauth2.run_challenge(token_endpoint, auth_endpoint, EDUVPN)\n\n try:\n oauth.refresh_token(token_url=token_endpoint)\n except InvalidGrantError as e:\n _logger.warning(f\"token invalid: {e}\")\n oauth = oauth2.run_challenge(token_endpoint, auth_endpoint, EDUVPN)\n\n return api_url, oauth, token_endpoint, auth_endpoint", "def acquire_oauth2_credentials():\n if os.path.isfile(\"%s/cre.json\" % file_path):\n f = open(\"%s/cre.json\" % file_path, \"r\")\n credentials = client.OAuth2Credentials.from_json(f.read())\n f.close()\n else: \n flow = client.flow_from_clientsecrets(\n \"%s/client_secrets.json\" % file_path,\n scope='https://www.googleapis.com/auth/analytics.readonly',\n redirect_uri='urn:ietf:wg:oauth:2.0:oob')\n auth_uri = flow.step1_get_authorize_url()\n webbrowser.open(auth_uri)\n auth_code = input('Enter the authentication code: ')\n credentials = flow.step2_exchange(auth_code)\n write_credentials(\"%s/cre.json\" % file_path, credentials)\n return credentials", "def get_access_token():\n if request.method == \"GET\":\n return render_template(\"index.html\")\n elif request.method == \"POST\":\n # Authenticate\n auth = Authorization()\n response = auth.post()\n return render_template(\"index.html\", data=response[0])", "def get_auth_token(self, request: Request, type=\"Bearer\") -> str:\n if \"Authorization\" not in request.headers:\n raise AuthenticationRequiredException\n try:\n auth_type, auth_code = request.headers[\"Authorization\"].split(' ')\n assert auth_type == type\n except Exception:\n raise AuthenticationSchemeInvalidException\n return auth_code", "def authentication_request():\n # Get the access token from the header\n auth_header = request.headers.get('Authorization')\n if auth_header:\n try:\n access_token = 
auth_header.split(' ')[1]\n except IndexError:\n return {\"message\": \"Token is malformed\"}, status.HTTP_401_UNAUTHORIZED\n else:\n access_token = ''\n\n return access_token", "def _get_auth_token(self):\n\n __logger__.debug(\"Getting auth Token\")\n return self.keystone_client.auth_ref['token']['id']", "def get_access_token(self, token_url):\n # type: (str) -> str\n\n payload = {\n \"grant_type\" : \"client_credentials\",\n \"client_id\" : self.client_id,\n \"client_secret\" : self.client_secret,\n \"scope\" : self.client_scope,\n }\n headers = {\n \"accept\" : \"application/json\",\n }\n resp = requests.post(f\"{self.base_url}/{token_url}\", data=payload, headers=headers)\n try:\n if (resp.ok):\n return resp.json().get('access_token')\n except (ValueError):\n self.__log.error (\"Error obtaining access token with credentials\")", "def get_access_token(self, path='/oauth/token', data={}):\n if data.keys():\n data.update(self.data)\n else:\n data = self.data.copy()\n data.update({\n 'grant_type': 'password',\n 'email': self.env.get('TESLA_EMAIL'),\n 'password': self.env.get('TESLA_PASSWORD')\n })\n try:\n req = requests.post(url='%s%s' % (self.url, path), data=data)\n # print(req.status_code)\n # print(req.content)\n self.token.update(req.json())\n except:\n raise 'invalid credentials'\n return self.token", "def get_token(self):\n client_auth = requests.auth.HTTPBasicAuth(self.client, self.secret)\n post_data = {'grant_type': 'password', 'username': self.user, 'password': self.password}\n headers = {'User-Agent': self.user_agent}\n response = requests.Session()\n response2 = response.post(self.token_url, auth=client_auth, data=post_data, headers=headers)\n self.token = response2.json()['access_token']\n self.t_type = response2.json()['token_type']", "def get_token(self):\n client_auth = requests.auth.HTTPBasicAuth(self.client, self.secret)\n post_data = {'grant_type': 'password', 'username': self.user, 'password': self.password}\n headers = {'User-Agent': self.user_agent}\n response = requests.Session()\n response2 = response.post(self.token_url, auth=client_auth, data=post_data, headers=headers)\n self.token = response2.json()['access_token']\n self.t_type = response2.json()['token_type']", "def oauth():\n code = request.args.get('code')\n if code:\n params = deepcopy(settings)\n url = \"{host}/oauth2/access_token/\".format(host=params.pop('host')) \n params['code'] = code\n params['client_id'] = params.pop('clientId')\n params['redirect_uri'] = params.pop('redirectURI')\n r = requests.post(url, data=params)\n if r.status_code == 500:\n f = open('error.html','w')\n f.write(r.content)\n f.close()\n if r.status_code == 200:\n data = json.loads(r.content)\n resp = make_response(render_template('oauth.html', settings=settings, access_token=data.get('access_token')))\n for k,v in data.items():\n resp.set_cookie(k, v)\n return resp\n access_token = request.cookies.get(\"access_token\")\n return render_template('oauth.html',settings=settings, access_token=access_token)", "def _request_access_token(self):\n resp = requests.get(self.TOKEN_URL_FORMAT.format(\n self.creds().consumer_key(), self.creds().app_secret())\n )\n status = resp.status_code\n\n # If the token request fails, try to use the configured app id\n # and secret. This probably won't work, but the docs say that it\n # should. 
for more info, see:\n # https://developers.facebook.com/docs/facebook-login/access-tokens\n token = \"%s|%s\" % (self.creds().consumer_key(),\n self.creds().app_secret())\n if status == 200:\n token = resp.text.split('access_token=')[1]\n else:\n self.logger.error(\n \"Facebook token request failed with status %d\" % status\n )\n return token", "def get_auth_token():\n auth_token_value = memcache.get('authtoken')\n if not auth_token_value:\n entity = Token.get_by_key_name(key_names = 'authtoken')\n if entity:\n auth_token_value= entity.value\n memcache.set('authtoken', auth_token_value)\n else:\n auth_token_value = None\n return auth_token_value", "def get_token_auth_header():\n auth = request.headers.get('Authorization', None)\n if not auth:\n raise AuthError({\n 'code': 'authorization_header_missing',\n 'description': 'Authorization header is expected.'\n }, 401)\n elif auth.split()[0].lower() != 'bearer':\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Authorization header must start with \"Bearer\".'\n }, 401)\n elif len(auth.split()) == 1:\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Authorization header must be include type and token.'\n }, 401)\n elif len(auth.split()) > 2:\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Authorization header must be Bearer token.'\n }, 401)\n else:\n token = auth.split()[1]\n return token", "def oauth(self) -> OAuth2Session:\n if not self._oauth:\n raise OAuth2NotSetError(OAuth2NotSetError.ERROR_MSG)\n\n return self._oauth", "def get_access_token():\n\n account = get_account()\n\n account.EnsureCredentials(dbus_interface=GOA_ACCOUNT)\n access_token, _ = account.GetAccessToken(dbus_interface=GOA_ACCOUNT_OAUTH2)\n return str(access_token)", "def getToken(self):\n \n data = '''\n {\n \"auth\": \n {\n \"username\" : \"%s\",\n \"password\" : \"%s\"\n }\n }\n ''' % (self.username, self.password)\n \n headers = {\n 'Content-Type': 'application/x-www-form-urlencoded',\n 'Host': 'api.appnexus.com'\n }\n r = requests.post(self.auth_url, data=data, \n headers=headers)\n ac_data = r.json()\n \n if ac_data['response']['status'] != 'OK':\n self.stream_logger.error('Error while retrieving access token')\n self.stream_logger.error('Status code {0}'\\\n .format(ac_data['response']['status']))\n return False\n\n return ac_data['response']['token']", "def get_auth_token(self):\n creds = None\n # The file token.pickle stores the user's access and refresh tokens,\n # and is created automatically when the authorization flow completes\n # for the first time.\n if os.path.exists(self.token_path):\n with open(self.token_path, 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n self.credentials_path, self.scopes)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open(self.token_path, 'wb') as token:\n pickle.dump(creds, token)\n return creds", "def _getAuth(self):\r\n parameters = {\r\n 'service' : 'reader',\r\n 'Email' : self.username,\r\n 'Passwd' : self.password,\r\n 'accountType' : 'GOOGLE'}\r\n req = requests.post(ClientAuthMethod.CLIENT_URL, data=parameters)\r\n if req.status_code != 200:\r\n raise IOError(\"Error getting the Auth token, have you entered a\"\r\n \"correct username and password?\")\r\n data = req.text\r\n #Strip newline 
and non token text.\r\n token_dict = dict(x.split('=') for x in data.split('\\n') if x)\r\n return token_dict[\"Auth\"]", "def GetToken(cls, session, oauth2_cfg, code, tok_id=None):\n post_data = urlencode([\n ('code', code),\n ('client_id', oauth2_cfg['client_id']),\n ('client_secret', oauth2_cfg['client_secret']),\n ('redirect_uri', cls.RedirectURI(\n session.config, oauth2_cfg,\n session.ui.html_variables.get(\"http_host\"))),\n ('grant_type', 'authorization_code')])\n\n data = json.loads(cls.URLGet(\n session, oauth2_cfg['token_url'], data=post_data))\n\n tok_id = tok_id or ('%x' % time.time())\n session.config.oauth.tokens[tok_id] = {}\n tok_info = session.config.oauth.tokens[tok_id]\n tok_info.provider = oauth2_cfg._key\n tok_info.token_type = data['token_type']\n tok_info.access_token = data['access_token']\n tok_info.expires_at = int(time.time() + data['expires_in'])\n tok_info.refresh_token = data['refresh_token']\n if 'oauth' in session.config.sys.debug:\n session.ui.debug(\"Fetched OAuth2 token for %s\" % tok_id)\n\n return tok_id, tok_info", "def authenticate():\n with open(APP_KEYS_FILE) as f:\n app_keys = json.load(f)\n storage = Storage(USER_OAUTH_DATA_FILE)\n credentials = storage.get()\n if credentials is None or credentials.invalid:\n credentials = tools.run_flow(\n OAuth2WebServerFlow(\n client_id=app_keys['APP_CLIENT_ID'],\n client_secret=app_keys['APP_CLIENT_SECRET'],\n scope=['https://www.googleapis.com/auth/reminders'],\n user_agent='google reminders cli tool'),\n storage,\n )\n auth_http = credentials.authorize(httplib2.Http())\n return auth_http", "def find_token_for_authorization(authorization):\n return None", "def get(code, redirect=False):\n if not code:\n raise Exception('get() requires code parameter')\n\n p = {\n 'client_id': c.client_id,\n 'client_secret': c.client_secret,\n 'grant_type': 'authorization_code',\n 'code': code\n }\n\n if redirect:\n p['redirect'] = redirect\n\n return r._post('/token/', p, '/oauth/v2', False)", "def get_token_auth_header():\n # Get authorization form request header\n auth = request.headers.get('Authorization', None)\n # Check if authorization header exists\n if not auth:\n raise AuthError({\n 'code': 'authorization_header_missing',\n 'description': 'Authorization header is MISSING!'\n }, abort(401))\n # If bearer token, then first part of string = 'bearer'\n parts = auth.split()\n if parts[0].lower() != 'bearer':\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Authorization header must start with \"Bearer\"'\n }, abort(401))\n # Authorization header string length must be 2\n elif len(parts) != 2:\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Authorization header must be a BEARER token'\n }, abort(401))\n\n token = parts[1]\n return token", "def get_token(self):\n\t\tself.client.post('/api/v1/auth/signup', data=json.dumps(self.signup_user), content_type='application/json')\n\t\tresponse = self.client.post('/api/v1/auth/login', data=json.dumps(self.login_user), content_type='application/json')\n\t\tresp = json.loads(response.data.decode())\n\t\treturn 'Bearer ' + resp['access_token']", "def get_access_token(self, renew=False):\n if self.access_token is None or renew:\n headers = {} # don't use json here, juse urlencode.\n url = self._url_for_op('token')\n data = urllib.urlencode({'grant_type': 'client_credentials',\n 'client_id':self.CLIENT_ID,\n 'client_secret':self.CLIENT_SECRET})\n req = urllib2.Request(url, data, headers)\n try:\n response = urllib2.urlopen(req).read()\n response = 
json.loads(response)\n except urllib2.HTTPError as e:\n raise ApiError(e.reason)\n except Exception, e:\n raise ApiError(e)\n self.access_token = response['access_token']\n return self.access_token", "def get_access_token(self) -> Optional[Text]:\n return self.access_token", "def unauthorized_token(self):\n scope = self.get_scope() or ''\n if scope:\n scope = '?scope=' + self.SCOPE_SEPARATOR.join(scope)\n return self.request(self.REQUEST_TOKEN_URL + scope,\n params=self.request_token_extra_arguments(),\n auth=self.oauth_auth()).content", "def do_implicit_flow_authorization(self, session):\n logger.info('Doing implicit flow authorization, app_id=%s', self.app_id)\n auth_data = {\n 'client_id': self.app_id,\n 'display': 'mobile',\n 'response_type': 'token',\n 'scope': self.scope,\n 'redirect_uri': 'https://oauth.vk.com/blank.html',\n 'v': self.api_version\n }\n response = session.post(url=self.AUTHORIZE_URL,\n data=stringify_values(auth_data))\n url_query_params = parse_url_query_params(response.url)\n\n if 'expires_in' in url_query_params:\n logger.info('Token will be expired in %s sec.' %\n url_query_params['expires_in'])\n if 'access_token' in url_query_params:\n return url_query_params\n\n # Permissions are needed\n logger.info('Getting permissions')\n action_url = parse_form_action_url(response.text)\n logger.debug('Response form action: %s', action_url)\n\n if action_url:\n response = session.get(action_url)\n url_query_params = parse_url_query_params(response.url)\n return url_query_params\n try:\n response_json = response.json()\n except ValueError: # not JSON in response\n error_message = 'OAuth2 grant access error'\n logger.error(response.text)\n else:\n error_message = 'VK error: [{}] {}'.format(\n response_json['error'], response_json['error_description'])\n logger.error('Permissions obtained')\n raise VkAuthError(error_message)", "def get_authorization(self):\n auth = get_authorization_header()\n\n if not auth:\n return None\n\n auth_type, auth_info = auth\n\n if auth_type != b'basic':\n return None\n\n try:\n username, password = base64.b64decode(auth_info).split(b':', 1)\n except Exception:\n return None\n\n return Authorization(\"basic\", username=bytes_to_wsgi(username), password=bytes_to_wsgi(password))", "def authorize(self, oauth2_token):\r\n storage = file.Storage(oauth2_token)\r\n credentials = storage.get()\r\n http = credentials.authorize(httplib2.Http())\r\n self.service = discovery.build('youtube', 'v3', http=http)", "def get_token(self, code, redirect_uri):\n\n token_resp = post_to_remote(get_config('login.weibo.access_token_url') % (redirect_uri, code), {})\n if token_resp.get(\"error\") is not None:\n raise Exception(token_resp)\n\n return token_resp", "def initialize_oauth2_session(self):\n\n def token_updater(token):\n \"\"\"Stores oauth2 token on disk\"\"\"\n try:\n with open(self.OAUTH_TOKEN_PATH, 'w') as f:\n json.dump(token, f)\n except Exception as err:\n log.Error('Could not save the OAuth2 token to %s. This means '\n 'you may need to do the OAuth2 authorization '\n 'process again soon. Original error: %s' % (\n self.OAUTH_TOKEN_PATH, err))\n\n token = None\n try:\n with open(self.OAUTH_TOKEN_PATH) as f:\n token = json.load(f)\n except IOError as err:\n log.Notice('Could not load OAuth2 token. '\n 'Trying to create a new one. 
(original error: %s)' % err)\n\n self.http_client = OAuth2Session(\n self.CLIENT_ID,\n scope=self.OAUTH_SCOPE,\n redirect_uri=self.OAUTH_REDIRECT_URL,\n token=token,\n auto_refresh_kwargs={\n 'client_id': self.CLIENT_ID,\n 'client_secret': self.CLIENT_SECRET,\n },\n auto_refresh_url=self.OAUTH_TOKEN_URL,\n token_updater=token_updater)\n\n if token is not None:\n self.http_client.refresh_token(self.OAUTH_TOKEN_URL)\n\n endpoints_response = self.http_client.get(self.metadata_url +\n 'account/endpoint')\n if endpoints_response.status_code != requests.codes.ok:\n token = None\n\n if token is None:\n if not sys.stdout.isatty() or not sys.stdin.isatty():\n log.FatalError('The OAuth2 token could not be loaded from %s '\n 'and you are not running duplicity '\n 'interactively, so duplicity cannot possibly '\n 'access Amazon Drive.' % self.OAUTH_TOKEN_PATH)\n authorization_url, _ = self.http_client.authorization_url(\n self.OAUTH_AUTHORIZE_URL)\n\n print('')\n print('In order to allow duplicity to access Amazon Drive, please '\n 'open the following URL in a browser and copy the URL of the '\n 'page you see after authorization here:')\n print(authorization_url)\n print('')\n\n redirected_to = (raw_input('URL of the resulting page: ')\n .replace('http://', 'https://', 1)).strip()\n\n token = self.http_client.fetch_token(\n self.OAUTH_TOKEN_URL,\n client_secret=self.CLIENT_SECRET,\n authorization_response=redirected_to)\n\n endpoints_response = self.http_client.get(self.metadata_url +\n 'account/endpoint')\n endpoints_response.raise_for_status()\n token_updater(token)\n\n urls = endpoints_response.json()\n if 'metadataUrl' not in urls or 'contentUrl' not in urls:\n log.FatalError('Could not retrieve endpoint URLs for this account')\n self.metadata_url = urls['metadataUrl']\n self.content_url = urls['contentUrl']", "def obtain_bearer_token(host, path):\n url = '{0}{1}'.format(host, quote(path.encode('utf8')))\n assert CLIENT_ID, \"Please supply your client_id.\"\n assert CLIENT_SECRET, \"Please supply your client_secret.\"\n data = urlencode({\n 'client_id': CLIENT_ID,\n 'client_secret': CLIENT_SECRET,\n 'grant_type': GRANT_TYPE,\n })\n headers = {\n 'content-type': 'application/x-www-form-urlencoded',\n }\n response = requests.request('POST', url, data=data, headers=headers)\n bearer_token = response.json()['access_token']\n return bearer_token", "def obtain_bearer_token(host, path):\n url = '{0}{1}'.format(host, quote(path.encode('utf8')))\n assert CLIENT_ID, \"Please supply your client_id.\"\n assert CLIENT_SECRET, \"Please supply your client_secret.\"\n data = urlencode({\n 'client_id': CLIENT_ID,\n 'client_secret': CLIENT_SECRET,\n 'grant_type': GRANT_TYPE,\n })\n headers = {\n 'content-type': 'application/x-www-form-urlencoded',\n }\n response = requests.request('POST', url, data=data, headers=headers)\n bearer_token = response.json()['access_token']\n return bearer_token", "def get_access_token(self):\n\n token_work = time.time() < self.expires\n\n if token_work:\n # No need update token\n return self.access_token\n\n data = {\n 'client_id': self.client_id,\n 'grant_type': 'implicit'\n }\n\n response = requests.post('https://api.moltin.com/oauth/access_token', data=data)\n raise_response_errors(response)\n\n response_json = response.json()\n\n self.access_token = response_json['access_token']\n self.expires = response_json['expires']\n\n logger.debug('elasticpathh access token was updated')\n\n return self.access_token", "def authorized():\n remote = oauth.remote_apps['SampleProv']\n try:\n resp 
= remote.authorized_response()\n app.logger.info(resp)\n\n if resp is None or resp.get('access_token') is None:\n return 'Access denied: reason=%s error=%s resp=%s' % (\n request.args['error'],\n request.args['error_description'],\n resp\n )\n except OAuthException:\n # If you get here, don't expect the error message to be helpful\n # Both AUTHLIB and FLASK_OAUTHCLIENT have less than stellar \n # debug logs and error messages.\n app.logger.info('401')\n abort(401)\n\n # Store the access token in the session.\n # This is bad because FLASK defaults to storing this in cookies.\n # If you do plan to try this, at least move the session storage\n # in flask to be serverside with a plugin.\n # I plan to provide a simple AuthLib example in the future.\n session['remote_oauth'] = (resp['access_token'], '')\n\n # Now that we have the token, try making a call to the Oauth provider API.\n resp = remote.get('me')\n\n if resp.status >= 200 and resp.status <= 299:\n return resp.data\n\n return resp", "def get_access_token(self):\n access_token = self._auth_provider._get_auth_value()\n return access_token", "def auth_token_api():\n data = request.get_json()\n if not data:\n response = jsonify({\n 'success': False,\n 'message': 'Missing request body'\n })\n response.status_code = 422\n return response\n\n # process argument\n login_type = data.get('auth_type')\n email = data.get('email').strip().lower()\n password = data.get('password')\n\n if not login_type or login_type not in ['email']:\n response = jsonify({\n 'success': False,\n 'message': 'Invalid auth_type'\n })\n response.status_code = 422\n return response\n\n # email authentication\n elif login_type == 'email':\n if not email:\n response = jsonify({\n 'success': False,\n 'message': 'Must provide email when auth_type is \"email\"'\n })\n response.status_code = 422\n return response\n user = db.session.query(User).filter(User.email == email, User.deleted == False).one_or_none()\n if not user:\n response = jsonify({\n 'success': False,\n 'message': 'Not Authorized: invalid email'\n })\n response.status_code = 403\n return response\n # check the user's password\n password_valid = check_password_hash(user.password, password)\n if not password_valid:\n response = jsonify({\n 'success': False,\n 'message': 'Not Authorized: invalid password'\n })\n response.status_code = 403\n return response\n\n token = generate_auth_token(user_id=user.user_id)\n response = jsonify({\n 'success': True,\n 'token': token\n })\n response.status_code == '200'\n return response", "def get_token(self, url):\n # A hack to avoid url-encoding the url, since the authorization service\n # doesn't work with correctly encoded urls\n\n parsed_url = urlparse.urlsplit(url)\n parsed_url = parsed_url._replace(path='/authorization/api')\n self.url = urlparse.urlunsplit(parsed_url)\n\n response = self.request(method='GET', url='/v1/token?url=' + url)\n return response.result.text", "def get_token(self, code, redirect_uri):\n\n token_resp = get_remote(get_config(\"login.qq.access_token_url\") % (redirect_uri, code))\n if token_resp.find('callback') == 0:\n error = json.loads(token_resp[10:-4])\n raise Exception(error)\n query = qs_dict(token_resp)\n return query[\"access_token\"]", "def request_access_token():\n\n # For Private application authentication, you must specifiy\n # grant_type=client_credentials and the service scope. 
For the \n # Content API, scope=contentapi\n post_data = {\"grant_type\": APP_CONFIG['GRANT_TYPE'],\n \"scope\": APP_CONFIG['SCOPE']}\n post_data_string = json.dumps(post_data)\n\n # Construct authentication string:\n # 1. Concatenate the client id, a colon character \":\", and the client secret into a single string\n # 2. URL encode the string from step 1\n # 3. Base64 encode the string from step 2\n authstr = to_native_string(\n b64encode(('%s:%s' % (APP_CONFIG['CLIENT_ID'], APP_CONFIG['CLIENT_SECRET'])).encode('utf-8'))).strip()\n\n # Construct an Authorization header with the value of 'Basic <base64 encoded auth string>'\n headers = {\n \"Content-Type\": \"application/json;charset=UTF-8\",\n \"Accept\": \"application/json\",\n \"Authorization\": \"Basic \" + authstr\n }\n\n r = s.post(APP_CONFIG['OAUTH_TOKEN_URL'], data=post_data_string, headers=headers, verify=(app.config['SSLVERIFY'] == 'True'))\n\n if r.status_code in (400,500):\n\n # Handle known error\n result = r.json() \n return jsonify(result)\n\n elif r.status_code == 200:\n\n result = r.json() \n access_token = result['access_token']\n token_type = result['token_type']\n timestamp = result.get('timestamp', None)\n expires_in = result.get('expires_in', None)\n token_expiry = None\n if expires_in is not None:\n token_expiry = datetime.datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%S')\n token_expiry = token_expiry + datetime.timedelta(seconds=expires_in)\n token_expiry = token_expiry.isoformat()\n\n html = '<pre>';\n html += '<h3>Successfully retrieved access token!</h3>' \n html += '<pre>';\n html += 'access_token : ' + access_token\n html += '<pre>';\n html += 'token_type : ' + token_type\n html += '<pre>';\n html += 'expires_in (sec) : ' + str(expires_in)\n html += '<pre>';\n html += 'token_expiry : ' + token_expiry\n html += '<pre>';\n html += 'timestamp : ' + timestamp\n\n html += '<pre>';\n html += '<h3>Query Content API with Access Token</h3>'\n html += '<pre>';\n html += '<a href=\"/query-collection-myhuman?access_token='+access_token+'\">Query Collection: myhuman</a>'\n\n return html\n\n else:\n # Handle unknown error\n return (r.text, r.status_code, r.headers.items())", "def create_oauth2_access_token(self):\n if not isinstance(self.session, DropboxSession):\n raise ValueError(\"This call requires a DropboxClient that is configured with an \"\n \"OAuth 1 access token.\")\n url, params, headers = self.request(\"/oauth2/token_from_oauth1\", method='POST')\n\n r = self.rest_client.POST(url, params, headers)\n return r['access_token']", "def token(self):\n if not self._token or self._expires <= datetime.now():\n self._request_token()\n return self._token", "def get_token_auth_header():\n auth = request.headers.get('Authorization', None)\n if not auth:\n raise AuthError({\n 'code': 'authorization_header_missing',\n 'description': 'Authorization header is expected.'\n }, 401)\n\n parts = auth.split()\n if parts[0].lower() != 'bearer':\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Authorization header must start with \"Bearer\".'\n }, 401)\n\n elif len(parts) == 1:\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Token not found.'\n }, 401)\n\n elif len(parts) > 2:\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Authorization header must be bearer token.'\n }, 401)\n\n token = parts[1]\n return token", "def get_token(self):\n oauth_provider = UserSocialAuth.objects.get(provider='drchrono')\n access_token = oauth_provider.extra_data['access_token']\n return access_token", 
"def get_access_token(self, request) -> str or Exception:\n pass", "def get_token(self, token_url, secret, secret_id, authorization_response, redirect_uri, scope, code,**kwargs):\n print(secret, authorization_response)\n if not validators.url(token_url) or not token_url.lower().startswith('https://'):\n logger.warning('')\n raise Exception\n # body = (code=secret, body='', redirect_uri=redirect_uri, **kwargs)\n\n auth = requests.auth.HTTPBasicAuth(kwargs.get('client_id'), kwargs.get('secret'))\n headers = {'Accept': 'application/json', 'Content-Type': 'application/x-www-form-urlencoded;charser=UTF-8'}\n # body = self._client.prepare_request_body(code=secret,\n # redirect_uri=self.redirect_uri, **kwargs)\n # r = self.post(token_url, headers=headers, auth=auth)\n print(token_url)\n params = {\n 'grant_type': 'client_credentials',\n 'client_id': secret_id,\n 'client_secret': secret,\n 'code': code\n }\n\n print('---->requests get')\n # r = requests.get(token_url, params=params)\n r = requests.post(token_url, params=params)\n print('Rrrrr')\n print(r.json())\n print(r.json()['access_token'])\n # if r.json().get('access_token'):\n # self.check_user_token(r.json()['access_token'])\n n = requests.get(\n # f'https://graph.facebook.com/me?fields={# scope}')\n f'https://graph.facebook.com/me/accounts')\n\n info = n.json()\n print(info)\n # r = requests.get(token_url, client_id=secret_id, redirect_uri=redirect_uri, client_secret=secret, scope=scope)\n\n # self.token = r.text, scope = self.scope\n token = {}\n return token", "def get_access_token(self):\n signed_jwt = self.generate_jwt(os.path.join(FILE_DIR, KEYFILE))\n if signed_jwt is None:\n return False\n url = HOMEGRAPH_TOKEN_URL\n headers = {'Content-Type': 'application/x-www-form-urlencoded'}\n data = 'grant_type=urn%3Aietf%3Aparams%3Aoauth%3Agrant-type%3Ajwt-bearer&assertion=' + signed_jwt.decode(\n 'utf-8')\n\n r = requests.post(url, headers=headers, data=data)\n\n if r.status_code == requests.codes.ok:\n token_data = json.loads(r.text)\n self._access_token = token_data['access_token']\n return token_data['access_token']\n\n r.raise_for_status()\n return", "def get_token():\n\theaders = {\n\t\t'Authorization': 'Basic ' + (base64.b64encode((client_id + ':' + client_secret).encode(\"utf-8\"))).decode(\"utf-8\")}\n\toptions = {\n\t\t'grant_type': 'client_credentials',\n\t\t'json': True,\n\t}\n\n\tresponse = requests.post(\n\t\t'https://accounts.spotify.com/api/token',\n\t\theaders=headers,\n\t\tdata=options\n\t)\n\tif response.status_code == 200:\n\t\tcontent = json.loads(response.content.decode('utf-8'))\n\t\taccess_token = content.get('access_token', None)\n\t\treturn access_token\n\telse:\n\t\treturn None", "def get_token_auth_header():\n auth = request.headers.get(\"Authorization\", None)\n if not auth:\n return \"authorization_header_missing\"\n\n parts = auth.split()\n\n if parts[0].lower() != \"bearer\":\n return \"invalid_header\"\n elif len(parts) == 1:\n return \"invalid_header\"\n elif len(parts) > 2:\n return \"invalid_header\"\n\n token = parts[1]\n return token", "async def async_get_access_token(self):\n if not self._oauth_session.valid_token:\n await self._oauth_session.async_ensure_token_valid()\n\n return self._oauth_session.token[\"access_token\"]", "def get_token_auth_header():\n auth = request.headers.get(\"Authorization\", None)\n if not auth:\n raise AuthError({\"code\": \"authorization_header_missing\",\n \"description\":\n \"Authorization header is expected\"}, 401)\n\n parts = auth.split()\n\n if parts[0].lower() != 
\"bearer\":\n raise AuthError({\"code\": \"invalid_header\",\n \"description\":\n \"Authorization header must start with\"\n \" Bearer\"}, 401)\n elif len(parts) == 1:\n raise AuthError({\"code\": \"invalid_header\",\n \"description\": \"Token not found\"}, 401)\n elif len(parts) > 2:\n raise AuthError({\"code\": \"invalid_header\",\n \"description\":\n \"Authorization header must be\"\n \" Bearer token\"}, 401)\n\n token = parts[1]\n return token" ]
[ "0.6736975", "0.672694", "0.6723241", "0.667107", "0.66542256", "0.66330796", "0.6596639", "0.6589812", "0.6496207", "0.6487768", "0.64865583", "0.6483437", "0.64788604", "0.6448414", "0.6448318", "0.6448318", "0.6439682", "0.641158", "0.6389883", "0.63766307", "0.63674885", "0.6361495", "0.6353662", "0.6353378", "0.6350929", "0.62786317", "0.6269762", "0.6263243", "0.6230019", "0.6205442", "0.6192042", "0.6179691", "0.61678755", "0.6164009", "0.6151911", "0.6150027", "0.61443913", "0.6143606", "0.61420023", "0.6136813", "0.61324614", "0.6129557", "0.611889", "0.6117455", "0.6106124", "0.609451", "0.6089005", "0.6062808", "0.6062474", "0.6058998", "0.60552776", "0.60543895", "0.6049572", "0.6048229", "0.6047394", "0.6044459", "0.6044459", "0.6022941", "0.6019411", "0.60186875", "0.6015627", "0.60103613", "0.601022", "0.59961337", "0.5989122", "0.59880453", "0.5976261", "0.59761834", "0.5975691", "0.5969468", "0.5961777", "0.5944313", "0.5932821", "0.5932065", "0.5924172", "0.59068733", "0.59028256", "0.5901159", "0.59002525", "0.5897616", "0.58937943", "0.58937943", "0.5892468", "0.58783054", "0.58767337", "0.5876143", "0.5872771", "0.5872079", "0.58673424", "0.5863767", "0.58619577", "0.58593595", "0.58582246", "0.5856107", "0.5853987", "0.5851362", "0.5849968", "0.5846787", "0.5834656", "0.583449" ]
0.64407897
16
Returns a new valid token or None, in case of error.
def refresh_token(refresh_token): return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_token(self):\n if not self.is_valid():\n logger.warn(\"TokenWall form data is not valid.\")\n return None\n \n tt = self.cleaned_data['token']\n logger.debug(\"Looking for token '%s'\"%tt)\n return Token.objects.get(value=tt)", "def token(self) -> Token:\n return getattr(self, \"tok\", None)", "def get_random_token(self):\n token = self.token_list[self.token_ptr]\n self.token_ptr -= 1\n if self.token_ptr == -1:\n self.reset_token_list()\n return token", "def _get_token(self):\n # Skip initial whitespace.\n pos = self._skip_whitespace()\n\n # Find the token here, if there's one.\n token = None\n\n for (token_type, regex) in TOKEN_REGEXEN:\n re_match = regex.match(self.body, pos)\n if re_match:\n token_content = next(g for g in re_match.groups() if g is not None)\n token = Token(token_type, token_content, re_match.end())\n break\n\n return token", "async def validate_token(self, token):", "def next_token(self) -> T.Optional[Token]:\n if self.has_finished():\n return None\n token_type = None\n token_chars = []\n if is_number_char(self.current):\n token_type = \"N\"\n while not self.has_finished() and is_number_char(self.current):\n token_chars.append(self.consume())\n elif is_char_token(self.current):\n if self.current in [\"(\", \")\"]:\n token_type = self.current\n elif self.current in [\"+\", \"-\"]:\n token_type = \"S\"\n elif self.current in [\"*\", \"/\"]:\n token_type = \"M\"\n else:\n raise ExprSyntaxError\n token_chars.append(self.consume())\n elif self.current.isspace():\n self.consume()\n return self.next_token()\n else:\n raise UnexpectedChar\n return Token(token_type, \"\".join(token_chars))", "def _parse_security_token(token):\n if not token:\n return None\n if ':' not in token:\n logging.warn('Malformed token: no signature separator')\n return None\n sig, body = token.split(':', 1)\n if _DISABLE_CRYPTO:\n plaintext = body\n else:\n key_storage = KeyStorage.get()\n hmac_key = key_storage.hmac_key\n if type(hmac_key) == unicode:\n # Crypto requires byte strings\n hmac_key = hmac_key.encode('utf8')\n computed_sig = HMAC.HMAC(key=hmac_key,\n msg=body).hexdigest()\n if sig != computed_sig:\n logging.warn('Malformed token: invalid signature')\n return None\n try:\n plaintext = AES.new(key_storage.aes_key,\n AES.MODE_CBC).decrypt(body)\n except ValueError:\n logging.warn('Malformed token: wrong size')\n return None\n # Remove excess whitespace.\n plaintext = plaintext.strip()\n # The plaintext should contain at least one space.\n if ' ' not in plaintext:\n logging.warn('Malformed token: bad contents')\n return None\n parts = plaintext.split(' ')\n if len(parts) != 2:\n logging.warn('Malformed token: bad structure')\n return None\n timestamp, email = parts\n try:\n timestamp = int(timestamp, 16)\n except ValueError:\n logging.warn('Malformed token: bad timestamp')\n return None\n # Reject tokens that are too old or which have time-traveled. 
We\n # allow for 1s of clock skew.\n age_s = time.time() - timestamp\n if age_s < -1 or age_s > _TOKEN_TIMEOUT_S:\n logging.warn('Malformed token: expired (age=%ds)', age_s)\n return None\n cred = _Credentials()\n cred.email = email\n cred.security_token_is_stale = (age_s > 0.5 * _TOKEN_TIMEOUT_S)\n return cred", "def get_token(self, name):\n if self.kv.get(name):\n return self.kv.get(name)\n token = self.random_string(24)\n self.kv.set(name, token)\n return token", "async def get_token(self, *args, **kwargs) -> Optional[OAuth2Token]:\n token_record = ...\n\n if token_record is not None:\n return OAuth2Token(\n access_token=token_record.access_token,\n refresh_token=token_record.refresh_token,\n scope=token_record.scope,\n issued_at=token_record.issued_at,\n expires_in=token_record.expires_in,\n client_id=token_record.client_id,\n token_type=token_record.token_type,\n revoked=token_record.revoked,\n )", "def token(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"token\")", "def tokenfunc_():\r\n if len( lexer.pendingtokens ):\r\n return lexer.pendingtokens.pop(0)\r\n\r\n tok = lexer.token()\r\n\r\n if len( lexer.pendingtokens ) and ( tok and tok.type != 'EOL'):\r\n pending = lexer.pendingtokens.pop(0)\r\n lexer.pendingtokens.append(tok)\r\n return pending\r\n\r\n return tok", "def get_token(self, token_id):\n raise exception.NotImplemented() # pragma: no cover", "def test_unused_token_is_valid(self):\n assert self.token.is_valid()", "def get_token(self):\n\n try:\n return jwt.decode(self.fetch_token(), KEY, algorithms=['HS256'])\n except jwt.exceptions.DecodeError:\n raise InvalidToken", "def _get_token(self):\n self._skip()\n\n token = None\n # Checks single-quoted string.\n if self.current_char == \"'\":\n start_position = self.current_position\n while not (self.current_char != \"\\\\\" and self._peek() == \"'\"):\n self._next_char()\n if self.EOF:\n raise LexerError(\n start_position, f\"EOL while scanning string literal at position {start_position}\")\n self._next_char()\n token = Token(self.line_number, self.line_start_position, start_position, self.current_position,\n _token_names.STRING, self.stream[start_position:self.current_position + 1])\n\n # Checks double-quoted string.\n elif self.current_char == '\"':\n start_position = self.current_position\n while not (self.current_char != \"\\\\\" and self._peek() == '\"'):\n self._next_char()\n if self.EOF:\n raise LexerError(\n start_position, f\"EOL while scanning string literal at position {start_position}\")\n self._next_char()\n token = Token(self.line_number, self.line_start_position, start_position, self.current_position,\n _token_names.STRING, self.stream[start_position:self.current_position + 1])\n\n # Checks number begins with a digit.\n elif self.current_char.isdigit():\n start_position = self.current_position\n while self._peek().isdigit():\n self._next_char()\n if self._peek() == \".\":\n self._next_char()\n while self._peek().isdigit():\n self._next_char()\n if self._peek() in [\"d\", \"D\", \"f\", \"F\"]:\n self._next_char()\n token = Token(self.line_number, self.line_start_position, start_position, self.current_position,\n _token_names.NUMBER, self.stream[start_position:self.current_position + 1])\n\n # Checks number begins with a dot.\n elif self.current_char == \".\":\n if self._peek().isdigit():\n start_position = self.current_position\n while self._peek().isdigit():\n self._next_char()\n if self._peek() in [\"d\", \"D\", \"f\", \"F\"]:\n self._next_char()\n token = Token(self.line_number, 
self.line_start_position, start_position, self.current_position,\n _token_names.NUMBER, self.stream[start_position:self.current_position + 1])\n else:\n token = Token(self.line_number, self.line_start_position, self.current_position, self.current_position,\n _token_names.Separators(self.current_char).name, self.current_char)\n\n # Checks word begins with an alphabetic letter or an underscore.\n elif self.current_char.isalpha() or self.current_char == \"_\":\n start_position = self.current_position\n while True:\n if (self._peek() in [\" \", \"\\t\", \"\\r\", \"\\n\", \"\\0\"]\n or self._peek() in _token_names.SEPARATORS\n or self._peek() in _token_names.OPERATORS):\n break\n self._next_char()\n word = self.stream[start_position:self.current_position + 1]\n # Checks if word is a keyword.\n if word in _token_names.Keywords.values():\n token = Token(self.line_number, self.line_start_position, start_position, self.current_position,\n _token_names.Keywords(word).name, word)\n elif word in _token_names.KeywordsType.values():\n token = Token(self.line_number, self.line_start_position, start_position, self.current_position,\n _token_names.KeywordsType(word).name, word)\n elif word in _token_names.KeywordsAttribute.values():\n token = Token(self.line_number, self.line_start_position, start_position, self.current_position,\n _token_names.KeywordsAttribute(word).name, word)\n # Otherwise put it as identifier.\n else:\n token = Token(self.line_number, self.line_start_position, start_position, self.current_position,\n _token_names.IDENTIFIER, word)\n\n # Checks if is a separator.\n elif self.current_char in _token_names.Separators.values():\n token = Token(self.line_number, self.line_start_position, self.current_position, self.current_position,\n _token_names.Separators(self.current_char).name, self.current_char)\n\n # Checks if is an operator.\n elif self.current_char in _token_names.Operators.values():\n last_position = self.current_position\n if self.current_char not in [\"&\", \"|\"] and self._peek() == \"=\":\n val = self.current_char + self._peek()\n self._next_char()\n token = Token(self.line_number, self.line_start_position, last_position, self.current_position,\n _token_names.Operators(val).name, val)\n elif self.current_char == \"+\" and self._peek() == \"+\":\n val = self.current_char + self._peek()\n self._next_char()\n token = Token(self.line_number, self.line_start_position, last_position, self.current_position,\n _token_names.Operators(val).name, val)\n elif self.current_char == \"-\" and self._peek() == \"-\":\n val = self.current_char + self._peek()\n self._next_char()\n token = Token(self.line_number, self.line_start_position, last_position, self.current_position,\n _token_names.Operators(val).name, val)\n elif self.current_char == \"&\" and self._peek() == \"&\":\n val = self.current_char + self._peek()\n self._next_char()\n token = Token(self.line_number, self.line_start_position, last_position, self.current_position,\n _token_names.Operators(val).name, val)\n elif self.current_char == \"|\" and self._peek() == \"|\":\n val = self.current_char + self._peek()\n self._next_char()\n token = Token(self.line_number, self.line_start_position, last_position, self.current_position,\n _token_names.Operators(val).name, val)\n else:\n token = Token(self.line_number, self.line_start_position, self.current_position, self.current_position,\n _token_names.Operators(self.current_char).name, self.current_char)\n\n # Checks if is EOF\n elif self.current_char == \"\\0\":\n token = 
Token(self.line_number, self.line_start_position, self.current_position, self.current_position,\n _token_names.EOF, self.current_char)\n\n # Raise error if is an unknown token.\n else:\n raise LexerError(self.current_position)\n\n self._next_char()\n return token", "def get_new_token(self):\n # Save result of this API call into self instance __token\n self.__token = apidnac.ApiDNAC.api_get_token()\n # Save result to the defined parameter (\"token\") in file cache_config\n self.save_param('token', self.__token)\n # Return self instance __token\n return self.__token", "def token(self, id):\r\n return Token(self, id)", "def token(uncapped_token: Contract):\n return uncapped_token", "def token(cls, token):\n user_db = User.get_by('token', token)\n if not user_db:\n raise ValueError('Sorry, your token is either invalid or expired.')\n return token", "def retrieve_token():\n try:\n deserialized_message = json.loads(peek_app_token())\n\n expires_at = deserialized_message.get('expires_at')\n # Token is good, return it\n if expires_at and check_expired_time(expires_at):\n return deserialized_message.get('token')\n else: # Token expired, refresh it\n refresh_token()\n\n deserialized_message = peek_app_token()\n expires_at = deserialized_message.get('expires_at')\n # Token is good, return it\n try:\n assert(expires_at and check_expired_time(expires_at))\n return deserialized_message.get('token')\n except:\n raise # When all else fails\n\n except Exception as exc:\n log.error(f'Could not refresh token.\\n{exc}')\n traceback.print_exc(file=sys.stderr)\n\n return None", "def generate_new_token(cls):\n token = proquint.generate()\n\n # Try 100 times to generate a unique token.\n TRIALS = 100\n for __ in range(TRIALS):\n token = proquint.generate()\n if SecretToken.exists(token):\n continue\n break\n # after TRIALS attempts and we didn't get a unique token,\n # just raise an error.\n # See https://stackoverflow.com/a/9980160 on what for-else loop does.\n else:\n raise ValueError(\"Cannot generate new token\")\n\n # We found a unique token! 
Save it\n return token", "def first_token(self):\n if self.tokens:\n return self.tokens[0]\n return \"None\"", "def create_token(self, token_id, data):\n raise exception.NotImplemented() # pragma: no cover", "def get_token(cls, user, full_result=False):\n if user is None:\n return EMPTY_KNOX_TOKEN\n result = AuthToken.objects.create(user=user)\n return result if full_result else result[1]", "def token(self):\n if not self._token or self._expires <= datetime.now():\n self._request_token()\n return self._token", "def get_token(self, tid):\n if self.lliagraph:\n return self.lliagraph.get_token(tid)\n else:\n return None", "def Token(self) -> Token:\r\n\t\treturn self._token", "def new_token(*args, **kwargs):\n return uuid.uuid4().hex", "def token(uncapped_token):\n return uncapped_token", "def get_token(self):\n return self.__token", "def get_token(self):\n return self.__token", "def get_token(self):\n token_model = TokenModel.find_by_user_id(self.id)\n return token_model.token if token_model else None", "def token(self):\n if self.is_auth_needed():\n self.authorize()\n\n return self.get_from_cache('token')", "def access_valid_token(token_code):\n token_code = remove_unicode(Bytes.for_string_or_unicode(token_code).as_encoded_str())\n\n prefix = token_code[:TOKEN_NAME_PREFIX_LENGTH]\n if len(prefix) != TOKEN_NAME_PREFIX_LENGTH:\n return None\n\n suffix = token_code[TOKEN_NAME_PREFIX_LENGTH:]\n\n # Lookup the token by its prefix.\n try:\n token = (\n AppSpecificAuthToken.select(AppSpecificAuthToken, User)\n .join(User)\n .where(\n AppSpecificAuthToken.token_name == prefix,\n (\n (AppSpecificAuthToken.expiration > datetime.now())\n | (AppSpecificAuthToken.expiration >> None)\n ),\n )\n .get()\n )\n\n if not token.token_secret.matches(suffix):\n return None\n\n assert len(prefix) == TOKEN_NAME_PREFIX_LENGTH\n assert len(suffix) >= MINIMUM_TOKEN_SUFFIX_LENGTH\n update_last_accessed(token)\n return token\n except AppSpecificAuthToken.DoesNotExist:\n pass\n\n return None", "def get_token(self):\n logging.debug(\"In the Token get_token() class method.\")\n\n if datetime.datetime.now() > self.token_expiry:\n logging.info(\"Token Expired.\")\n self.generate_tokens()\n return self.access_token", "def create_token(self):\n token = Token(PAREN.get(self.current_char), \"brace\")\n self.current_char = self.source.read(1)\n\n return token", "def auth0_token():\n redis_conn = token_redis_connection()\n token = redis_conn.get('auth0_token')\n token_valid = check_if_token_is_valid(token)\n if token is None or not token_valid:\n try:\n token = get_fresh_auth0_management_token()\n except (ValueError, requests.HTTPError) as e:\n logger.error('Failed to retrieve Auth0 token: %r', e)\n return\n redis_conn.set('auth0_token', token)\n return token", "def load_token(self):\n token = None\n\n if config.outlook_token:\n token = self.token_constructor(config.outlook_token)\n\n return token", "def _handle_token(self, token: str) -> Optional[str]:\n return token or self._token_handler.token", "def decode_token(self, token: str, max_age: int) -> Optional[object]:\n try:\n return self.serializer.loads(token, max_age)\n except (BadSignature, SignatureExpired) as e:\n return None", "def get_next_token(self):\n while self.current_char is not None:\n if self.current_char.isspace():\n self.skip_whitespace()\n continue\n if self.current_char.isdigit():\n return Tokenizer(INTEGER, self.integer())\n if self.current_char == '+':\n self.advance()\n return Tokenizer(Token.PLUS, '+')\n if self.current_char == '-':\n self.advance()\n 
return Tokenizer(Token.MINUS, '-')\n if self.current_char == '*':\n self.advance()\n return Tokenizer(Token.MULTIPLICATION, '*')\n if self.current_char == '/':\n self.advance()\n return Tokenizer(Token.DIVISION, '/')\n\n self.error()\n return Tokenizer(EOF, None)", "def __get_new_token(self):\n keystone = {\n 'username': self.username,\n 'password': self.password,\n 'project_name': self.project,\n 'auth_url': self.auth_uri\n }\n\n ks_client = ksclient.KSClient(**keystone)\n convert_time = ciso8601.parse_datetime(str(ks_client._keystone.auth_ref.expires))\n token_exp = time.mktime(convert_time.timetuple())\n #tmp_str = str(convert_time).split('.')\n #token_exp = time.mktime(time.strptime(tmp_str[0], '%Y-%m-%d %H:%M:%S'))\n factor = self.__correct_token_time()\n\n print (\"Get new Token: {}\".format(ks_client.token))\n print (\"Expiration time in UTC: {}\".format(ks_client._keystone.auth_ref.expires))\n print (\"Expiration time in seconds since beginning of time: {}\".format(token_exp))\n print (\"The FACTOR: {}\".format(factor))\n return ks_client.token, (token_exp + factor)", "def test_live_thread_token_is_valid(self):\n assert self.token.is_valid()", "def token(self) -> Optional[str]:\n return self._builder._token", "def get_token(id=None, name=None):\n\tif id is None and name is None:\n\t\tname = config['username']\n\treturn get_user(id=id, name=name, get_missing=False).token", "def _handle_token(self, token: str) -> Optional[str]:\n return token", "def get_token():\n if os.path.exists(AUTH_TOKEN_PATH):\n with open(str(AUTH_TOKEN_PATH), 'r') as TokenObj:\n try:\n data = TokenObj.read()\n except (OSError, IOError) as e:\n echo(e)\n data = json.loads(data)\n token = data[\"token\"]\n return token\n else:\n echo(\"\\nYour token file doesn't exists.\")\n echo(\"\\nIt should be present at ~/.evalai/token.json\\n\")\n return None", "def validate_token(self, token):\n\n try:\n if not token:\n raise AuthException(\"Needed a token or Authorization HTTP header\", http_code=HTTPStatus.UNAUTHORIZED)\n\n # try to get from cache first\n now = time()\n token_info = self.token_cache.get(token)\n if token_info and token_info[\"expires\"] < now:\n # delete token. MUST be done with care, as another thread maybe already delete it. 
Do not use del\n self.token_cache.pop(token, None)\n token_info = None\n\n # get from database if not in cache\n if not token_info:\n token_info = self.db.get_one(\"tokens\", {\"_id\": token})\n if token_info[\"expires\"] < now:\n raise AuthException(\"Expired Token or Authorization HTTP header\", http_code=HTTPStatus.UNAUTHORIZED)\n\n return token_info\n\n except DbException as e:\n if e.http_code == HTTPStatus.NOT_FOUND:\n raise AuthException(\"Invalid Token or Authorization HTTP header\", http_code=HTTPStatus.UNAUTHORIZED)\n else:\n raise\n except AuthException:\n if self.config[\"global\"].get(\"test.user_not_authorized\"):\n return {\"id\": \"fake-token-id-for-test\",\n \"project_id\": self.config[\"global\"].get(\"test.project_not_authorized\", \"admin\"),\n \"username\": self.config[\"global\"][\"test.user_not_authorized\"], \"admin\": True}\n else:\n raise\n except Exception:\n self.logger.exception(\"Error during token validation using internal backend\")\n raise AuthException(\"Error during token validation using internal backend\",\n http_code=HTTPStatus.UNAUTHORIZED)", "def getCachedToken( self ):\n if ( os.path.exists( TOKEN_PATH )):\n return open( TOKEN_PATH ).read()\n else :\n return None", "def _get_token(self, regex=None):\n\n item = self._lexer.get_token()\n\n if not item:\n raise ParseError(u'Unexpected end of file')\n else:\n line_no, token = item\n if regex and not regex.match(token):\n pattern = u\"Unexpected format in token '{0}' on line {1}\"\n token_val = common.from_utf8(token.strip())\n raise ParseError(pattern.format(token_val, line_no))\n\n return token", "def get_token(self, session, **kwargs):\n return None", "def getToken(self):\n query = \"SELECT token FROM token WHERE id = 1\"\n res = self.db.execute(query).fetchone()\n if res:\n return res[0]\n return False", "def token(self, pos: int, goal: \"LexerCore.Goal\" = InputElementRegExp) -> Optional[Token]:\n newlines = []\n while 1:\n pos, nls = self.process_skippable(pos)\n newlines.extend(nls)\n\n # Check for common tokens (IdentifierName, Punctuator, NumericLiteral, StringLiteral, Template)\n\n # Identifier\n ident = self.identifiername_match.match(self.src, pos=pos)\n if ident:\n span = ident.span()\n identifier_src = self.src[span[0] : span[1]]\n sv = identifier_name_string_value(identifier_src, syntax_error_ctor=self.syntax_error_ctor)\n id_token = Token(type=\"IDENTIFIER\", src=self.src, value=sv, span=Span(*span), newlines=newlines)\n # Check Early Errors (section 11.6.1.1)\n identifier_name_early_errors(id_token, syntax_error_ctor=self.syntax_error_ctor)\n return id_token\n\n # NumericLiteral\n intconvert = lambda base: lambda span: int(self.src[span[0] + 2 : span[1]], base)\n for matcher, converter in (\n (self.binaryintegerliteral_match, intconvert(2)),\n (self.octalintegerliteral_match, intconvert(8)),\n (self.hexintegerliteral_match, intconvert(16)),\n (self.decimalliteral_match, lambda span: float(self.src[span[0] : span[1]])),\n ):\n nl = matcher.match(self.src, pos=pos)\n if nl:\n span = nl.span()\n return Token(\n type=\"NUMERIC\", src=self.src, value=converter(span), span=Span(*span), newlines=newlines\n )\n\n # Punctuator\n punct = self.punctuator_match.match(self.src, pos=pos)\n if punct:\n span = punct.span()\n return Token(\n type=punct.group(0), value=punct.group(0), src=self.src, span=Span(*span), newlines=newlines\n )\n\n # StringLiteral\n for matcher in (self.doublestringliteral_match, self.singlestringliteral_match):\n sl = matcher.match(self.src, pos=pos)\n if sl:\n span = 
sl.span()\n return Token(\n type=\"STRING\",\n src=self.src,\n value=self._string_value(self.src[span[0] : span[1]]),\n span=Span(*span),\n newlines=newlines,\n )\n\n # DivPunctuator is available for the InputElementDiv and InputElementTemplateTail goals.\n if goal in (self.InputElementDiv, self.InputElementTemplateTail):\n dp = self.divpunctuator_match.match(self.src, pos=pos)\n if dp:\n span = dp.span()\n return Token(\n type=dp.group(0), value=dp.group(0), src=self.src, span=Span(*span), newlines=newlines\n )\n\n # RightBracePunctuator is available for InputElementDiv or InputElementRegExp\n if goal in (self.InputElementDiv, self.InputElementRegExp):\n dbp = self.rightbracepunctuator_match.match(self.src, pos=pos)\n if dbp:\n span = dbp.span()\n return Token(\n type=dbp.group(0), value=dbp.group(0), src=self.src, span=Span(*span), newlines=newlines\n )\n\n # Regular Expressions available only with InputElementRegExp and InputElementRegExpOrTemplateTail\n if goal in (self.InputElementRegExp, self.InputElementRegExpOrTemplateTail):\n regex_literal = self.regularexpressionliteral_match.match(self.src, pos=pos)\n if regex_literal:\n span = regex_literal.span()\n return Token(\n type=\"REGEXP\",\n value=RegExp(\n utf_16_encode(regex_literal.group(\"body\")), utf_16_encode(regex_literal.group(\"flags\"))\n ),\n src=self.src,\n span=Span(*span),\n newlines=newlines,\n )\n\n # All productions get NoSubstitutionTemplate and TemplateHead\n # But only the \"TemplateTail\" goals get TemplateMiddle or TemplateTail\n for valid_goals, matcher, tokentype in (\n (\n (\n self.InputElementDiv,\n self.InputElementRegExp,\n self.InputElementRegExpOrTemplateTail,\n self.InputElementTemplateTail,\n ),\n self.nosubstitutiontemplate_match,\n \"NOSUBSTITUTIONTEMPLATE\",\n ),\n (\n (\n self.InputElementDiv,\n self.InputElementRegExp,\n self.InputElementRegExpOrTemplateTail,\n self.InputElementTemplateTail,\n ),\n self.templatehead_match,\n \"TEMPLATEHEAD\",\n ),\n (\n (self.InputElementRegExpOrTemplateTail, self.InputElementTemplateTail),\n self.templatemiddle_match,\n \"TEMPLATEMIDDLE\",\n ),\n (\n (self.InputElementRegExpOrTemplateTail, self.InputElementTemplateTail),\n self.templatetail_match,\n \"TEMPLATETAIL\",\n ),\n ):\n if goal in valid_goals:\n tmpl = matcher.match(self.src, pos=pos)\n if tmpl:\n span = tmpl.span()\n return Token(\n type=tokentype,\n value=Template(\n tv=self._TemplateValue(tmpl.group(\"tchars\")),\n trv=self._TemplateRawValue(tmpl.group(\"tchars\")),\n ),\n src=self.src,\n span=Span(*span),\n newlines=newlines,\n )\n\n # The end. 
If we still have input and we haven't returned, then this is an unrecognized token.\n # You might think this means we should raise a syntax error, but because there are alternate\n # lexical goals that turns out to be a really bad idea.\n return None", "def find_token_for_authorization(authorization):\n return None", "def peek_for_token(self, ch, check_tok, yes_tok, no_tok):\n if self.peek_char() == check_tok:\n first = ch\n self.read_char()\n literal = first + self.char\n return Token(yes_tok, first + self.char)\n else:\n return Token(no_tok, ch)", "def get_first_token(node: ast.AST) -> Token:\n return node.first_token # type: ignore", "def returnToken(self, token):\n if self.hide_token:\n return None\n else:\n return token", "def get_next_token(self):\n while self.current_char is not None:\n\n if self.current_char.isspace():\n self.skip_whitespace()\n continue\n\n if self.current_char.isalpha():\n return self._id()\n\n if self.current_char.isdigit():\n return Token(INTEGER, self.integer())\n\n if self.current_char == '=':\n self.advance()\n return Token(ASSIGN, '=')\n\n if self.current_char == ';':\n self.advance()\n return Token(SEMI, ';')\n\n if self.current_char == '+':\n self.advance()\n return Token(PLUS, '+')\n\n if self.current_char == '-':\n self.advance()\n return Token(MINUS, '-')\n\n if self.current_char == '*':\n self.advance()\n return Token(MUL, '*')\n\n if self.current_char == '(':\n self.advance()\n return Token(OPEN_BRACE, '(')\n\n if self.current_char == ')':\n self.advance()\n return Token(CLOSE_BRACE, ')')\n\n self.error()\n\n return Token(EOF, None)", "def get_becode_token(self) -> Union[str, None]:\n\n # Set the token to False if the string is not valid\n if len(self.becode_token) < 200:\n return None\n\n return self.becode_token", "def validate_token(self, token):\n from expfactory.database.models import Participant\n\n p = Participant.query.filter(Participant.token == token).first()\n if p is not None:\n if p.token.endswith((\"finished\", \"revoked\")):\n p = None\n else:\n p = p.id\n return p", "def create_token(self, role=None, token_id=None, policies=None, meta=None,\n no_parent=False, lease=None, display_name=None,\n num_uses=None, no_default_policy=False,\n ttl=None, orphan=False, wrap_ttl=None, renewable=None,\n explicit_max_ttl=None):\n params = {\n 'id': token_id,\n 'policies': policies,\n 'meta': meta,\n 'no_parent': no_parent,\n 'display_name': display_name,\n 'num_uses': num_uses,\n 'no_default_policy': no_default_policy,\n 'renewable': renewable\n }\n\n if lease:\n params['lease'] = lease\n else:\n params['ttl'] = ttl\n params['explicit_max_ttl'] = explicit_max_ttl\n\n if explicit_max_ttl:\n params['explicit_max_ttl'] = explicit_max_ttl\n\n if orphan:\n return self._post('/v1/auth/token/create-orphan', json=params, wrap_ttl=wrap_ttl).json()\n elif role:\n return self._post('/v1/auth/token/create/{0}'.format(role), json=params, wrap_ttl=wrap_ttl).json()\n else:\n return self._post('/v1/auth/token/create', json=params, wrap_ttl=wrap_ttl).json()", "def build_token(self, text, language):\n for cls in self.classes:\n ret = cls.validate(text, language)\n if ret:\n return ret\n # No token find:\n raise InvalidTokenSyntax(text)", "def validate(cls, token):\n if not cls.JWT_REGEX.match(token):\n raise ValueError('Invalid JWT token')\n\n return token", "def load_token(token):\n \n #The Token itself was generated by User.get_auth_token. So it is up to \n #us to known the format of the token data itself. 
\n \n #The Token was encrypted using itsdangerous.URLSafeTimedSerializer which \n #allows us to have a max_age on the token itself. When the cookie is stored\n #on the users computer it also has a exipry date, but could be changed by\n #the user, so this feature allows us to enforce the exipry date of the token\n #server side and not rely on the users cookie to exipre. \n max_age = REMEMBER_COOKIE_DURATION.total_seconds()\n \n #Decrypt the Security Token, data = [username, hashpass]\n data = login_serializer.loads(token, max_age=max_age)\n \n #Find the User\n user = load_user(data[0])\n \n #Check Password and return user or None\n if user and data[1] == user.password:\n return user\n return None", "def validate_token(func):\n\n def wrapper(*args, **kwargs):\n # args[0] should be O365ManagementApi (self) because this function is\n # called from the O365ManagementApi class.\n try:\n if args[0].token.expiresOn < datetime.now():\n args[0].token = args[0].get_token()\n do_func = func(*args, **kwargs)\n return do_func\n except AttributeError as a:\n raise AttributeError(\"{0}: Existing token not valid or empty\".format(a))\n\n return wrapper", "def _get_token(self):\n if self._access_token is None or self._is_expired():\n self._refresh_token()\n return self._access_token", "def generic_token(self, character):\n return Token(self.tokentype[character], character)", "def gettok(self):\n try:\n self.next = next(self.tokens)\n except StopIteration:\n self.next = None", "def test_garbage_token(self):\n token = 'ffnnsdifsdjofjfosdjfodsjfosdjofj'\n result = self._token_checker.valid_token_to_id(token)\n self.assertEqual(result, None)", "def generate_new_token(uid):\n random_token = uuid.uuid4()\n token = TokenAuth(user_id=uid, token=random_token)\n token.save()\n return random_token", "def Token(l, token):\n\n return Red(l, lambda _: token)", "def get_token(self, name: str) -> Optional[BuiltinTypeSymbol]:\n\n symbol = self._symbols.get(name)\n return symbol", "def check_token(self, token):\n decoded_token = manage_tokens.decode(token)\n if decoded_token is None:\n return {'error': 'Token is invalid'}\n\n if 'email' not in decoded_token or 'expires' not in decoded_token \\\n or 'token' not in decoded_token:\n return {'error': 'Token is invalid'}\n\n self.email = decoded_token['email']\n self.user_in_db = User.users_db.get(decoded_token['email'])\n\n if not self.user_in_db:\n # User does not exist\n return {'error': 'User does not exist'}\n\n if self.user_in_db['token'] != decoded_token['token']:\n return {'error': 'Token is invalid'}\n\n if decoded_token['expires'] < time.time():\n return {'error': 'Token is expired'}\n\n return decoded_token", "def get_token(headers):\n bearer = headers.get('Authorization')\n if bearer:\n try:\n token_type, token = bearer.rsplit(' ', 1)\n except ValueError:\n raise TokenError('Wrong bearer string: %s', bearer)\n\n if token_type != 'Bearer':\n raise TokenError('Wrong token type: %s, must be %s',\n token_type, 'Bearer')\n return token\n raise TokenError('No token is given in the Authorization header')", "async def token(request: Request):\n return get_token()", "def get_token(self):\n\n return self._token", "def token(self):\n token = self.lex.token()\n if token is not None:\n print(token)\n return token", "def _set_token(self):\n f = open(\".cli_token\")\n data = f.read()\n if data is not None:\n self.token = data\n return self.token", "def token(db):\n token = TokenFactory()\n db.session.commit()\n return token", "def getJWTtoken(self):\n\n token = False\n try:\n res = 
self.s.get(self.url + 'tokens/jwt', auth=(self.username, self.password), verify=False)\n res.raise_for_status()\n except:\n logger.error(res)\n raise\n token = vsdModels.Token(**res.json())\n try:\n payload = jwt.decode(token.tokenValue, verify=False)\n\n except jwt.InvalidTokenError as e:\n logger.error('token invalid, try using Basic Auth{0}'.format(e))\n raise\n\n return token", "def get_next_token(self):\n while self.current_char is not None:\n\n if self.current_char.isspace():\n self.skip_whitespace()\n continue\n\n if self.current_char == '{':\n self.advance()\n self.skip_comment()\n continue\n\n if self.current_char.isalpha():\n return self._id()\n\n if self.current_char.isdigit():\n return self.number()\n\n if self.current_char == '\\'':\n return self._string()\n\n if self.current_char == ':' and self.peek() == '=':\n self.advance()\n self.advance()\n return Token(ASSIGN, ':=')\n\n if self.current_char == ';':\n self.advance()\n return Token(SEMI, ';')\n\n if self.current_char == ':':\n self.advance()\n return Token(COLON, ':')\n\n if self.current_char == ',':\n self.advance()\n return Token(COMMA, ',')\n\n if self.current_char == '+':\n self.advance()\n return Token(PLUS, '+')\n\n if self.current_char == '-':\n self.advance()\n return Token(MINUS, '-')\n\n if self.current_char == '*':\n self.advance()\n return Token(MUL, '*')\n\n if self.current_char == '/':\n self.advance()\n return Token(FLOAT_DIV, '/')\n\n if self.current_char == '(':\n self.advance()\n return Token(LPAREN, '(')\n\n if self.current_char == ')':\n self.advance()\n return Token(RPAREN, ')')\n\n if self.current_char == '.':\n self.advance()\n return Token(DOT, '.')\n\n if self.current_char == '<':\n self.advance()\n return Token(LESS_THAN, '<')\n\n if self.current_char == '>':\n self.advance()\n return Token(GREATER_THAN, '>')\n\n if self.current_char == '=':\n self.advance()\n return Token(EQUAL, '=')\n\n self.error()\n\n return Token(EOF, None)", "async def get_user_token_strict(token: Optional[str] = Depends(get_user_token)) -> str:\n if token:\n return token\n raise exceptions.AuthenticationException()", "def _create_auth_token(self, user=None):\n token, created = Token.objects.get_or_create(user=user)\n return token", "def find_one_or_create_token(cls, user_id=None, create=False):\n self = api.Environment(cls.pool.cursor(), SUPERUSER_ID, {})[cls._name]\n\n if not user_id:\n user_id = self.env.user.id\n\n access_token = (\n self.env[\"api.access_token\"]\n .sudo()\n .search([(\"user_id\", \" = \", user_id)], order=\"id DESC\", limit=1)\n )\n if access_token:\n access_token = access_token[0]\n if access_token.has_expired():\n access_token = None\n if not access_token and create:\n expires = datetime.now() + timedelta(\n seconds=100000)\n\n vals = {\n \"user_id\": user_id,\n \"scope\": False,\n \"expires\": expires.strftime(DEFAULT_SERVER_DATETIME_FORMAT),\n \"token\": nonce(),\n }\n\n print(expires.strftime(DEFAULT_SERVER_DATETIME_FORMAT))\n access_token = request.env[\"api.access_token\"].create(vals)\n if not access_token:\n return None\n return access_token.token", "def get_token(html, pattern):\n result = pattern.search(html)\n if result:\n return result.group(1)\n else:\n error('Failed to find token')\n return None", "def _handle_token(self, token: str) -> Optional[str]:\n raise RuntimeError('Cannot use _handle_token of this abstract class.')", "def create_auth_token():\n data = get_request_data(request)\n address = data.get(\"address\")\n expiration = int(data.get(\"expiration\"))\n\n pk = 
get_provider_private_key(use_universal_key=True)\n token = jwt.encode({\"exp\": expiration, \"address\": address}, pk, algorithm=\"HS256\")\n token = token.decode(\"utf-8\") if isinstance(token, bytes) else token\n\n valid, message = is_token_valid(token, address)\n if not valid:\n if message == \"Token is deleted.\":\n force_restore_token(token)\n else:\n return jsonify(error=message), 400\n\n return jsonify(token=token)", "def token_str(self) -> Optional[str]:\n return self._token_str", "def get_token(self, **kwargs):\n try:\n token = self.config[USER_SECTION_KEY][TOKEN_OPTION_KEY]\n return token\n except KeyError:\n # Return default value if provided\n if 'default' in kwargs:\n return kwargs['default']\n # Throw error otherwise\n raise", "def look_up_a_token():\n try:\n data = request.get_json(force=True)\n except Exception:\n data = None\n if data:\n tok = data['token']\n else:\n tok = request.headers.get('TOK_ID')\n request.data\n\n try:\n creation_time = int(round(datetime.timestamp(tokens[tok]), 0))\n issue_time = tokens[tok].isoformat()\n except Exception:\n _now = datetime.now(UTC)\n creation_time = int(round(datetime.timestamp(_now)))\n issue_time = _now.isoformat()\n tokens[tok] = _now\n expire_time = datetime.fromtimestamp(creation_time + 2764790)\n\n return jsonify({\n \"data\": {\n \"accessor\": \"8609694a-cdbc-db9b-d345-e782dbb562ed\",\n \"creation_time\": creation_time,\n \"creation_ttl\": 2764800,\n \"display_name\": \"fooname\",\n \"entity_id\": \"7d2e3179-f69b-450c-7179-ac8ee8bd8ca9\",\n \"expire_time\": expire_time.isoformat(),\n \"explicit_max_ttl\": 0,\n \"id\": tok,\n \"identity_policies\": [\n \"dev-group-policy\"\n ],\n \"issue_time\": issue_time,\n \"meta\": {\n \"username\": \"tesla\"\n },\n \"num_uses\": 0,\n \"orphan\": True,\n \"path\": \"auth/kubernetes/login\",\n \"policies\": [\n \"default\"\n ],\n \"renewable\": True,\n \"ttl\": 2764790\n }\n })", "def generateToken():\n token_length = random.randint(MIN_TOKEN_LEN, MAX_TOKEN_LEN)\n token = ''.join(random.choice(POSS_TOKEN_CHARS) for _ in range(token_length))\n return token", "def token(self):\n\n if not self.requests:\n return None\n return self.requests[0].token", "def prep_token(**kwargs):\n token = kwargs.get('token')\n if not token:\n token = oauth2_wrappers.gen_token()\n return token", "async def create_token(self, *args, **kwargs) -> OAuth2Token:\n token = await super().create_token(*args, **kwargs)\n # NOTE: Save data from token to db here.\n return token", "def get_next_token(self):\n text = self.text\n\n # is self.pos index past the end of the self.text ?\n # if so, then return EOF token because there is no more\n # input left to convert into tokens\n self.ignore_whitespaces()\n if self.pos > len(text) - 1:\n return Token(EOF, None)\n #### Logic for ignoring whitespaces and handling multiple\n #### digit input\n current_char = text[self.pos]\n\n if current_char == '+':\n token = Token(PLUS, current_char)\n self.pos += 1\n return token\n elif current_char == '-':\n token = Token(MINUS, current_char)\n self.pos += 1\n return token\n elif current_char == '*':\n token = Token(MULTIPLY, current_char)\n self.pos += 1\n return token\n elif current_char == '/':\n token = Token(DIVIDE, current_char)\n self.pos += 1\n return token\n\n value_str = \"\"\n while (current_char.isdigit() or current_char == '.'):\n value_str = value_str + current_char\n self.pos += 1\n if self.pos < len(text):\n current_char = text[self.pos]\n else:\n break\n\n if value_str.find('.') > -1:\n try:\n value = float(value_str)\n 
except ValueError:\n print \"Could not convert {value_str} to a float\".format(value_str)\n self.error()\n token = Token(FLOAT, value)\n return token\n elif value_str[0].isdigit():\n try:\n value = int(value_str)\n except ValueError:\n print \"Could not convert {value_str} to an Interger\".format(value_str)\n self.error()\n token = Token(INTEGER,value)\n return token\n\n\n ####\n \"\"\"\n # get a character at the position self.pos and decide\n # what token to create based on the single character\n current_char = text[self.pos]\n\n # if the character is a digit then convert it to\n # integer, create an INTEGER token, increment self.pos\n # index to point to the next character after the digit,\n # and return the INTEGER token\n if current_char.isdigit():\n token = Token(INTEGER, int(current_char))\n self.pos += 1\n return token\n\n if current_char == '+':\n token = Token(PLUS, current_char)\n self.pos += 1\n return token\n\n if current_char == '-':\n token = Token(MINUS, current_char)\n self.pos += 1\n return token\n\n if current_char == '*':\n token = Token(MULTIPLY, current_char)\n self.pos += 1\n return token\n\n if current_char == '/':\n token = Token(DIVIDE, current_char)\n self.pos += 1\n return token\n \"\"\"\n self.error()", "def get_token():\n # get authorization header:\n auth = request.headers.get('Authorization', None)\n \n # authorization header should be included:\n if auth is None:\n raise JWTError(\n {\n 'code': 'authorization_header_missing',\n 'description': 'Authorization header is expected.'\n }, \n 401\n )\n \n # authorization header should be 'Bearer [JWT]'\n parts = auth.split()\n if parts[0].lower() != 'bearer':\n raise JWTError(\n {\n 'code': 'invalid_header',\n 'description': 'Authorization header must start with \"Bearer\".'\n }, \n 401\n )\n elif len(parts) == 1:\n raise JWTError(\n {\n 'code': 'invalid_header',\n 'description': 'Token not found.'\n }, \n 401\n )\n elif len(parts) > 2:\n raise JWTError(\n {\n 'code': 'invalid_header',\n 'description': 'Authorization header must be bearer token.'\n }, \n 401\n )\n\n # extract JWT:\n token = parts[1]\n\n return token", "def __current_authentication_token(self):\n if os.path.isfile(self.token_filename):\n with open(self.token_filename, 'r') as f:\n (stored_token, expires) = f.read().split(' ')\n t = time.time()\n if int(expires) > t:\n return stored_token\n return None", "def odb_token():\n return genToken()", "def find_token(self, start_token, tok_type, tok_str=None, reverse=False):\n # type: (Token, int, Optional[str], bool) -> Token\n t = start_token\n advance = self.prev_token if reverse else self.next_token\n while not match_token(t, tok_type, tok_str) and not token.ISEOF(t.type):\n t = advance(t, include_extra=True)\n return t", "def create_token(self, consumer, token_type, timestamp, user=None):\n token, created = self.first_or_create(consumer=consumer, \n token_type=token_type, \n timestamp=timestamp,\n user=user)\n\n if created:\n token.key, token.secret = self.generate_random_codes()\n token.save()\n\n return token", "def token_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"token_type\")" ]
[ "0.68337315", "0.6518242", "0.63705957", "0.63319176", "0.6099937", "0.6092421", "0.60620815", "0.6054427", "0.60395783", "0.60344374", "0.5973788", "0.59650373", "0.5948255", "0.5936152", "0.5906422", "0.5888766", "0.5868331", "0.58436376", "0.5830226", "0.58285004", "0.5814211", "0.57976866", "0.5792274", "0.5776438", "0.57740706", "0.5767153", "0.5762447", "0.57513064", "0.57468826", "0.57407504", "0.57407504", "0.5736502", "0.5733446", "0.57158285", "0.5715115", "0.56943023", "0.56509435", "0.5640087", "0.56385374", "0.56362736", "0.5635982", "0.5631442", "0.5625208", "0.5619076", "0.5618772", "0.5606986", "0.5582794", "0.5576632", "0.55715847", "0.55604094", "0.5555846", "0.5542955", "0.5540541", "0.5539925", "0.55357504", "0.55298626", "0.5526703", "0.55152404", "0.55097306", "0.5504994", "0.5501507", "0.549075", "0.54876333", "0.54838985", "0.5452468", "0.5443552", "0.54424006", "0.5434083", "0.5433224", "0.5428485", "0.54205394", "0.5418067", "0.5415024", "0.54130054", "0.5408467", "0.5406686", "0.5399621", "0.53965366", "0.538952", "0.53857934", "0.5378441", "0.5375055", "0.5372139", "0.5371772", "0.5371437", "0.5356652", "0.5350599", "0.5347906", "0.5347498", "0.5343939", "0.53411925", "0.53392595", "0.5335547", "0.53235906", "0.53225833", "0.53224236", "0.5316493", "0.5314783", "0.5312771", "0.530455", "0.530157" ]
0.0
-1
two keras Model instances are equal enough
def assert_models_equal(first, second):
    # layer names and settings
    assert first.get_config() == second.get_config()
    # model weights
    assert len(first.get_weights()) == len(second.get_weights())
    for w1, w2 in zip(first.get_weights(), second.get_weights()):
        np.testing.assert_array_equal(w1, w2)
    # optimizer
    assert first.optimizer.get_config() == second.optimizer.get_config()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_second_keras_model_created():\n X, _, _, _ = get_data()\n tf.random.set_seed(12345)\n initializer = tf.keras.initializers.Zeros()\n input_data = Input(shape=X[0].shape)\n xx = Dense(128, activation=\"relu\", kernel_initializer=initializer)(input_data)\n xx = Dense(128, activation=\"relu\", kernel_initializer=initializer)(xx)\n xx = Dense(64, activation=\"relu\", kernel_initializer=initializer)(xx)\n output = Dense(n_classes, activation=\"softmax\", kernel_initializer=initializer)(xx)\n _ = SafeKerasModel(\n inputs=input_data,\n outputs=output,\n name=\"test\",\n num_samples=X.shape[0],\n epochs=EPOCHS,\n )\n model2 = SafeKerasModel(\n inputs=input_data,\n outputs=output,\n name=\"test\",\n num_samples=X.shape[0],\n epochs=EPOCHS,\n )\n rightname = \"KerasModel\"\n assert (\n model2.model_type == rightname\n ), \"failed check for second model type being set in init()\"\n # noise multiplier should have been reset from default to one that matches rules.json\n assert model2.noise_multiplier == 0.7", "def test_same_weights(): # pylint : disable=too-many-locals\n # make models to test\n model1, X, _, _, _ = make_small_model(num_hidden_layers=1)\n model2, _, _, _, _ = make_small_model(num_hidden_layers=2)\n input_data = Input(shape=X[0].shape)\n initializer = tf.keras.initializers.Zeros()\n xx = Dense(64, activation=\"relu\", kernel_initializer=initializer)(input_data)\n output = Dense(n_classes, activation=\"softmax\", kernel_initializer=initializer)(xx)\n model1a = SafeKerasModel(\n inputs=input_data,\n outputs=output,\n name=\"test\",\n num_samples=X.shape[0],\n epochs=EPOCHS,\n )\n\n # same\n same1, _ = safekeras.same_weights(model1, model1)\n assert same1 is True\n\n # different num layers\n same2, _ = safekeras.same_weights(model1, model2)\n assert same2 is False\n\n # different sized layers\n same3, _ = safekeras.same_weights(model1, model1a)\n errstr = (\n \"model1 hidden layer has \"\n f\" {len(model1.layers[1].get_weights()[0][0])} units\"\n f\" but model2 has {len(model1a.layers[1].get_weights()[0][0])}.\\n\"\n )\n assert same3 is False, errstr", "def test_same_configs(): # pylint: disable=too-many-locals\n\n model1, X, _, _, _ = make_small_model(num_hidden_layers=1)\n model2, _, _, _, _ = make_small_model(num_hidden_layers=2)\n model2a, _, _, _, _ = make_small_model(num_hidden_layers=2)\n\n # different numbers of layers\n same1, msg1 = safekeras.same_configs(model1, model2)\n errstr = (\n f\"model1 has {len(model1.layers)} layers, but\\n\"\n f\"model2 has {len(model2.layers)} layers\\n\"\n )\n assert same1 is False, errstr\n correct_msg1 = get_reporting_string(name=\"different_layer_count\")\n errstr = f\"msg was: {msg1}\\n\" f\"should be : {correct_msg1}\"\n assert msg1 == correct_msg1, errstr\n\n # same architecture\n same2, msg2 = safekeras.same_configs(model2, model2a)\n correct_msg2 = get_reporting_string(name=\"same_ann_config\")\n assert msg2 == correct_msg2, (\n rf\"should report {correct_msg2}\\,\" f\" but got {msg2}.\\n\"\n )\n assert same2 is True, \"models are same!\"\n\n # same layers, different widths\n input_data = Input(shape=X[0].shape)\n initializer = tf.keras.initializers.Zeros()\n xx = Dense(64, activation=\"relu\", kernel_initializer=initializer)(input_data)\n output = Dense(n_classes, activation=\"softmax\", kernel_initializer=initializer)(xx)\n model1a = SafeKerasModel(\n inputs=input_data,\n outputs=output,\n name=\"test\",\n num_samples=X.shape[0],\n epochs=EPOCHS,\n )\n check_init_completed(model1a)\n same3, msg3 = safekeras.same_configs(model1, 
model1a)\n errmsg = \"Should report layers have different num nodes\"\n assert same3 is False, errmsg\n correct_msg3 = get_reporting_string(name=\"layer_configs_differ\", layer=1, length=1)\n correct_msg3 += get_reporting_string(\n name=\"param_changed_from_to\", key=\"units\", val=\"32\", cur_val=64\n )\n errstr = f\"got message: {msg3}\\n\" rf\"expected. : {correct_msg3}\\.\"\n assert msg3 == correct_msg3, errstr", "def test_same_models(self):\n\t\t\n\t\t# TODO: finish\n\t\tpass", "def test_same_models_are_equal(dbdiskrepo):\n fit1 = fit_model()\n fit2 = fit_model()\n assert fit1.artifact.id == fit2.artifact.id\n assert fit1.artifact.value_id == fit2.artifact.value_id\n assert hash(fit1) == hash(fit2)", "def _compare_models(self, alpha1, alpha2):\n return np.array_equal(alpha1, alpha2)", "def test_reproducible(self):\n model_1 = PoincareModel(self.data_large, seed=1, negative=3, burn_in=1)\n model_1.train(epochs=2)\n\n model_2 = PoincareModel(self.data_large, seed=1, negative=3, burn_in=1)\n model_2.train(epochs=2)\n self.assertTrue(np.allclose(model_1.kv.syn0, model_2.kv.syn0))", "def test_same_seeds_result_in_same_models(dbdiskrepo):\n fit1 = fit_model(seed=0)\n fit2 = fit_model(seed=0)\n\n assert p.hash(fit1) == p.hash(fit2)\n assert fit1.artifact.id == fit2.artifact.id\n assert fit1.artifact.value_id == fit2.artifact.value_id", "def models_are_equivalent(model_a: TopLevelOscalModel, model_b: TopLevelOscalModel) -> bool:\n # this will change the second model as a side-effect\n model_b.metadata.last_modified = model_a.metadata.last_modified\n return model_a == model_b", "def test_simple_merge(self):\n input_tensor = Input(shape=(3,))\n x1 = Dense(4)(input_tensor)\n x2 = Dense(5)(x1)\n x3 = Dense(6)(x1)\n x4 = merge([x2, x3], mode=\"concat\")\n x5 = Dense(7)(x4)\n\n model = Model(input=[input_tensor], output=[x5])\n input_names = [\"data\"]\n output_names = [\"output\"]\n\n spec = keras.convert(model, input_names, output_names).get_spec()\n self.assertIsNotNone(spec)\n\n # Test the model class\n self.assertIsNotNone(spec.description)\n self.assertTrue(spec.HasField(\"neuralNetwork\"))\n\n # Test the inputs and outputs\n self.assertEqual(len(spec.description.input), len(input_names))\n self.assertEqual(\n sorted(input_names), sorted(map(lambda x: x.name, spec.description.input))\n )\n self.assertEqual(len(spec.description.output), len(output_names))\n self.assertEqual(\n sorted(output_names), sorted(map(lambda x: x.name, spec.description.output))\n )", "def test_different_seeds_result_in_different_models(dbdiskrepo):\n fit1 = fit_model(seed=0)\n fit2 = fit_model(seed=1)\n\n assert p.hash(fit1) != p.hash(fit2)\n assert fit1.artifact.id != fit2.artifact.id\n assert fit1.artifact.value_id != fit2.artifact.value_id", "def assert_wrappers_equal(first, second):\n assert first.sk_params == second.sk_params\n assert first.history_ == second.history_\n if not first.model_ or not second.model_:\n assert first.model_ == second.model_\n else:\n assert_models_equal(first.model, second.model)", "def test_equal_models_opt():\n dmd = DMD(svd_rank=2, opt=True)\n dmd.fit(X=sample_data)\n\n dmd_xy = DMD(svd_rank=2, opt=True)\n dmd_xy.fit(X=sample_data_1, Y=sample_data_2)\n\n assert_equal_models(dmd_xy, dmd, rtol=0.05)", "def test_equal_models_default():\n dmd = DMD(svd_rank=2)\n dmd.fit(X=sample_data)\n\n dmd_xy = DMD(svd_rank=2)\n dmd_xy.fit(X=sample_data_1, Y=sample_data_2)\n\n assert_equal_models(dmd_xy, dmd)", "def test_equal_models_opt_exact():\n dmd = DMD(svd_rank=2, opt=True, exact=True)\n 
dmd.fit(X=sample_data)\n\n dmd_xy = DMD(svd_rank=2, opt=True, exact=True)\n dmd_xy.fit(X=sample_data_1, Y=sample_data_2)\n\n assert_equal_models(dmd_xy, dmd)", "def test_equal_models_exact():\n dmd = DMD(svd_rank=2, exact=True)\n dmd.fit(X=sample_data)\n\n dmd_xy = DMD(svd_rank=2, exact=True)\n dmd_xy.fit(X=sample_data_1, Y=sample_data_2)\n\n assert_equal_models(dmd_xy, dmd)", "def test_checkpoints_are_equal():\n model1, X, y, Xval, yval = make_small_model(num_hidden_layers=1)\n loss = tf.keras.losses.CategoricalCrossentropy(\n from_logits=False, reduction=tf.losses.Reduction.NONE\n )\n model1.compile(loss=loss)\n model1.fit(X, y, validation_data=(Xval, yval), epochs=EPOCHS, batch_size=20)\n model1.save(\"fit.tf\")\n model1.fit(X, y, validation_data=(Xval, yval), epochs=EPOCHS * 2, batch_size=20)\n model1.save(\"refit.tf\")\n\n # same arch, different weights\n same, msg = safekeras.check_checkpoint_equality(\"fit.tf\", \"refit.tf\")\n assert same is False, msg\n\n # should be same\n same, msg = safekeras.check_checkpoint_equality(\"fit.tf\", \"fit.tf\")\n print(msg)\n assert same is True, msg\n\n # different architecture\n model2, X, y, Xval, yval = make_small_model(num_hidden_layers=3)\n model2.compile(loss=loss)\n model2.fit(X, y, validation_data=(Xval, yval), epochs=EPOCHS, batch_size=20)\n model2.save(\"fit2.tf\")\n\n same, msg = safekeras.check_checkpoint_equality(\"fit.tf\", \"fit2.tf\")\n print(msg)\n assert same is False, msg\n\n # coping with trashed files\n cleanup_file(\"fit.tf/saved_model.pb\")\n same, msg = safekeras.check_checkpoint_equality(\"fit.tf\", \"fit2.tf\")\n assert same is False, msg\n same, msg = safekeras.check_checkpoint_equality(\"fit2.tf\", \"fit.tf\")\n assert same is False, msg\n\n same, msg = safekeras.check_checkpoint_equality(\"hello\", \"fit2.tf\")\n assert same is False\n assert \"Error re-loading model from\" in msg\n\n same, msg = safekeras.check_checkpoint_equality(\"fit2.tf\", \"hello\")\n assert same is False\n assert \"Error re-loading model from\" in msg\n\n for name in (\"fit.tf\", \"fit2.tf\", \"refit.tf\"):\n cleanup_file(name)", "def check_models(model1, model2, use_cross_validation=False, op='e'):\n # 1. Check model types\n model1_type = type(model1)\n model2_type = type(model2)\n assert model1_type == model2_type, \"The model types differ. The first model is of type {0} and the second \" \\\n \"models is of type {1}.\".format(model1_type, model2_type)\n\n # 2. Check model metrics\n if isinstance(model1,H2OBinomialModel): # 2a. Binomial\n # F1\n f1_1 = model1.F1(xval=use_cross_validation)\n f1_2 = model2.F1(xval=use_cross_validation)\n if op == 'e': assert f1_1[0][1] == f1_2[0][1], \"The first model has an F1 of {0} and the second model has an F1 of \" \\\n \"{1}. Expected the first to be == to the second.\".format(f1_1[0][1], f1_2[0][1])\n elif op == 'g': assert f1_1[0][1] > f1_2[0][1], \"The first model has an F1 of {0} and the second model has an F1 of \" \\\n \"{1}. Expected the first to be > than the second.\".format(f1_1[0][1], f1_2[0][1])\n elif op == 'ge': assert f1_1[0][1] >= f1_2[0][1], \"The first model has an F1 of {0} and the second model has an F1 of \" \\\n \"{1}. Expected the first to be >= than the second.\".format(f1_1[0][1], f1_2[0][1])\n elif isinstance(model1,H2ORegressionModel): # 2b. Regression\n # MSE\n mse1 = model1.mse(xval=use_cross_validation)\n mse2 = model2.mse(xval=use_cross_validation)\n if op == 'e': assert mse1 == mse2, \"The first model has an MSE of {0} and the second model has an MSE of \" \\\n \"{1}. 
Expected the first to be == to the second.\".format(mse1, mse2)\n elif op == 'g': assert mse1 > mse2, \"The first model has an MSE of {0} and the second model has an MSE of \" \\\n \"{1}. Expected the first to be > than the second.\".format(mse1, mse2)\n elif op == 'ge': assert mse1 >= mse2, \"The first model has an MSE of {0} and the second model has an MSE of \" \\\n \"{1}. Expected the first to be >= than the second.\".format(mse1, mse2)\n elif isinstance(model1,H2OMultinomialModel): # 2c. Multinomial\n # hit-ratio\n pass\n elif isinstance(model1,H2OClusteringModel): # 2d. Clustering\n # totss\n totss1 = model1.totss(xval=use_cross_validation)\n totss2 = model2.totss(xval=use_cross_validation)\n if op == 'e': assert totss1 == totss2, \"The first model has an TOTSS of {0} and the second model has an \" \\\n \"TOTSS of {1}. Expected the first to be == to the second.\".format(totss1,\n totss2)\n elif op == 'g': assert totss1 > totss2, \"The first model has an TOTSS of {0} and the second model has an \" \\\n \"TOTSS of {1}. Expected the first to be > than the second.\".format(totss1,\n totss2)\n elif op == 'ge': assert totss1 >= totss2, \"The first model has an TOTSS of {0} and the second model has an \" \\\n \"TOTSS of {1}. Expected the first to be >= than the second.\" \\\n \"\".format(totss1, totss2)", "def keras_model_2(X_train, y_train, max_epochs=20, batch_size=16, train_size=0.85):\n num_classes = len(np.unique(y_train))\n num_features = X_train.shape[1]\n\n print(\"Building model...\")\n\n model = Sequential()\n model.add(Dropout(0.1))\n\n model.add(Dense(num_features, 400, init='glorot_uniform'))\n model.add(PReLU((400,)))\n model.add(BatchNormalization((400,)))\n model.add(Dropout(0.5))\n\n model.add(Dense(400, 400, init='glorot_uniform'))\n model.add(PReLU((400,)))\n model.add(BatchNormalization((400,)))\n model.add(Dropout(0.5))\n\n model.add(Dense(400, 400, init='glorot_uniform'))\n model.add(PReLU((400,)))\n model.add(BatchNormalization((400,)))\n model.add(Dropout(0.5))\n\n model.add(Dense(400, 400, init='glorot_uniform'))\n model.add(PReLU((400,)))\n model.add(BatchNormalization((400,)))\n model.add(Dropout(0.5))\n\n model.add(Dense(400, num_classes, init='glorot_uniform'))\n model.add(Activation('softmax'))\n\n sgd = SGD(lr=0.1, decay=1e-6, momentum=0.1, nesterov=True)\n model.compile(loss='categorical_crossentropy', optimizer=sgd)\n\n print(\"Training model...\")\n X = X_train\n y = np_utils.to_categorical(y_train)\n history = model.fit(X, y, nb_epoch=max_epochs, batch_size=batch_size, verbose=2, validation_split=1-train_size, show_accuracy=True)\n\n return model, history", "def check_models(model1, model2, use_cross_validation=False, op='e'):\n # 1. Check model types\n model1_type = model1.__class__.__name__\n model2_type = model1.__class__.__name__\n assert model1_type is model2_type, \"The model types differ. The first model is of type {0} and the second \" \\\n \"models is of type {1}.\".format(model1_type, model2_type)\n\n # 2. Check model metrics\n if isinstance(model1,H2OBinomialModel): # 2a. Binomial\n # F1\n f1_1 = model1.F1(xval=use_cross_validation)\n f1_2 = model2.F1(xval=use_cross_validation)\n if op == 'e': assert f1_1[0][1] == f1_2[0][1], \"The first model has an F1 of {0} and the second model has an F1 of \" \\\n \"{1}. Expected the first to be == to the second.\".format(f1_1[0][1], f1_2[0][1])\n elif op == 'g': assert f1_1[0][1] > f1_2[0][1], \"The first model has an F1 of {0} and the second model has an F1 of \" \\\n \"{1}. 
Expected the first to be > than the second.\".format(f1_1[0][1], f1_2[0][1])\n elif op == 'ge': assert f1_1[0][1] >= f1_2[0][1], \"The first model has an F1 of {0} and the second model has an F1 of \" \\\n \"{1}. Expected the first to be >= than the second.\".format(f1_1[0][1], f1_2[0][1])\n elif isinstance(model1,H2ORegressionModel): # 2b. Regression\n # MSE\n mse1 = model1.mse(xval=use_cross_validation)\n mse2 = model2.mse(xval=use_cross_validation)\n if op == 'e': assert mse1 == mse2, \"The first model has an MSE of {0} and the second model has an MSE of \" \\\n \"{1}. Expected the first to be == to the second.\".format(mse1, mse2)\n elif op == 'g': assert mse1 > mse2, \"The first model has an MSE of {0} and the second model has an MSE of \" \\\n \"{1}. Expected the first to be > than the second.\".format(mse1, mse2)\n elif op == 'ge': assert mse1 >= mse2, \"The first model has an MSE of {0} and the second model has an MSE of \" \\\n \"{1}. Expected the first to be >= than the second.\".format(mse1, mse2)\n elif isinstance(model1,H2OMultinomialModel): # 2c. Multinomial\n # hit-ratio\n pass\n elif isinstance(model1,H2OClusteringModel): # 2d. Clustering\n # totss\n totss1 = model1.totss(xval=use_cross_validation)\n totss2 = model2.totss(xval=use_cross_validation)\n if op == 'e': assert totss1 == totss2, \"The first model has an TOTSS of {0} and the second model has an \" \\\n \"TOTSS of {1}. Expected the first to be == to the second.\".format(totss1,\n totss2)\n elif op == 'g': assert totss1 > totss2, \"The first model has an TOTSS of {0} and the second model has an \" \\\n \"TOTSS of {1}. Expected the first to be > than the second.\".format(totss1,\n totss2)\n elif op == 'ge': assert totss1 >= totss2, \"The first model has an TOTSS of {0} and the second model has an \" \\\n \"TOTSS of {1}. 
Expected the first to be >= than the second.\" \\\n \"\".format(totss1, totss2)", "def test_torch_train_original_layer_multiple(self):\n model = Sequential(\n self.get_digital_layer(in_channels=2, out_channels=2, kernel_size=4, padding=2),\n self.get_digital_layer(in_channels=2, out_channels=3, kernel_size=4, padding=2)\n )\n\n analog_model = Sequential(\n self.get_layer(in_channels=2, out_channels=2, kernel_size=4, padding=2),\n self.get_layer(in_channels=2, out_channels=3, kernel_size=4, padding=2)\n )\n\n for analog_layer, layer in zip(analog_model.children(), model.children()):\n self.set_weights_from_digital_model(analog_layer, layer)\n\n loss_func = mse_loss\n y_b = randn(3, 3, 6, 6)\n x_b = randn(3, 2, 4, 4)\n\n if self.use_cuda:\n y_b = y_b.cuda()\n x_b = x_b.cuda()\n\n self.train_model(model, loss_func, x_b, y_b)\n self.train_model(analog_model, loss_func, x_b, y_b)\n\n for analog_layer, layer in zip(analog_model.children(), model.children()):\n weight, bias = self.get_weights_from_digital_model(analog_layer, layer)\n\n weight_analog, bias_analog = analog_layer.analog_tile.get_weights(realistic=False)\n\n self.assertTensorAlmostEqual(weight_analog, weight)\n if analog_layer.use_bias:\n self.assertTensorAlmostEqual(bias_analog, bias)", "def test_torch_train_original_layer_multiple(self):\n model = Sequential(\n self.get_digital_layer(in_channels=2, out_channels=2, kernel_size=4, padding=2),\n self.get_digital_layer(in_channels=2, out_channels=3, kernel_size=4, padding=2)\n )\n\n analog_model = Sequential(\n self.get_layer(in_channels=2, out_channels=2, kernel_size=4, padding=2),\n self.get_layer(in_channels=2, out_channels=3, kernel_size=4, padding=2)\n )\n\n for analog_layer, layer in zip(analog_model.children(), model.children()):\n self.set_weights_from_digital_model(analog_layer, layer)\n\n loss_func = mse_loss\n y_b = randn(3, 3, 6)\n x_b = randn(3, 2, 4)\n\n if self.use_cuda:\n y_b = y_b.cuda()\n x_b = x_b.cuda()\n\n self.train_model(model, loss_func, x_b, y_b)\n self.train_model(analog_model, loss_func, x_b, y_b)\n\n for analog_layer, layer in zip(analog_model.children(), model.children()):\n weight, bias = self.get_weights_from_digital_model(analog_layer, layer)\n\n weight_analog, bias_analog = analog_layer.analog_tile.get_weights(realistic=False)\n\n self.assertTensorAlmostEqual(weight_analog, weight)\n if analog_layer.use_bias:\n self.assertTensorAlmostEqual(bias_analog, bias)", "def test_torch_train_original_layer_multiple(self):\n model = Sequential(\n self.get_digital_layer(in_channels=2, out_channels=2, kernel_size=4, padding=2),\n self.get_digital_layer(in_channels=2, out_channels=3, kernel_size=4, padding=2)\n )\n\n analog_model = Sequential(\n self.get_layer(in_channels=2, out_channels=2, kernel_size=4, padding=2),\n self.get_layer(in_channels=2, out_channels=3, kernel_size=4, padding=2)\n )\n\n for analog_layer, layer in zip(analog_model.children(), model.children()):\n self.set_weights_from_digital_model(analog_layer, layer)\n\n loss_func = mse_loss\n y_b = randn(3, 3, 6, 6, 6)\n x_b = randn(3, 2, 4, 4, 4)\n\n if self.use_cuda:\n y_b = y_b.cuda()\n x_b = x_b.cuda()\n\n self.train_model(model, loss_func, x_b, y_b)\n self.train_model(analog_model, loss_func, x_b, y_b)\n\n for analog_layer, layer in zip(analog_model.children(), model.children()):\n weight, bias = self.get_weights_from_digital_model(analog_layer, layer)\n\n weight_analog, bias_analog = analog_layer.analog_tile.get_weights(realistic=False)\n\n self.assertTensorAlmostEqual(weight_analog, weight)\n if 
analog_layer.use_bias:\n self.assertTensorAlmostEqual(bias_analog, bias)", "def keras_model_1(X_train, y_train, max_epochs=20, batch_size=16, train_size=0.85):\n num_classes = len(np.unique(y_train))\n num_features = X_train.shape[1]\n\n print(\"Building model...\")\n\n model = Sequential()\n model.add(Dense(num_features, 400, init='glorot_uniform'))\n model.add(PReLU((400,)))\n model.add(BatchNormalization((400,)))\n model.add(Dropout(0.5))\n\n model.add(Dense(400, 400, init='glorot_uniform'))\n model.add(PReLU((400,)))\n model.add(BatchNormalization((400,)))\n model.add(Dropout(0.5))\n\n model.add(Dense(400, 400, init='glorot_uniform'))\n model.add(PReLU((400,)))\n model.add(BatchNormalization((400,)))\n model.add(Dropout(0.5))\n\n model.add(Dense(400, 400, init='glorot_uniform'))\n model.add(PReLU((400,)))\n model.add(BatchNormalization((400,)))\n model.add(Dropout(0.5))\n\n model.add(Dense(400, num_classes, init='glorot_uniform'))\n model.add(Activation('softmax'))\n\n sgd = SGD(lr=0.1, decay=1e-6, momentum=0.4, nesterov=True)\n model.compile(loss='categorical_crossentropy', optimizer=sgd)\n\n print(\"Training model...\")\n X = X_train\n y = np_utils.to_categorical(y_train)\n history = model.fit(X, y, nb_epoch=max_epochs, batch_size=batch_size, verbose=2, validation_split=1-train_size, show_accuracy=True)\n\n return model, history", "def test_noise_models_equal(self):\n roerror = [[0.9, 0.1], [0.5, 0.5]]\n error1 = pauli_error([['X', 1]], standard_gates=False)\n error2 = pauli_error([['X', 1]], standard_gates=True)\n\n model1 = NoiseModel()\n model1.add_all_qubit_quantum_error(error1, ['u3'], False)\n model1.add_quantum_error(error1, ['u3'], [2], False)\n model1.add_nonlocal_quantum_error(error1, ['cx'], [0, 1], [3], False)\n model1.add_all_qubit_readout_error(roerror, False)\n model1.add_readout_error(roerror, [0], False)\n\n model2 = NoiseModel()\n model2.add_all_qubit_quantum_error(error2, ['u3'], False)\n model2.add_quantum_error(error2, ['u3'], [2], False)\n model2.add_nonlocal_quantum_error(error2, ['cx'], [0, 1], [3], False)\n model2.add_all_qubit_readout_error(roerror, False)\n model2.add_readout_error(roerror, [0], False)\n self.assertEqual(model1, model2)", "def compare_models(model_1: torch.nn.Module, model_2: torch.nn.Module, is_equal: bool = True):\n with contextlib.nullcontext() if is_equal else pytest.raises(Exception):\n # Compare model module attributes since algorithms like StochasticDepth monkeypatch\n # on new attributes. 
We only check this on ComposerClassifier models that have .module\n if isinstance(model_1, ComposerClassifier) and isinstance(model_2, ComposerClassifier):\n model_1_modules = list(model_1.module.modules())\n model_2_modules = list(model_2.module.modules())\n assert len(model_1_modules) == len(model_2_modules)\n for module_1, module_2 in zip(model_1_modules, model_2_modules):\n assert sorted(list(module_1.__dict__.keys())) == sorted(list(module_2.__dict__.keys()))\n # Compare model parameters\n for (name0, tensor0), (name1, tensor1) in zip(model_1.state_dict().items(), model_2.state_dict().items()):\n assert name0 == name1\n assert torch.equal(tensor0, tensor1)", "def get_model_1(parameters):\n # Parameters\n n1 = parameters['n1']\n k1 = parameters['k1']\n n2 = parameters['n2']\n n3 = parameters['n3']\n k2 = parameters['k2']\n n4 = parameters['n4']\n n5 = parameters['n5']\n NUM_FILTERS_C1 = 20\n \n # Sequential model\n model = keras.models.Sequential()\n \n # Add C1 layer\n # ------------\n # Input_shape (batch, rows, cols, channels) = (-, n1, 1, 1)\n # Output_shape (batch, rows, cols, channels) = (-, n2, 1, NUM_FILTERS_C1)\n model.add(keras.layers.Conv2D(filters=NUM_FILTERS_C1,\n kernel_size=(k1, 1),\n padding='valid',\n data_format=\"channels_last\",\n activation='tanh',\n kernel_initializer='random_uniform',\n bias_initializer='random_uniform',\n input_shape=(n1,1,1)))\n \n # Add M2 layer\n # ------------\n # Input_shape (batch, rows, cols, channels) = (-, n2, 1, NUM_FILTERS_C1)\n # Output_shape (batch, rows, cols, channels) = (-, n3, 1, NUM_FILTERS_C1)\n model.add(keras.layers.MaxPooling2D(pool_size=(k2, 1),\n padding='same',\n data_format=\"channels_last\"))\n \n # Flatten before dense layer\n model.add(keras.layers.Flatten())\n \n # Add F3 layer\n # ------------\n # Intput_shape (batch, rows, cols, channels) = (-, n3 x 1 x NUM_FILTERS_C1)\n # Output_shape (batch, dim) = (-, n4)\n model.add(keras.layers.Dense(units=n4,\n activation='tanh',\n kernel_initializer='random_uniform',\n bias_initializer='random_uniform'))\n \n # Add F4 layer\n # ------------\n # Intput_shape (batch, dim) = (1, n4)\n # Output_shape (batch, dim) = (1, n5)\n model.add(keras.layers.Dense(units=n5,\n activation='softmax',\n kernel_initializer='random_uniform',\n bias_initializer='random_uniform'))\n \n # Compile model\n model.compile(optimizer='sgd',\n loss='mean_squared_error',\n metrics=['accuracy'])\n \n # Print the model summary to output file\n # To print to stdout: model.summary()\n with open(OUTPUT_FILE, 'a') as f:\n # Pass the file handle in as a lambda function to make it callable\n model.summary(print_fn=lambda x: f.write(x + '\\n'))\n \n # Return the model\n return model", "def test_training_multiple(self):\n model = PoincareModel(self.data_large, burn_in=0, negative=3)\n model.train(epochs=2)\n old_vectors = np.copy(model.kv.syn0)\n\n model.train(epochs=1)\n self.assertFalse(np.allclose(old_vectors, model.kv.syn0))\n\n old_vectors = np.copy(model.kv.syn0)\n model.train(epochs=0)\n self.assertTrue(np.allclose(old_vectors, model.kv.syn0))", "def test_init_variants():\n # get data\n X, _, _, _ = get_data()\n # set seed and kernel initialisers for repeatability\n tf.random.set_seed(12345)\n initializer = tf.keras.initializers.Zeros()\n\n # define model architecture\n input_data = Input(shape=X[0].shape)\n xx = Dense(32, activation=\"relu\", kernel_initializer=initializer)(input_data)\n output = Dense(n_classes, activation=\"softmax\", kernel_initializer=initializer)(xx)\n\n # standard way\n model = 
SafeKerasModel(\n inputs=input_data,\n outputs=output,\n name=\"test\",\n num_samples=X.shape[0],\n epochs=EPOCHS,\n )\n check_init_completed(model)\n\n # inputs and outputs not in kwargs\n model2 = SafeKerasModel(\n input_data,\n output,\n \"test\",\n num_samples=X.shape[0],\n epochs=EPOCHS,\n )\n check_init_completed(model2)\n\n # batch size zero\n model3 = SafeKerasModel(\n inputs=input_data,\n outputs=output,\n name=\"test\",\n num_samples=X.shape[0],\n epochs=EPOCHS,\n batch_size=0,\n )\n errstr = \"failed to correct batch_size=0 in init\"\n assert model3.batch_size == 32, errstr", "def get_model_2(parameters):\n # Parameters\n BANDS = parameters['num_features']\n CLASSES = parameters['num_classes']\n \n # Sequential model\n model = keras.models.Sequential()\n \n # Add convolution (1)\n # -------------------\n # Input_shape (batch, rows, cols, channels) = (-, 9, BANDS, 1)\n # Output_shape (batch, rows, cols, channels) = (-, BANDS - 15, 1, 32)\n model.add(keras.layers.Conv2D(filters=32,\n kernel_size=(9, 16),\n padding='same',\n data_format=\"channels_last\",\n activation='tanh',\n input_shape=(9, BANDS,1)))\n \n # Add convolution (2)\n # -------------------\n # Input_shape (batch, rows, cols, channels) = (-, BANDS - 15, 1, 32)\n # Output_shape (batch, rows, cols, channels) = (-, BANDS - 30, 1, 32)\n model.add(keras.layers.Conv2D(filters=32,\n kernel_size=(1, 16),\n padding='same',\n data_format=\"channels_last\",\n activation='tanh'))\n \n # Add convolution (3)\n # -------------------\n # Input_shape (batch, rows, cols, channels) = (-, BANDS - 30, 1, 32)\n # Output_shape (batch, rows, cols, channels) = (-, BANDS - 45, 1, 32)\n model.add(keras.layers.Conv2D(filters=32,\n kernel_size=(1, 16),\n padding='same',\n data_format=\"channels_last\",\n activation='tanh'))\n \n # Flatten before dense layer\n model.add(keras.layers.Flatten())\n \n # Add fully connected (4)\n # -----------------------\n # Intput_shape (batch, rows, cols, channels) = (-, (BANDS - 45) x 1 x 32)\n # Output_shape (batch, dim) = (-, 800)\n model.add(keras.layers.Dense(units=800,\n activation='tanh'))\n \n # Add fully connected (5)\n # -----------------------\n # Intput_shape (batch, dim) = (-, 800)\n # Output_shape (batch, dim) = (-, 800)\n model.add(keras.layers.Dense(units=800,\n activation='softmax'))\n \n # Add fully connected to reduce to number of categories\n # -----------------------------------------------------\n # Intput_shape (batch, dim) = (-, 800)\n # Output_shape (batch, dim) = (-, CLASSES)\n model.add(keras.layers.Dense(units=CLASSES,\n activation='softmax'))\n \n # Compile model\n model.compile(optimizer='adam',\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n \n # Print the model summary to output file\n # To print to stdout: model.summary()\n with open(OUTPUT_FILE, 'a') as f:\n # Pass the file handle in as a lambda function to make it callable\n model.summary(print_fn=lambda x: f.write(x + '\\n'))\n \n # Return the model\n return model", "def test_merge_multiply(self):\n input_tensor = Input(shape=(3,))\n x1 = Dense(4)(input_tensor)\n x2 = Dense(5)(x1)\n x3 = Dense(5)(x1)\n x4 = merge([x2, x3], mode=\"mul\")\n x5 = Dense(7)(x4)\n\n model = Model(input=[input_tensor], output=[x5])\n input_names = [\"data\"]\n output_names = [\"output\"]\n\n spec = keras.convert(model, input_names, output_names).get_spec()\n self.assertIsNotNone(spec)\n\n # Test the model class\n self.assertIsNotNone(spec.description)\n self.assertTrue(spec.HasField(\"neuralNetwork\"))\n\n # Test the inputs and outputs\n 
self.assertEqual(len(spec.description.input), len(input_names))\n self.assertEqual(\n sorted(input_names), sorted(map(lambda x: x.name, spec.description.input))\n )\n self.assertEqual(len(spec.description.output), len(output_names))\n self.assertEqual(\n sorted(output_names), sorted(map(lambda x: x.name, spec.description.output))\n )", "def test_model_equality(self):\r\n class EqualityModel0(Model):\r\n pk = columns.Integer(primary_key=True)\r\n\r\n class EqualityModel1(Model):\r\n kk = columns.Integer(primary_key=True)\r\n\r\n m0 = EqualityModel0(pk=0)\r\n m1 = EqualityModel1(kk=1)\r\n\r\n self.assertEqual(m0, m0)\r\n self.assertNotEqual(m0, m1)", "def test_model_equality(self):\n class EqualityModel0(Model):\n pk = columns.Integer(primary_key=True)\n\n class EqualityModel1(Model):\n kk = columns.Integer(primary_key=True)\n\n m0 = EqualityModel0(pk=0)\n m1 = EqualityModel1(kk=1)\n\n self.assertEqual(m0, m0)\n self.assertNotEqual(m0, m1)", "def assert_predictions_equal(first, second, x):\n preds1 = first.predict(x, batch_size=batch_size)\n preds2 = second.predict(x, batch_size=batch_size)\n np.testing.assert_array_equal(preds1, preds2)", "def test_model_manager_will_return_same_instance_when_instantiated_many_times(self):\n # arrange, act\n # instantiating the model manager class twice\n first_model_manager = ModelManager()\n second_model_manager = ModelManager()\n\n # loading the MLModel objects from configuration\n first_model_manager.load_model(\"tests.mocks.MLModelMock\")\n\n first_model_object = first_model_manager.get_model(qualified_name=\"qualified_name\")\n second_model_object = second_model_manager.get_model(qualified_name=\"qualified_name\")\n\n # assert\n self.assertTrue(str(first_model_manager) == str(second_model_manager))\n self.assertTrue(str(first_model_object) == str(second_model_object))", "def compareAB(model1_name, model2_name, X_test_B, X_test_S, analysis_dir=\"Analysis/\"):\n #Load best weights\n model = tf.keras.models.load_model(\"Models/\"+model1_name)\n bkg_preds1 = model.predict(X_test_B).flatten()\n sig_preds1 = model.predict(X_test_S).flatten()\n\n model = tf.keras.models.load_model(\"Models/\"+model2_name)\n bkg_preds2 = model.predict(X_test_B).flatten()\n sig_preds2 = model.predict(X_test_S).flatten()\n\n sig_eff = []\n bkg_eff = []\n sig_eff_50 = 1.0\n bkg_eff_50 = 1.0\n for thresh in (1-np.arange(0.00005, 0.8, 0.01)):\n bkg_eff_temp = np.sum(bkg_preds1 > thresh)/len(bkg_preds1)\n sig_eff_temp = np.sum(sig_preds1 > thresh)/len(sig_preds1)\n sig_eff.append(sig_eff_temp)\n bkg_eff.append(1/bkg_eff_temp)\n if abs(sig_eff_temp-0.5) < abs(sig_eff_50-0.5):\n sig_eff_50 = sig_eff_temp\n bkg_eff_50 = 1/bkg_eff_temp\n plt.semilogy(sig_eff, bkg_eff)\n plt.annotate(model1_name + ' Background rejection @0.5 Signal efficiency = {:.2e}'.format(bkg_eff_50), xy=(0.05, 0.95), xycoords='axes fraction')\n print(sig_eff_50)\n\n sig_eff = []\n bkg_eff = []\n sig_eff_50 = 1.0\n bkg_eff_50 = 1.0\n for thresh in (1-np.arange(0.00005, 0.8, 0.01)):\n bkg_eff_temp = np.sum(bkg_preds2 > thresh)/len(bkg_preds2)\n sig_eff_temp = np.sum(sig_preds2 > thresh)/len(sig_preds2)\n sig_eff.append(sig_eff_temp)\n bkg_eff.append(1/bkg_eff_temp)\n if abs(sig_eff_temp-0.5) < abs(sig_eff_50-0.5):\n sig_eff_50 = sig_eff_temp\n bkg_eff_50 = 1/bkg_eff_temp\n plt.semilogy(sig_eff, bkg_eff)\n plt.annotate(model2_name + ' Background rejection @0.5 Signal efficiency = {:.3e}'.format(bkg_eff_50), xy=(0.05, 0.88), xycoords='axes fraction')\n print(sig_eff_50)\n\n plt.legend([model1_name, model2_name])\n 
plt.xlabel(\"Signal efficiency\")\n plt.ylabel(\"Background rejection\")\n plt.gcf().set_size_inches(8.3, 5.85)\n plt.savefig(analysis_dir+\"ROC\" + model1_name + \"VS\" + model2_name + \".pdf\", format=\"pdf\")\n plt.show()", "def __eq__(self, other):\n if not isinstance(other, ModelTrainingInfo):\n return False\n\n return self.__dict__ == other.__dict__", "def models_compatible(model_a: ModuleModel, model_b: ModuleModel) -> bool:\n if model_a == model_b:\n return True\n return model_b.value in _load_v2_module_def(model_a)['compatibleWith']", "def test_ids_maker(self):\n firstins = BaseModel()\n secondins = BaseModel()\n self.assertNotEqual(firstins, secondins)", "def test_shared_objects_wrapper(self):\n input_ = keras.Input(shape=(1,))\n unwrapped = keras.layers.Layer(name='unwrapped')\n wrapped = keras.layers.Wrapper(unwrapped, name='wrapped')\n model = keras.Model(inputs=input_,\n outputs=[unwrapped(input_), wrapped(input_)])\n\n # Test recreating directly from config\n config = model.get_config()\n loaded = keras.Model.from_config(config)\n self.assertIs(loaded.layers[1], loaded.layers[2].layer)\n\n # Test saving and loading to disk\n save_format = testing_utils.get_save_format()\n saved_model_dir = self._save_model_dir()\n keras.models.save_model(model, saved_model_dir, save_format=save_format)\n loaded = keras.models.load_model(saved_model_dir)\n self.assertIs(loaded.layers[1], loaded.layers[2].layer)", "def assert_models_equal(self, benchmark1, benchmark2):\n if (not isinstance(benchmark1, detection_comp.FeatureDetectionComparison) or\n not isinstance(benchmark2, detection_comp.FeatureDetectionComparison)):\n self.fail('object was not a FeatureDetectionComparison')\n self.assertEqual(benchmark1.identifier, benchmark2.identifier)\n self.assertEqual(benchmark1._acceptable_radius, benchmark2._acceptable_radius)", "def test_merge_add(self):\n input_tensor = Input(shape=(3,))\n x1 = Dense(4)(input_tensor)\n x2 = Dense(5)(x1)\n x3 = Dense(5)(x1)\n x4 = merge([x2, x3], mode=\"sum\")\n x5 = Dense(7)(x4)\n\n model = Model(input=[input_tensor], output=[x5])\n input_names = [\"data\"]\n output_names = [\"output\"]\n\n spec = keras.convert(model, input_names, output_names).get_spec()\n self.assertIsNotNone(spec)\n\n # Test the model class\n self.assertIsNotNone(spec.description)\n self.assertTrue(spec.HasField(\"neuralNetwork\"))\n\n # Test the inputs and outputs\n self.assertEqual(len(spec.description.input), len(input_names))\n self.assertEqual(\n sorted(input_names), sorted(map(lambda x: x.name, spec.description.input))\n )\n self.assertEqual(len(spec.description.output), len(output_names))\n self.assertEqual(\n sorted(output_names), sorted(map(lambda x: x.name, spec.description.output))\n )", "def keras_model():\n\n model = Sequential([\n Conv2D(8, (2, 2), input_shape=(16, 16, 3,)),\n BatchNormalization(momentum=.3, epsilon=.65),\n AvgPool2D(),\n MaxPool2D(),\n BatchNormalization(momentum=.4, epsilon=.25),\n Conv2D(4, (2, 2), activation=tf.nn.tanh, kernel_regularizer=tf.keras.regularizers.l2(0.5)),\n Flatten(),\n Dense(2, activation='softmax', name=\"keras_model\")])\n return model", "def compare_models(model1, model2):\n # at minimum, numbers of species and reactions should be the same\n if model1.n_species != model2.n_species:\n return False\n if model1.n_reactions != model2.n_reactions:\n return False\n\n for sp1, sp2 in zip(\n sorted(model1.species(), key=attrgetter('name')), \n sorted(model2.species(), key=attrgetter('name'))\n ):\n if sp1.name != sp2.name:\n return False\n\n if 
sp1.composition != sp2.composition:\n return False\n \n if sp1.thermo.n_coeffs == sp2.thermo.n_coeffs:\n if any(sp1.thermo.coeffs != sp2.thermo.coeffs):\n return False\n else:\n return False\n \n if hasattr(sp1, 'transport') or hasattr(sp2, 'transport'):\n if hasattr(sp1, 'transport') and hasattr(sp2, 'transport'):\n # iterate over transport parameters\n params = [a for a in dir(sp1.transport) \n if not a.startswith('__') and \n not callable(getattr(sp1.transport, a))\n ]\n for attr in params:\n if getattr(sp1.transport, attr) != getattr(sp2.transport, attr, 0.0):\n return False\n else:\n return False\n\n for rxn1, rxn2 in zip(\n sorted(model1.reactions(), key=attrgetter('equation')), \n sorted(model2.reactions(), key=attrgetter('equation'))\n ):\n if type(rxn1) != type(rxn2):\n return False\n \n if rxn1.reactants != rxn2.reactants:\n return False\n if rxn1.products != rxn2.products:\n return False\n\n if rxn1.duplicate != rxn2.duplicate:\n return False\n\n # Check rate parameters for elementary and third-body reactions\n if hasattr(rxn1, 'rate') or hasattr(rxn2, 'rate'):\n if hasattr(rxn1, 'rate') and hasattr(rxn2, 'rate'):\n if type(rxn1.rate) != type(rxn2.rate):\n return False\n if len(dir(rxn1.rate)) != len(dir(rxn2.rate)):\n return False\n params = [\n a for a in dir(rxn1.rate) \n if not a.startswith('__') and not callable(getattr(rxn1.rate, a))\n ]\n for attr in params:\n if getattr(rxn1.rate, attr) != getattr(rxn2.rate, attr, 0.0):\n return False\n else:\n return False\n \n # For falloff and chemically activated reactions, check low and high rates\n if hasattr(rxn1, 'low_rate') or hasattr(rxn2, 'low_rate'):\n if hasattr(rxn1, 'low_rate') and hasattr(rxn2, 'low_rate'):\n if type(rxn1.low_rate) != type(rxn2.low_rate):\n return False\n if len(dir(rxn1.low_rate)) != len(dir(rxn2.low_rate)):\n return False\n params = [\n a for a in dir(rxn1.low_rate) \n if not a.startswith('__') and not callable(getattr(rxn1.low_rate, a))\n ]\n for attr in params:\n if getattr(rxn1.low_rate, attr) != getattr(rxn2.low_rate, attr, 0.0):\n return False\n else:\n return False\n\n if hasattr(rxn1, 'high_rate') or hasattr(rxn2, 'high_rate'):\n if hasattr(rxn1, 'high_rate') and hasattr(rxn2, 'high_rate'):\n if type(rxn1.high_rate) != type(rxn2.high_rate):\n return False\n if len(dir(rxn1.high_rate)) != len(dir(rxn2.high_rate)):\n return False\n params = [\n a for a in dir(rxn1.high_rate) \n if not a.startswith('__') and not callable(getattr(rxn1.high_rate, a))\n ]\n for attr in params:\n if getattr(rxn1.high_rate, attr) != getattr(rxn2.high_rate, attr, 0.0):\n return False\n else:\n return False\n \n # check Plog rates\n if hasattr(rxn1, 'rates') or hasattr(rxn2, 'rates'):\n if hasattr(rxn1, 'rates') and hasattr(rxn2, 'rates'):\n if len(rxn1.rates) != len(rxn2.rates):\n return False\n for rate1, rate2 in zip(\n sorted(rxn1.rates, key=lambda rate: rate[0]),\n sorted(rxn2.rates, key=lambda rate: rate[0]),\n ):\n if not np.allclose(rate1[0], rate2[0]):\n return False\n params = ['activation_energy', 'pre_exponential_factor', 'temperature_exponent']\n for param in params:\n if getattr(rate1[1], param, 0.0) != getattr(rate2[1], param, 0.0):\n return False\n \n # check Chebyshev parameters\n if hasattr(rxn1, 'coeffs') or hasattr(rxn2, 'coeffs'):\n if hasattr(rxn1, 'coeffs') and hasattr(rxn2, 'coeffs'):\n if rxn1.nPressure != rxn2.nPressure:\n return False\n if rxn1.nTemperature != rxn2.nTemperature:\n return False\n if (rxn1.Pmax != rxn2.Pmax) or (rxn1.Pmin != rxn1.Pmin):\n return False\n if (rxn1.Tmax != rxn2.Tmax) 
or (rxn1.Tmin != rxn1.Tmin):\n return False\n if not np.allclose(rxn1.coeffs, rxn2.coeffs):\n return False\n \n # ensure matching default efficiencies, if present\n if getattr(rxn1, 'default_efficiency', 1.0) != getattr(rxn2, 'default_efficiency', 1.0):\n return False\n \n if hasattr(rxn1, 'efficiencies') or hasattr(rxn2, 'efficiencies'):\n if hasattr(rxn1, 'efficiencies') and hasattr(rxn2, 'efficiencies'):\n if rxn1.efficiencies != rxn2.efficiencies:\n return False\n else:\n return False\n \n # Check falloff parameters if any\n if hasattr(rxn1, 'falloff') or hasattr(rxn2, 'falloff') :\n if hasattr(rxn1, 'falloff') and hasattr(rxn2, 'falloff'):\n if len(rxn1.falloff.parameters) == len(rxn2.falloff.parameters):\n if any(rxn1.falloff.parameters != rxn2.falloff.parameters):\n return False\n else:\n return False\n else:\n return False\n\n return True", "def __eq__(self, other):\n return (isinstance(other, self.__class__) and\n self._input_dims == other._input_dims and\n self._output_dims == other._output_dims)", "def assert_models_equal(self, benchmark1, benchmark2):\n if (not isinstance(benchmark1, ate.BenchmarkATE) or\n not isinstance(benchmark2, ate.BenchmarkATE)):\n self.fail('object was not a BenchmarkATE')\n self.assertEqual(benchmark1.identifier, benchmark2.identifier)\n self.assertEqual(benchmark1.offset, benchmark2.offset)\n self.assertEqual(benchmark1.max_difference, benchmark2.max_difference)\n self.assertEqual(benchmark1.scale, benchmark2.scale)", "def test_multi_output_metrics_name_stay_same(self, fit):\n # This doesn't work at all, so we can't check whether metric names are\n # correct.\n if not context.executing_eagerly() and not fit:\n self.skipTest('b/181767784')\n\n input_ = keras.Input((4,))\n model = keras.Model(\n input_,\n [keras.layers.Softmax(name='head_0')(keras.layers.Dense(3)(input_)),\n keras.layers.Softmax(name='head_1')(keras.layers.Dense(5)(input_))])\n metric = keras.metrics.BinaryAccuracy()\n model.compile(optimizer='rmsprop',\n loss='mse',\n metrics={'head_0': [metric, 'accuracy']})\n\n x = np.random.rand(2, 4)\n y = {'head_0': np.random.randint(2, size=(2, 3)),\n 'head_1': np.random.randint(2, size=(2, 5))}\n\n # Make sure metrix prefixing works the same regardless of whether the user\n # has fit the model before saving.\n if fit:\n model.fit(x, y, verbose=0)\n\n # Save and reload.\n save_format = testing_utils.get_save_format()\n saved_model_dir = self._save_model_dir()\n keras.models.save_model(model, saved_model_dir, save_format=save_format)\n loaded = keras.models.load_model(saved_model_dir)\n\n # Make sure the metrics names from the model before saving match the loaded\n # model.\n self.assertSequenceEqual(model.metrics_names, loaded.metrics_names)", "def test_bn_fold_find_layers_model_with_multi_input(self):\n\n input1 = tf.keras.Input(name='input1', shape=(10, 10, 3))\n input2 = tf.keras.Input(name='input2', shape=(12, 12, 3))\n x1 = tf.keras.layers.Conv2D(8, (1, 1), name='conv1a')(input1)\n x2 = tf.keras.layers.Conv2D(8, (3, 3), name='conv1b')(input2)\n x = tf.keras.layers.add([x1, x2])\n x = tf.keras.layers.Conv2D(4, (1, 1), name='conv2')(x)\n bn_op = tf.keras.layers.BatchNormalization(fused=True)(x)\n relu = tf.nn.relu(bn_op)\n model = tf.keras.Model(inputs=[input1, input2], outputs=relu)\n\n conv_bn_pairs, bn_conv_pairs, _ = _find_all_batch_norms_to_fold(model)\n assert 1 == len(conv_bn_pairs) + len(bn_conv_pairs)", "def merge_models(model_1, model_2, task=None):\n\n def _merge_models(model_1, model_2):\n\n result_model = copy.deepcopy(model_1)\n\n 
if isinstance(model_1, torch.nn.Embedding):\n\n result_model = _add_embedding_layer(model_1, model_2)\n\n elif isinstance(model_1, torch.nn.Linear):\n result_model = _add_linear_layer(model_1, model_2)\n\n elif isinstance(model_1, torch.nn.LayerNorm):\n result_model = _add_double_norm_layer(model_1, model_2)\n\n elif isinstance(model_1, BertSelfAttention):\n result_model = _add_bert_self_attention_layer(model_1, model_2)\n\n for name_1, name_2 in zip(model_1._modules, model_2._modules):\n module_1 = model_1._modules[name_1]\n module_2 = model_2._modules[name_2]\n\n result_model._modules[name_1] = _merge_models(module_1, module_2)\n\n return result_model\n\n result_model = _merge_models(model_1, model_2)\n\n result_model._text_field_embedder._token_embedders[\"tokens\"].output_dim = 1024\n\n if task == \"QA\":\n result_model._linear_layer = _add_final_linear_layer(\n model_1._linear_layer, model_2._linear_layer\n )\n else:\n result_model._classification_layer = _add_final_linear_layer(\n model_1._classification_layer, model_2._classification_layer\n )\n\n return result_model", "def test_copied_models_are_equal(dbdiskrepo):\n original = fit_model()\n\n shallow = copy(original)\n assert original.artifact.id == shallow.artifact.id\n assert original.artifact.value_id == shallow.artifact.value_id\n assert hash(original) == hash(shallow)\n\n deep = deepcopy(original)\n assert original.artifact.id == deep.artifact.id\n assert original.artifact.value_id == deep.artifact.value_id\n assert hash(original) == hash(deep)", "def demoModel(dim, num_classes):\n import numpy as np\n from keras.models import Sequential, Model\n from keras.layers import Input\n from keras.layers import Conv2D, ZeroPadding2D, MaxPooling2D, Conv2DTranspose, Cropping2D\n from keras.layers import concatenate, UpSampling2D, Reshape\n import keras.backend as K\n\n # Build model\n input_image = Input(shape=(dim, dim, 3))\n\n conv = Conv2D(24, (3, 3), activation='relu', padding='same')(input_image)\n\n pool = MaxPooling2D((2, 2), strides=(2, 2), name=\"pool\")(conv)\n\n conv1x1 = Conv2D(24, (1, 1), padding='same', activation='relu')(pool)\n\n up = UpSampling2D(size=(2,2))(conv1x1)\n up_conv = Conv2D(24, 2, activation = 'relu', padding = 'same')(up)\n merge = concatenate([conv,up_conv], axis = 3)\n\n conv = Conv2D(12, 3, activation = 'relu', padding = 'same')(merge)\n\n activation = Conv2D(num_classes, (1, 1), activation = \"softmax\")(conv)\n\n # need to reshape for training\n output = Reshape((dim*dim, 3))(activation)\n\n model = Model(inputs=[input_image], outputs=output)\n\n model.summary()\n\n return model", "def compare_models(model1,model2):\n\n # initialisation:\n n_radial = 0\n n_radial_numax = 0\n n_non_radial = 0\n n_non_radial_numax = 0\n result = np.zeros((6+nglb,),dtype=gtype)\n # define frequency interval around numax:\n numax = 0.5*(model1.numax/model1.glb[ifreq_ref] \\\n + model2.numax/model2.glb[ifreq_ref])\n a = 0.8*numax\n b = 1.2*numax\n\n # compare frequency spectra:\n size1 = len(model1.modes)\n size2 = len(model2.modes)\n i1 = i2 = 0\n while((i1 < size1) and (i2 < size2)):\n if (model1.modes['l'][i1] < model2.modes['l'][i2]): i1+=1; continue\n if (model1.modes['l'][i1] > model2.modes['l'][i2]): i2+=1; continue\n if (model1.modes['n'][i1] < model2.modes['n'][i2]): i1+=1; continue\n if (model1.modes['n'][i1] > model2.modes['n'][i2]): i2+=1; continue\n\n # now the two modes have the same n and l values:\n diff = abs(model1.modes['freq'][i1] - model2.modes['freq'][i2])\n avg_freq =(model1.modes['freq'][i1] + 
model2.modes['freq'][i2])/2.0\n if (model1.modes['l'][i1] == 0):\n if (result[0] < diff): result[0] = diff\n diff *= diff # square diff\n result[1] += diff\n n_radial += 1\n # in python, this is called an interval comparison:\n if (a <= avg_freq <= b):\n result[2] += diff\n n_radial_numax += 1\n else:\n if (result[3] < diff): result[3] = diff\n diff *= diff # square diff\n result[4] += diff\n n_non_radial += 1\n if (a <= avg_freq <= b):\n result[5] += diff\n n_non_radial_numax += 1\n i1+=1\n i2+=1\n\n # avoid divisions by zero:\n if (n_radial > 0):\n result[1] = math.sqrt(result[1]/float(n_radial))\n else:\n result[1] = np.nan\n\n if (n_radial_numax > 0):\n result[2] = math.sqrt(result[2]/float(n_radial_numax))\n else:\n result[2] = np.nan\n\n if (n_non_radial > 0):\n result[4] = math.sqrt(result[4]/float(n_non_radial))\n else:\n result[4] = np.nan\n\n if (n_non_radial_numax > 0):\n result[5] = math.sqrt(result[5]/float(n_non_radial_numax))\n else:\n result[5] = np.nan\n\n # absolute differences on global parameters:\n result[6:6+nglb] = np.absolute(model1.glb - model2.glb)\n\n return result", "def __eq__(self, other):\n if not isinstance(other, Model):\n return False\n return self.graph == other.graph", "def __eq__(self, other: 'ModelParameters') -> bool:\n if not isinstance(other, ModelParameters) or len(self) != len(other):\n return False\n else:\n return all(torch.equal(p_self, p_other) for p_self, p_other in zip(self.parameters, other.parameters))", "def sub_model_net(self):\r\n # define input\r\n x = keras.Input(shape=(960,), name='input')\r\n fc_2 = keras.layers.Dense(160, name='fc_2')(x)\r\n add_1 = keras.layers.Activation('relu')(fc_2)\r\n drop = keras.layers.Dropout(0.5)\r\n # output\r\n y_hat = keras.layers.Dense(1283, activation='softmax', name='output')(add_1)\r\n model = keras.Model(inputs=x, outputs=y_hat)\r\n\r\n return model", "def compare_thresholded_data_with_models(self):\n pass", "def two_var_medium_model(optimizer='rmsprop', init='glorot_uniform', dropout=0.3):\n model = Sequential()\n model.add(Dense(32, input_dim=2, kernel_initializer=init, activation='relu', kernel_constraint=maxnorm(3)))\n model.add(Dense(16, kernel_initializer=init, activation='relu'))\n model.add(Dense(1))\n # Compile model\n model.compile(loss='mean_squared_error', optimizer=optimizer)\n return model", "def compare(model, input1, input2): \n patch_image_time_start = time.time()\n input1_patches = get_patches_non_overlap(input1, 48, 48)\n input2_patches = get_patches_non_overlap(input2, 48, 48)\n patch_image_time_end = time.time()\n compare_image_time_start = time.time()\n pred = model.predict([input1_patches, input2_patches])\n compare_image_time_end = time.time()\n\n result[\"patch_retrieval_time\"].append(patch_image_time_end - patch_image_time_start)\n result[\"image_comparison_time\"].append(compare_image_time_end - compare_image_time_start)\n\n return np.sum(pred)", "def test_keras_model_with_no_bias(self):\n\t\t\n\t\tfrom keras.models import Sequential\n\t\tfrom keras.layers import Dense, Activation\n\t\t\n\t\tmodel = Sequential()\n\t\tmodel.add(Dense(100, input_shape=(190,), use_bias=False))\n\t\tmodel.add(Activation(\"relu\"))\n\t\tmodel.add(Dense(10, use_bias=False))\n\t\tmodel.add(Activation('sigmoid'))\n\t\t\n\t\twatcher = ww.WeightWatcher(model=model)\n\t\tdetails = watcher.describe()\n\t\tprint(details)\n\t\tself.assertTrue(len(details)==2)\n\t\t\n\t\tdetails = watcher.analyze()\n\t\tprint(details)\n\t\tself.assertTrue(len(details)==2)\n\t\t\n\t\t\n\t\tdetails = 
watcher.analyze(min_evals=20)\n\t\tprint(details[['layer_id', 'M', 'num_evals']])\n\t\tself.assertTrue(len(details)==1)", "def NNModel(X_train, X_test, y_train, y_test ): \r\n model = keras.Sequential(\r\n [\r\n # meanify layers\r\n # keras.layers.Dense(25, activation='sigmoid' ),\r\n # keras.layers.Dense(10, activation='sigmoid' ),\r\n keras.layers.Dropout(0.01),\r\n keras.layers.Dense(25, activation='relu' ),\r\n keras.layers.Dropout(0.01),\r\n keras.layers.Dense(10, activation='relu' ),\r\n keras.layers.Dropout(0.01),\r\n keras.layers.Dense(5, activation='relu' ),\r\n keras.layers.Dense( 1, activation='sigmoid' )\r\n\r\n # conv layers\r\n # keras.layers.Dropout(0.2),\r\n # keras.layers.Dense(50, activation='relu', kernel_regularizer=keras.regularizers.l1_l2(l1=1e-2, l2=1e-2) ),\r\n # keras.layers.Dropout(0.2),\r\n # keras.layers.Dense(25, activation='relu', kernel_regularizer=keras.regularizers.l1_l2(l1=1e-2, l2=1e-2) ),\r\n # # keras.layers.Dropout(0.2),\r\n # keras.layers.Dense( 1, activation='relu', kernel_regularizer=keras.regularizers.l1_l2(l1=1e-2, l2=1e-2) )\r\n ]\r\n )\r\n\r\n model.compile(\r\n optimizer='rmsprop', \r\n loss=keras.losses.BinaryCrossentropy(),\r\n metrics=['accuracy']\r\n )\r\n\r\n\r\n # tinker with the epoch\r\n epoch = 500\r\n history = model.fit(X_train, y_train, validation_split=0.1, shuffle=True, epochs=epoch)\r\n fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2) # two axes on figure\r\n \r\n ax1.plot(history.history['accuracy'])\r\n ax1.plot(history.history['val_accuracy'])\r\n ax1.set(xlabel='epoch', ylabel='accuracy')\r\n ax1.legend(['test accuracy', 'validation accuracy'])\r\n \r\n ax2.plot(history.history['loss'])\r\n ax2.plot(history.history['val_loss'])\r\n ax2.set(xlabel='epoch', ylabel='loss')\r\n ax2.legend(['test loss', 'validation loss'])\r\n\r\n print(model.summary()) \r\n plt.show()\r\n \r\n # model save paths for meanify and conv\r\n savedModelPath = {\r\n 'meanify': 'saved-models-meanify',\r\n 'conv': 'saved-models-conv'\r\n }\r\n # keras.models.save_model(\r\n # model=model, \r\n # save_format='tf', \r\n\r\n # # write the file path, 'conv' vs 'meanify'\r\n # # filepath=savedModelPath['conv']\r\n # filepath=savedModelPath['meanify']\r\n # )\r\n return model.evaluate(X_test, y_test )", "def keras_model_oh(X_train, y_train, max_epochs=20, batch_size=16, train_size=0.85):\n num_classes = len(np.unique(y_train))\n num_features = X_train.shape[1]\n\n print(\"Building model...\")\n\n model = Sequential()\n model.add(Dropout(0.05))\n\n model.add(Dense(num_features, 1024, init='glorot_uniform'))\n model.add(PReLU((1024,)))\n model.add(BatchNormalization((1024,)))\n model.add(Dropout(0.5))\n\n model.add(Dense(1024, 512, init='glorot_uniform'))\n model.add(PReLU((512,)))\n model.add(BatchNormalization((512,)))\n model.add(Dropout(0.5))\n\n model.add(Dense(512, 256, init='glorot_uniform'))\n model.add(PReLU((256,)))\n model.add(BatchNormalization((256,)))\n model.add(Dropout(0.5))\n\n model.add(Dense(256, num_classes, init='glorot_uniform'))\n model.add(Activation('softmax'))\n\n sgd = SGD(lr=0.1, decay=1e-6, momentum=0.1, nesterov=True)\n model.compile(loss='categorical_crossentropy', optimizer='adam')\n\n print(\"Training model...\")\n X = X_train\n y = np_utils.to_categorical(y_train)\n history = model.fit(X, y, nb_epoch=max_epochs, batch_size=batch_size, verbose=2, validation_split=1-train_size, show_accuracy=True)\n\n return model, history", "def compare_models(model1, model2, rtol=1e-7, atol=0., check_inverse=True):\n\n from astropy.modeling 
import Model\n from numpy.testing import assert_allclose\n\n if not (isinstance(model1, Model) and isinstance(model2, Model)):\n raise TypeError('Inputs must be Model instances')\n\n if model1 is model2:\n return\n\n # Require each model to be composed of same number of constituent models:\n assert model1.n_submodels == model2.n_submodels\n\n # Treat everything like an iterable compound model:\n if model1.n_submodels == 1:\n model1 = [model1]\n model2 = [model2]\n\n # Compare the constituent model definitions:\n for m1, m2 in zip(model1, model2):\n assert type(m1) == type(m2)\n assert len(m1.parameters) == len(m2.parameters)\n # NB. For 1D models the degrees match if the numbers of parameters do\n if hasattr(m1, 'x_degree'):\n assert m1.x_degree == m2.x_degree\n if hasattr(m1, 'y_degree'):\n assert m1.y_degree == m2.y_degree\n if hasattr(m1, 'domain'):\n assert m1.domain == m2.domain\n if hasattr(m1, 'x_domain'):\n assert m1.x_domain == m2.x_domain\n if hasattr(m1, 'y_domain'):\n assert m1.y_domain == m2.y_domain\n\n # Compare the model parameters (coefficients):\n assert_allclose(model1.parameters, model2.parameters, rtol=rtol, atol=atol)\n\n # Now check for any inverse models and require them both to have the same\n # type or be undefined:\n try:\n inverse1 = model1.inverse\n except NotImplementedError:\n inverse1 = None\n try:\n inverse2 = model2.inverse\n except NotImplementedError:\n inverse2 = None\n\n assert type(inverse1) == type(inverse2)\n\n # Compare inverses only if they exist and are not the forward model itself:\n if inverse1 is None or (inverse1 is model1 and inverse2 is model2):\n check_inverse = False\n\n # Recurse over the inverse models (but not their inverses in turn):\n if check_inverse:\n compare_models(inverse1, inverse2, rtol=rtol, atol=atol,\n check_inverse=False)", "def build_model(self):\n if not os.path.isdir(os.path.join(self.save_dir, self.name)):\n os.mkdir(os.path.join(self.save_dir, self.name))\n self.fitted = False\n else:\n self.fitted = True\n \n if self.hidden_ratio != 1.0:\n hidden_dim_A = int(self.dimension_A * self.hidden_ratio)\n hidden_dim_V = int(self.dimension_V * self.hidden_ratio)\n hidden_dim = int((self.dimension_A + self.dimension_V) * self.hidden_ratio / 4)\n else:\n hidden_dim_A = int(self.dimension_A * 0.75)\n hidden_dim_V = int(self.dimension_V * 0.75)\n hidden_dim = int((self.dimension_A + self.dimension_V) * 0.5)\n\n input_data_A = Input(shape=(self.dimension_A, ), name='audio_input')\n input_data_V = Input(shape=(self.dimension_V, ), name='video_input')\n encoded_input = Input(shape=(hidden_dim, ))\n \n encoded_A = Dense(hidden_dim_A, \n activation='relu', kernel_initializer='he_uniform', \n name='audio_encoded')(input_data_A)\n encoded_V = Dense(hidden_dim_V, \n activation='relu', kernel_initializer='he_uniform', \n name='video_encoded')(input_data_V)\n\n shared = Concatenate(axis=1, name='concat')([encoded_A, encoded_V])\n if self.sparse:\n encoded = Dense(hidden_dim, \n activation='relu',\n activity_regularizer=self.sparse_regularizer,\n kernel_initializer='he_uniform', \n name='shared_repres')(shared)\n else:\n encoded = Dense(hidden_dim, \n activation='relu',\n kernel_initializer='he_uniform', \n name='shared_repres')(shared)\n \n decoded_A = Dense(hidden_dim_A, \n activation='relu', kernel_initializer='he_uniform', \n name='audio_decoded')(encoded)\n decoded_V = Dense(hidden_dim_V, \n activation='relu', kernel_initializer='he_uniform', \n name='video_decoded')(encoded)\n\n decoded_A = Dense(self.dimension_A, 
activation='linear',\n name='audio_recon')(decoded_A)\n decoded_V = Dense(self.dimension_V, activation='linear',\n name='video_recon')(decoded_V)\n\n self.autoencoder = Model(inputs=[input_data_A, input_data_V], outputs=[decoded_A, decoded_V])\n self.encoder = Model(inputs=[input_data_A, input_data_V], outputs=encoded)\n self.decoder_A = Model(inputs=encoded_input, \n outputs=self.autoencoder.get_layer('audio_recon')(\n self.autoencoder.get_layer('audio_decoded')(\n encoded_input)))\n self.decoder_V = Model(inputs=encoded_input, \n outputs=self.autoencoder.get_layer('video_recon')(\n self.autoencoder.get_layer('video_decoded')(\n encoded_input)))\n\n # configure model\n self.autoencoder.compile(optimizer='adam', \n loss='mse',\n metrics=[metrics.mse, metrics.mse],\n loss_weights=[0.5, 0.5])\n print(\"--\" * 20)\n print(\"autoencoder\")\n print(self.autoencoder.summary())\n print(\"--\" * 20)\n print(\"encoder\")\n print(self.encoder.summary())\n print(\"--\" * 20)\n print(\"decoder (A)\")\n print(self.decoder_A.summary())\n print(\"--\" * 20)\n print(\"decoder (V)\")\n print(self.decoder_V.summary())\n print(\"--\" * 20)\n\n plot_model(self.autoencoder, show_shapes=True, to_file=os.path.join(self.save_dir, self.name, 'bimodal_DDAE.png'))", "def validate_model_with_params(self, model_params: dict) -> None:\n\n # init model\n model = CrossAttentionTransformerEncoder(**model_params)\n\n # init random sequences that don't exceed max sequences length\n seq_a_len = random.randint(0, model_params[\"max_seq_len_a\"])\n seq_b_len = random.randint(0, model_params[\"max_seq_len_b\"])\n batch_size = random.randint(1, 10)\n s1 = torch.randint(0, model_params[\"num_tokens_a\"], (batch_size, seq_a_len))\n s2 = torch.randint(0, model_params[\"num_tokens_b\"], (batch_size, seq_b_len))\n\n # processing sample\n output = model(s1, s2)\n\n # validation\n assert output.shape[0] == batch_size\n if output[:, 0].shape[1] != model_params[\"output_dim\"]:\n raise Exception(\n f\"Expected output dimension to be {model_params['output_dim']}, but got: {output.shape[1]}. 
used model parameters: {model_params}.\"\n )", "def test_noise_models_not_equal(self):\n error = pauli_error([['X', 1]])\n\n model1 = NoiseModel()\n model1.add_all_qubit_quantum_error(error, ['u3'], False)\n\n model2 = NoiseModel(basis_gates=['u3', 'cx'])\n model2.add_all_qubit_quantum_error(error, ['u3'], False)", "def test_DP_used():\n # should pass aftyer model compiled **and** fitted with DP optimizer\n model1, X, y, Xval, yval = make_small_model(num_hidden_layers=1)\n loss = tf.keras.losses.CategoricalCrossentropy(\n from_logits=False, reduction=tf.losses.Reduction.NONE\n )\n model1.compile(loss=loss)\n dp_used, msg = safekeras.check_DP_used(model1.optimizer)\n assert dp_used is False\n model1.fit(X, y, validation_data=(Xval, yval), epochs=EPOCHS, batch_size=20)\n dp_used, msg = safekeras.check_DP_used(model1.optimizer)\n assert dp_used is True\n\n # this model gets changed to non-DP by calling the superclass compile()\n # so should fail all the checks\n model2, _, _, _, _ = make_small_model(num_hidden_layers=1)\n super(SafeKerasModel, model2).compile(loss=loss, optimizer=\"SGD\")\n model2.fit(X, y, validation_data=(Xval, yval), epochs=EPOCHS, batch_size=20)\n dp_used, msg = safekeras.check_DP_used(model2.optimizer)\n assert dp_used is False, msg", "def create_org_model( width=28, \r\n height=28, channel=1, verbose=True,epochs=10):\r\n input1 = Input(\r\n shape=(\r\n width,\r\n height,\r\n channel,\r\n ), name='concat_input')\r\n conv1 = Conv2D(32, kernel_size=5, activation='relu', padding='same')\r\n conv2 = Conv2D(32, kernel_size=5, activation='relu', padding='same')\r\n conv3 = Conv2D(64, kernel_size=3, activation='relu', padding='same')\r\n conv4 = Conv2D(64, kernel_size=3, activation='relu', padding='same')\r\n dense1 = Dense(256, activation='relu')\r\n predict = Dense(10, activation='softmax')\r\n\r\n conv1o = conv1(input1)\r\n conv2o = conv2(conv1o)\r\n pool1 = MaxPooling2D(pool_size=(2, 2))(conv2o)\r\n drop1 = Dropout(.25)(pool1)\r\n conv3o = conv3(drop1)\r\n conv4o = conv4(conv3o)\r\n pool2 = MaxPooling2D(pool_size=(2, 2), strides=(2,2))(conv4o)\r\n drop2 = Dropout(.25)(pool2)\r\n drop2f = Flatten()(drop2)\r\n fc1 = dense1(drop2f)\r\n softmax1 = predict(fc1)\r\n\r\n drop2_2 = Input(shape=(7,7,64), name='concat_input') \r\n drop2f_2 = Flatten()(drop2_2)\r\n fc1_2 = dense1(drop2f_2)\r\n softmax1_2 = predict(fc1_2)\r\n\r\n mlp = Model(input1, softmax1)\r\n optimizer = tf.keras.optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)\r\n mlp.compile(\r\n loss='sparse_categorical_crossentropy',\r\n optimizer=optimizer,\r\n metrics=['accuracy'])\r\n\r\n\r\n mlp.load_weights(model_dir+'complete_model.h5')\r\n\r\n for layer in mlp.layers:\r\n layer.trainable = False\r\n\r\n feature_model = Model(input1, drop2)\r\n predict_model = Model(drop2_2, softmax1_2)\r\n\r\n return feature_model, predict_model, mlp", "def creat_model_dcca(layer_sizes1, layer_sizes2, input_size1, input_size2, learning_rate, reg_par, outdim_size, use_all_singular_values, beta):\n view1_model = build_mlp_net(layer_sizes1, input_size1, reg_par)\n view2_model = build_mlp_net(layer_sizes2, input_size2, reg_par)\n in_a = Input(shape=(input_size1,))\n in_b = Input(shape=(input_size2,))\n in_c = Input(shape=(1,))\n out_a= view1_model(in_a)\n out_b= view2_model(in_b)\n concat1 = Lambda(myconcat1)([out_a,out_b])\n concat2 = Lambda(myconcat1)([concat1,in_c])\n model = Model([in_a, in_b, in_c], concat2, name='all_model')\n model_optimizer = RMSprop(lr=0.001)\n model.compile(loss=cca_loss(outdim_size, True, 200, True, 
beta), optimizer=model_optimizer)\n return model", "def _optimization(dataset1, dataset2, nb_epochs=3000):\n\n x1_mean = dataset1['data'].mean()\n x1_std = dataset1['data'].std()\n x1 = (dataset1['data'] - x1_mean) / (x1_std)\n y1 = dataset1['labels']\n Y1 = dataset1['hot_labels']\n\n x2_mean = dataset2['data'].mean()\n x2_std = dataset2['data'].std()\n x2 = (dataset2['data'] - x2_mean) / (x2_std)\n\n x_model1 = Input(x1.shape[1:])\n y_model1 = Dropout(0.1)(x_model1)\n y_model1 = Dense(50, activation='relu')(x_model1)\n y_model1 = Dropout(0.2)(y_model1)\n y_model1 = Dense(50, activation='relu')(y_model1)\n out_model1 = Dense(len(np.unique(y1)), activation='softmax')(y_model1)\n\n model1 = Model(input=x_model1, output=out_model1)\n\n optimizer = keras.optimizers.Adadelta()\n model1.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])\n\n reduce_lr = ReduceLROnPlateau(monitor='loss', factor=0.5, patience=200, min_lr=0.1)\n\n hist = model1.fit(x1, Y1, batch_size=x1.shape[0], nb_epoch=nb_epochs, verbose=1, shuffle=True, callbacks=[reduce_lr])\n\n dataset2_new_labels = []\n\n for i in range(x2.shape[0]):\n xTrain = x2[i,:].reshape((1,x2.shape[1]))\n dataset2_new_labels.append(np.argmax(model1.predict(xTrain, batch_size=1)))\n\n # Print the testing results which has the l in range(x_train.shape[0]):\n # for i in range(len(x_test1)):\n # xTest = x_test1[i,:].reshape((1,2048))\n # print((np.argmax(model.predict(xTest, batch_size=1)), y_test1[i]))\n # log = pd.DataFrame(hist.history)\n # print(\"saving results for 100 nodes\" + _MODE + fname)\n # log.to_json('accuracies/accuracy_100_' + _MODE + fname + '.json')\n\n # with open('Text_Files/' + fname + '_results.txt', 'w') as text_file:\n # text_file.write(fname + '<<<=====>>>' + str(max(log.val_acc.values)))\n\n # assert 2==1\n\n x_model1 = []\n y_model1 = []\n out_model1 = []\n model1 = []\n\n return dataset2_new_labels", "def construct_model():\n # model = Sequential()\n # model.add(Dense(units=64, activation='relu', input_dim=100))\n # model.add(Dense(units=10, activation='softmax'))\n # model.compile(loss='categorical_crossentropy',\n # optimizer='sgd',\n # metrics=['accuracy'])\n # return model\n\n model = Sequential()\n # Input Layer\n model.add(Conv2D(64, 3, data_format='channels_last', activation='relu', padding='same',\n input_shape=(img_width, img_height, 3)))\n model.add(MaxPool2D(pool_size=2, strides=2))\n # Hidden Layer 1\n model.add(Conv2D(64, 3, activation='relu', padding='same'))\n model.add(MaxPool2D(pool_size=2, strides=2))\n\n # Hidden Layer 2\n model.add(Conv2D(128, 3, activation='relu', padding='same'))\n model.add(Conv2D(128, 3, activation='relu', padding='same', strides=2))\n model.add(MaxPool2D(pool_size=2, strides=2))\n\n # Hidden Layer 3\n model.add(Conv2D(256, 3, activation='relu', padding='same'))\n model.add(Conv2D(256, 3, activation='relu', padding='same'))\n model.add(Conv2D(256, 3, activation='relu', padding='same', strides=2))\n model.add(MaxPool2D(pool_size=2, strides=2))\n\n\n # Fully Connected Layer\n model.add(Flatten())\n # 512 Neuron Layer\n model.add(Dense(512, activation='relu'))\n model.add(Dropout(0.5))\n # Output Layer\n model.add(Dense(num_of_classes))\n model.add(Activation('softmax'))\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n return model", "def assert_models_equal(self, benchmark1, benchmark2):\n if (not isinstance(benchmark1, detection_comp.FeatureDetectionComparisonResult) or\n not isinstance(benchmark2, 
detection_comp.FeatureDetectionComparisonResult)):\n self.fail('object was not a FeatureDetectionComparisonResult')\n self.assertEqual(benchmark1.identifier, benchmark2.identifier)\n self.assertEqual(benchmark1.success, benchmark2.success)\n self.assertEqual(benchmark1.benchmark, benchmark2.benchmark)\n self.assertEqual(benchmark1.trial_result, benchmark2.trial_result)\n self.assertEqual(benchmark1.reference_trial_result, benchmark2.reference_trial_result)\n self.assertEqual(benchmark1._feature_changes, benchmark2._feature_changes)\n self.assertEqual(benchmark1._changes_id, benchmark2._changes_id)", "def test_num_layers_with_model(self):\n\t\tdetails = self.watcher.describe(model=self.model)\n\t\tprint(\"Testing Keras on VGG16\")\n\t\tself.assertEqual(len(details), 16)", "def testShapesSame(self, use_bias):\n\n batch_size = random.randint(1, 100)\n in_height = random.randint(10, 288)\n in_width = random.randint(10, 288)\n in_channels = random.randint(1, 10)\n out_channels = random.randint(1, 32)\n kernel_shape_h = random.randint(1, 11)\n kernel_shape_w = random.randint(1, 11)\n\n inputs = tf.placeholder(\n tf.float32,\n shape=[batch_size, in_height, in_width, in_channels])\n\n conv1 = snt.Conv2D(\n name=\"conv1\",\n output_channels=out_channels,\n kernel_shape=[kernel_shape_h, kernel_shape_w],\n padding=snt.SAME,\n stride=1,\n use_bias=use_bias)\n\n output = conv1(inputs)\n\n self.assertTrue(\n output.get_shape().is_compatible_with(\n [batch_size, in_height, in_width, out_channels]))\n\n self.assertTrue(\n conv1.w.get_shape().is_compatible_with(\n [kernel_shape_h, kernel_shape_w, in_channels, out_channels]))\n\n if use_bias:\n self.assertTrue(\n conv1.b.get_shape().is_compatible_with(\n [out_channels]))", "def test_instance_equality(self):\n class EqualityModel(Model):\n pk = columns.Integer(primary_key=True)\n\n m0 = EqualityModel(pk=0)\n m1 = EqualityModel(pk=1)\n\n self.assertEqual(m0, m0)\n self.assertNotEqual(m0, m1)", "def multiple_input_model():\n\n input1 = tf.keras.Input(name='input1', shape=(10, 10, 3))\n input2 = tf.keras.Input(name='input2', shape=(12, 12, 3))\n x1 = tf.keras.layers.Conv2D(8, (1, 1), name='conv1a')(input1)\n x2 = tf.keras.layers.Conv2D(8, (3, 3), name='conv1b')(input2)\n x = tf.keras.layers.add([x1, x2])\n x = tf.keras.layers.Conv2D(4, (1, 1), name='conv2')(x)\n x = tf.keras.layers.Flatten()(x)\n outputs = tf.keras.layers.Dense(2, activation=tf.nn.softmax, name=\"multiple_input_model\")(x)\n\n return outputs", "def test_instance_equality(self):\r\n class EqualityModel(Model):\r\n pk = columns.Integer(primary_key=True)\r\n\r\n m0 = EqualityModel(pk=0)\r\n m1 = EqualityModel(pk=1)\r\n\r\n self.assertEqual(m0, m0)\r\n self.assertNotEqual(m0, m1)", "def medium2_model(optimizer='rmsprop', init='glorot_uniform', dropout=0.25):\n model = Sequential()\n model.add(Dropout(dropout, input_shape=(12,)))\n model.add(Dense(32, input_dim=12, kernel_initializer=init, activation='relu', kernel_constraint=maxnorm(4)))\n model.add(Dropout(dropout))\n model.add(Dense(32, kernel_initializer=init, activation='relu', kernel_constraint=maxnorm(4)))\n model.add(Dropout(dropout))\n model.add(Dense(1, kernel_initializer=init))\n # Compile model\n model.compile(loss='mean_squared_error', optimizer=optimizer)\n return model", "def keras_model_fn(model_config, vocab_size, embedding_size, embeddings):\n ## hyperparams\n model_name = model_config['model_name']\n num_class = model_config['num_class']\n lstm_hs = model_config['lstm_hs']\n gru_hs = model_config['gru_hs']\n learning_rate = 
model_config['learning_rate']\n \n ## build model - , weights=[embeddings[1]]\n inputs = ks.Input(shape=(None,), dtype='int32', name='inputs')\n embedded_sequences_ft1 = layers.Embedding(vocab_size, embedding_size, trainable = True, mask_zero = False)(inputs)\n embedded_sequences_ft2 = layers.Embedding(vocab_size, embedding_size, trainable = True, mask_zero = False)(inputs)\n concat_embed = layers.concatenate([embedded_sequences_ft1 ,embedded_sequences_ft2])\n concat_embed = layers.SpatialDropout1D(0.5)(concat_embed)\n x = layers.Bidirectional(layers.CuDNNLSTM(lstm_hs, return_sequences = True))(concat_embed)\n x, x_h, x_c = layers.Bidirectional(layers.CuDNNGRU(gru_hs, return_sequences = True, return_state = True))(x)\n x_1 = layers.GlobalMaxPool1D()(x)\n x_2 = layers.GlobalAvgPool1D()(x)\n x_out = layers.concatenate([x_1 ,x_2, x_h])\n x_out = layers.BatchNormalization()(x_out)\n outputs = layers.Dense(num_class, activation = 'softmax', name = 'outputs')(x_out) # outputs\n model = ks.Model(inputs, outputs, name = model_name)\n \n ## compile\n model.compile(loss = 'categorical_crossentropy', \n optimizer = ks.optimizers.Adam(lr=learning_rate, clipnorm=.25, beta_1=0.7, beta_2=0.99), \n metrics = ['categorical_accuracy', ks.metrics.TopKCategoricalAccuracy(k=3)]) # metric what?\n return model", "def testKerasModel(self):\n input_data = {\"x\": constant_op.constant(1., shape=[1, 1])}\n\n # Create a simple Keras model.\n x = [-1, 0, 1, 2, 3, 4]\n y = [-3, -1, 1, 3, 5, 7]\n\n model = keras.models.Sequential(\n [keras.layers.Dense(units=1, input_shape=[1])])\n model.compile(optimizer=\"sgd\", loss=\"mean_squared_error\")\n model.fit(x, y, epochs=1)\n\n @def_function.function(input_signature=[\n tensor_spec.TensorSpec(shape=[1, 1], dtype=dtypes.float32)\n ])\n def to_save(x):\n return model(x)\n\n root, output_func = self._freezeModel(to_save)\n self._testConvertedFunction(root, root.f, output_func, input_data)", "def check_if_model_is_valid(self, num_inputs, num_outputs):\n if not self.is_model_init:\n self.init_model(num_inputs, num_outputs)\n else:\n if (not self.number_outputs == num_outputs) or (not self.number_inputs == num_inputs):\n self.init_model(num_inputs, num_outputs)", "def __eq__(self, other):\n return (self.app_id == other.app_id and\n dict.__eq__(self._model_sigs, other._model_sigs))", "def check_model(expected_model, actual_model):\n assert (expected_model == actual_model), \\\n \"Not Compare model: Expected model:\\n {0}\\nActual model:\\n {1}\".format(expected_model, actual_model)", "def __eq__(self, other):\n if not isinstance(other, LookmlModel):\n return False\n\n return self.__dict__ == other.__dict__", "def init_two_layer_model(input_size, hidden_size, output_size):\n # initialize a model\n model = {}\n model['W1'] = 0.00001 * np.random.randn(input_size, hidden_size)\n model['b1'] = np.zeros(hidden_size)\n model['W2'] = 0.00001 * np.random.randn(hidden_size, output_size)\n model['b2'] = np.zeros(output_size)\n return model", "def build_model(model_id1='bert-base-multilingual-cased',\n model_id2='bert-base-multilingual-uncased',\n max_len=192, dropout=0.2,\n **_):\n print(model_id1, model_id2)\n\n transformer1 = TFAutoModel.from_pretrained(model_id1)\n transformer2 = TFAutoModel.from_pretrained(model_id2)\n\n input_word_ids1 = Input(shape=(max_len,), dtype=tf.int32, name=\"input_word_ids1\")\n out1 = transformer1(input_word_ids1)\n\n input_word_ids2 = Input(shape=(max_len,), dtype=tf.int32, name=\"input_word_ids2\")\n out2 = transformer2(input_word_ids2)\n\n sequence_output1 
= out1[0]\n sequence_output2 = out2[0]\n cls_token1 = sequence_output1[:, 0, :]\n cls_token2 = sequence_output2[:, 0, :]\n\n x = Dropout(dropout)(cls_token1) + Dropout(dropout)(cls_token2)\n out = Dense(1, activation='sigmoid')(x)\n\n model = Model(inputs=[input_word_ids1, input_word_ids2], outputs=out)\n\n return model", "def __matmul__(self, other: 'ModelParameters') -> 'ModelParameters':\n raise NotImplementedError()", "def baseline_model(optimizer='rmsprop', init='glorot_uniform', dropout=0.2):\n model = keras.models.Sequential()\n model.add(Dropout(dropout, input_shape=(12,)))\n model.add(Dense(12, input_dim=12, kernel_initializer=init, activation='relu'))\n model.add(Dropout(dropout))\n model.add(Dense(1, kernel_initializer=init))\n # Compile model\n model.compile(loss='mean_squared_error', optimizer=optimizer)\n return model", "def L2X(train = True):\n print('Loading dataset...') \n x_train, y_train, x_val, y_val, id_to_word = load_data()\n #pred_train = np.load('data/pred_train.npy')\n #pred_val = np.load('data/pred_val.npy') \n print('Creating model...')\n\n # P(S|X)\n with tf.variable_scope('selection_model'):\n X_ph = Input(shape=(maxlen,), dtype='int32')\n\n logits_T_grp = construct_gumbel_selector(X_ph, max_features, embedding_dims, maxlen) # bs, max_len * num_groups\n tau = 0.5 \n T = Sample_Concrete(tau, k, num_feature=maxlen, num_groups=num_groups)(logits_T_grp)\n\n T = Reshape((maxlen, num_groups))(T)\n T = Permute((2, 1))(T) # bs, num_groups, max_len\n\n # q(X_S)\n with tf.variable_scope('prediction_model'):\n emb2 = Embedding(max_features, embedding_dims, \n input_length=maxlen)(X_ph)\n # emb2 bs, max_len, 50\n # apply the matrix trick as before\n # here the output size of matmul layer is different from before\n net = matmul_layer([T, emb2]) # bs, num_groups, 50\n #print(net.shape)\n net = Conv1D(1, 1, padding='same', activation=None, strides=1, name = 'merge_channel')(net) # bs, num_groups, 1\n\n # net = Mean(net) # bs, 50\n input_group = Flatten()(net) # bs, num_groups\n # num_groups = K.int_shape(input_group)[1]\n # here we add instance wise f-s again!!!!\n net = Dense(100, activation='relu', name = 's/dense1',\n kernel_regularizer=regularizers.l2(1e-3))(input_group)\n net = Dense(100, activation='relu', name = 's/dense2',\n kernel_regularizer=regularizers.l2(1e-3))(net)\n logits = Dense(num_groups)(net)\n\n\n\n\n # A tensor of shape, [batch_size, max_sents, 100]\n samples = Sample_Concrete_Original(tau, num_vital_group, name='group_importance')(logits)\n new_input_group = Multiply()([input_group, samples]) \n\n\n\n net = Dense(hidden_dims, activation='relu')(new_input_group)\n preds = Dense(2, activation='softmax', \n name = 'new_dense')(net)\n\n\n model = Model(inputs=X_ph, \n outputs=preds)\n model.summary()\n model.compile(loss='categorical_crossentropy',\n optimizer='rmsprop',#optimizer,\n metrics=['acc']) \n #train_acc = np.mean(np.argmax(pred_train, axis = 1)==np.argmax(y_train, axis = 1))\n #val_acc = np.mean(np.argmax(pred_val, axis = 1)==np.argmax(y_val, axis = 1))\n #print('The train and validation accuracy of the original model is {} and {}'.format(train_acc, val_acc))\n\n if train:\n filepath=\"models/l2x.hdf5\"\n checkpoint = ModelCheckpoint(filepath, monitor='val_acc', \n verbose=1, save_best_only=True, mode='max')\n callbacks_list = [checkpoint] \n st = time.time()\n model.fit(x_train, y_train, \n validation_data=(x_val, y_val), \n callbacks = callbacks_list,\n epochs=epochs, batch_size=batch_size)\n duration = time.time() - st\n print('Training time 
is {}'.format(duration)) \n\n model.load_weights('models/l2x.hdf5', by_name=True) \n\n pred_model = Model(X_ph, [T, samples]) \n pred_model.summary()\n pred_model.compile(loss='categorical_crossentropy', \n optimizer='adam', metrics=['acc']) \n\n st = time.time()\n #scores = pred_model.predict(x_val, \n # verbose = 1, batch_size = batch_size)[:,:,0] \n #scores = np.reshape(scores, [scores.shape[0], maxlen])\n scores_t, group_importances_t = pred_model.predict(x_train, verbose = 1, batch_size = batch_size)\n scores_v, group_importances_v = pred_model.predict(x_val, verbose = 1, batch_size = batch_size)\n return scores_t, group_importances_t, scores_v, group_importances_v, x_val", "def test_torch_train_original_layer(self):\n model = self.get_digital_layer(in_channels=2, out_channels=3, kernel_size=4, padding=2)\n analog_model = self.get_layer(in_channels=2, out_channels=3, kernel_size=4, padding=2)\n self.set_weights_from_digital_model(analog_model, model)\n\n loss_func = mse_loss\n y_b = randn(3, 3, 5)\n x_b = randn(3, 2, 4)\n\n if self.use_cuda:\n y_b = y_b.cuda()\n x_b = x_b.cuda()\n\n self.train_model(model, loss_func, x_b, y_b)\n self.train_model(analog_model, loss_func, x_b, y_b)\n\n weight, bias = self.get_weights_from_digital_model(analog_model, model)\n\n weight_analog, bias_analog = analog_model.analog_tile.get_weights(realistic=False)\n\n self.assertTensorAlmostEqual(weight_analog, weight)\n if analog_model.use_bias:\n self.assertTensorAlmostEqual(bias_analog, bias)", "def get_model(point_cloud, is_training, bn_decay=None):\n #print(point_cloud.shape())\n batch_size = point_cloud.get_shape()[0].value\n BLOCK_SIZE1 = point_cloud.get_shape()[1].value\n BLOCK_SIZE2 = point_cloud.get_shape()[2].value\n \n #print batch_size, num_point, dim_point \n pixel_points = point_cloud[:, :, :, :2]\n\n input_image = point_cloud\n net1 = tf_util.conv2d(input_image, 128, [1, 1], # 3 is replaced by two \n padding='VALID', stride=[1,1],\n bn=True, is_training=is_training,\n scope='conv11', bn_decay=bn_decay)\n #### Net1 \n net1 = tf_util.conv2d(net1, 128, [1,1],\n padding='VALID', stride=[1,1],\n bn=True, is_training=is_training,\n scope='conv12', bn_decay=bn_decay)\n \n\n net1 = tf_util.max_pool2d(net1, [4,4], stride=[4,4], \n padding='VALID', scope='maxpool12') \n\n net1 = tf_util.conv2d(net1, 64, [1,1],\n padding='VALID', stride=[1,1],\n bn=True, is_training=is_training,\n scope='conv15', bn_decay=bn_decay) \n net1 = tf_util.conv2d(net1, 64, [1,1],\n padding='VALID', stride=[1,1],\n bn=True, is_training=is_training,\n scope='conv16', bn_decay=bn_decay)\n\n\n #### Net2 \n\n\n net2 = tf_util.conv2d(input_image, 128, [1, 1], # 3 is replaced by two \n padding='VALID', stride=[1,1],\n bn=True, is_training=is_training,\n scope='conv21', bn_decay=bn_decay)\n net2 = tf_util.conv2d(net2, 128, [1,1],\n padding='VALID', stride=[1,1],\n bn=True, is_training=is_training,\n scope='conv22', bn_decay=bn_decay)\n \n\n net2 = tf_util.max_pool2d(net2, [4,4], stride=[4,4], \n padding='VALID', scope='maxpool22') \n net2 = tf_util.conv2d(net2, 128, [1,1],\n padding='VALID', stride=[1,1],\n bn=True, is_training=is_training,\n scope='conv25', bn_decay=bn_decay) \n net2 = tf_util.conv2d(net2, 128, [1,1],\n padding='VALID', stride=[1,1],\n bn=True, is_training=is_training,\n scope='conv26', bn_decay=bn_decay)\n\n\n #### Net3 \n\n\n net3 = tf_util.conv2d(input_image, 128, [1, 1], # 3 is replaced by two \n padding='VALID', stride=[1,1],\n bn=True, is_training=is_training,\n scope='conv31', bn_decay=bn_decay)\n net3 = 
tf_util.conv2d(net3, 256, [1,1],\n padding='VALID', stride=[1,1],\n bn=True, is_training=is_training,\n scope='conv32', bn_decay=bn_decay)\n \n\n net3 = tf_util.max_pool2d(net3, [4,4], stride=[4,4], \n padding='VALID', scope='maxpool32') \n net3 = tf_util.conv2d(net3, 256, [1,1],\n padding='VALID', stride=[1,1],\n bn=True, is_training=is_training,\n scope='conv35', bn_decay=bn_decay) \n net3 = tf_util.conv2d(net3, 512, [1,1],\n padding='VALID', stride=[1,1],\n bn=True, is_training=is_training,\n scope='conv36', bn_decay=bn_decay)\n\n\n\n\n net1 = tf_util.max_pool2d(net1, [2,2], stride=[2,2], \n padding='VALID', scope='maxpool3') \n\n #### Concatenation of Net1, Net2, Net3 \n\n net2 = tf_util.max_pool2d(net2, [4,4], stride=[4,4], \n padding='VALID', scope='maxpool4') \n #print net2.shape \n\n net3 = tf_util.max_pool2d(net3, [8,8], stride=[1,1], \n padding='VALID', scope='maxpool5') \n\n net1 = tf.reshape(net1, [batch_size, -1])\n net2 = tf.reshape(net2, [batch_size, -1])\n net3 = tf.reshape(net3, [batch_size, -1])\n net = tf.concat([net1, net2, net3], 1)\n \n\n #### Fully Connected Layers - DropOut --- Bigger Version \n\n #### Try with smaller Network, i.e, 1024 parameters \n\n net = tf_util.fully_connected(net, 1024, bn=True, is_training=is_training,\n scope='fc1', bn_decay=bn_decay)\n net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training,\n scope='dp1')\n\n net = tf_util.fully_connected(net, 1024, bn=True, is_training=is_training,\n scope='fc2', bn_decay=bn_decay)\n net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training,\n scope='dp2')\n\n nett = tf_util.fully_connected(net, 40, bn=True, is_training=is_training,\n scope='fc5', bn_decay=bn_decay)\n netr = tf_util.fully_connected(net, 40, bn=True, is_training=is_training,\n scope='fc6', bn_decay=bn_decay)\n \n\n nett = tf_util.fully_connected(nett, 3, activation_fn=None, scope='fc7')\n netr = tf_util.fully_connected(netr, 4, activation_fn=None, scope='fc8')\n\n net = tf.concat([nett, netr], 1)\n\n return net, pixel_points # Note that pixel co-ordinates are not used directly during the training ", "def test_torch_train_original_layer(self):\n model = self.get_digital_layer(in_channels=2, out_channels=3, kernel_size=4, padding=2)\n analog_model = self.get_layer(in_channels=2, out_channels=3, kernel_size=4, padding=2)\n self.set_weights_from_digital_model(analog_model, model)\n\n loss_func = mse_loss\n y_b = randn(3, 3, 5, 5)\n x_b = randn(3, 2, 4, 4)\n\n if self.use_cuda:\n y_b = y_b.cuda()\n x_b = x_b.cuda()\n\n self.train_model(model, loss_func, x_b, y_b)\n self.train_model(analog_model, loss_func, x_b, y_b)\n\n weight, bias = self.get_weights_from_digital_model(analog_model, model)\n\n weight_analog, bias_analog = analog_model.analog_tile.get_weights(realistic=False)\n\n self.assertTensorAlmostEqual(weight_analog, weight)\n if analog_model.use_bias:\n self.assertTensorAlmostEqual(bias_analog, bias)", "def verifyModels(self):\r\n\r\n #\r\n # now check that all models have the same poly data in the\r\n # model node as in the display node\r\n #\r\n polyDataInScene = []\r\n fileNamesInScene = []\r\n success = True\r\n numModels = slicer.mrmlScene.GetNumberOfNodesByClass( \"vtkMRMLModelNode\" )\r\n for n in range(numModels):\r\n modelNode = slicer.mrmlScene.GetNthNodeByClass( n, \"vtkMRMLModelNode\" )\r\n polyDataInScene.append(modelNode.GetPolyData())\r\n for dn in range(modelNode.GetNumberOfDisplayNodes()):\r\n displayNode = modelNode.GetNthDisplayNode(dn)\r\n if modelNode.GetPolyData() != 
displayNode.GetInputPolyData():\r\n self.delayDisplay(\"Model %d does not match its display node %d! (name: %s, ids: %s and %s)\" % (n,dn,modelNode.GetName(), modelNode.GetID(),displayNode.GetID()))\r\n success = False\r\n for sn in range(modelNode.GetNumberOfStorageNodes()):\r\n storageNode = modelNode.GetNthStorageNode(sn)\r\n fileName = storageNode.GetFileName()\r\n fileNamesInScene.append(fileName)\r\n if fileName in fileNamesInScene:\r\n self.delayDisplay(\"Model %d has duplicate file name %s! (ids: %s and %s)\" % (n,fileName,modelNode.GetID(),storageNode.GetID()))\r\n success = False\r\n\r\n\r\n #\r\n # now check that each model has a unique polydata\r\n #\r\n for n in range(numModels):\r\n modelNode = slicer.mrmlScene.GetNthNodeByClass( n, \"vtkMRMLModelNode\" )\r\n if polyDataInScene.count(modelNode.GetPolyData()) > 1:\r\n self.delayDisplay(\"Polydata for Model is duplicated! (id: %s and %s)\" % (n,modelNode.GetID()))\r\n success = False\r\n\r\n return success", "def test_torch_train_original_layer(self):\n model = self.get_digital_layer(in_channels=2, out_channels=3, kernel_size=4, padding=2)\n analog_model = self.get_layer(in_channels=2, out_channels=3, kernel_size=4, padding=2)\n self.set_weights_from_digital_model(analog_model, model)\n\n loss_func = mse_loss\n y_b = randn(3, 3, 5, 5, 5)\n x_b = randn(3, 2, 4, 4, 4)\n\n if self.use_cuda:\n y_b = y_b.cuda()\n x_b = x_b.cuda()\n\n self.train_model(model, loss_func, x_b, y_b)\n self.train_model(analog_model, loss_func, x_b, y_b)\n\n weight, bias = self.get_weights_from_digital_model(analog_model, model)\n\n weight_analog, bias_analog = analog_model.analog_tile.get_weights(realistic=False)\n\n self.assertTensorAlmostEqual(weight_analog, weight)\n if analog_model.use_bias:\n self.assertTensorAlmostEqual(bias_analog, bias)", "def __eq__(self, other):\n if not isinstance(other, ModelDetailsTensorpb):\n return False\n\n return self.__dict__ == other.__dict__", "def model_setup(self):\n self.input_a = tf.placeholder(\n tf.float32, [\n 1,\n model.IMG_WIDTH,\n model.IMG_HEIGHT,\n model.IMG_CHANNELS\n ], name=\"input_A\")\n self.input_b = tf.placeholder(\n tf.float32, [\n 1,\n model.IMG_WIDTH,\n model.IMG_HEIGHT,\n model.IMG_CHANNELS\n ], name=\"input_B\")\n\n self.fake_pool_A = tf.placeholder(\n tf.float32, [\n None,\n model.IMG_WIDTH,\n model.IMG_HEIGHT,\n model.IMG_CHANNELS\n ], name=\"fake_pool_A\")\n self.fake_pool_B = tf.placeholder(\n tf.float32, [\n None,\n model.IMG_WIDTH,\n model.IMG_HEIGHT,\n model.IMG_CHANNELS\n ], name=\"fake_pool_B\")\n\n self.global_step = slim.get_or_create_global_step()\n\n self.num_fake_inputs = 0\n\n self.learning_rate = tf.placeholder(tf.float32, shape=[], name=\"lr\")\n\n inputs = {\n 'images_a': self.input_a,\n 'images_b': self.input_b,\n 'fake_pool_a': self.fake_pool_A,\n 'fake_pool_b': self.fake_pool_B,\n }\n\n outputs = model.get_outputs(\n inputs, network=self._network_version, skip=self._skip)\n\n self.prob_real_a_is_real = outputs['prob_real_a_is_real']\n self.prob_real_b_is_real = outputs['prob_real_b_is_real']\n self.fake_images_a = outputs['fake_images_a']\n self.fake_images_b = outputs['fake_images_b']\n self.prob_fake_a_is_real = outputs['prob_fake_a_is_real']\n self.prob_fake_b_is_real = outputs['prob_fake_b_is_real']\n\n self.cycle_images_a = outputs['cycle_images_a']\n self.cycle_images_b = outputs['cycle_images_b']\n\n self.prob_fake_pool_a_is_real = outputs['prob_fake_pool_a_is_real']\n self.prob_fake_pool_b_is_real = outputs['prob_fake_pool_b_is_real']", "def 
linear_model_ols_equal(ols_model_a, ols_model_b):\n\n if ols_model_a is None or ols_model_b is None:\n if ols_model_a is None and ols_model_b is None:\n models_equal = True\n else:\n models_equal = False\n else:\n\n exog_equal = np.array_equal(ols_model_a.exog, ols_model_b.exog)\n endog_equal = np.array_equal(ols_model_a.endog, ols_model_b.endog)\n exog_names_equal = ols_model_a.exog_names == ols_model_b.exog_names\n endog_names_equal = ols_model_a.endog_names == ols_model_b.endog_names\n formula_equal = ols_model_a.formula == ols_model_b.formula\n\n models_equal = exog_equal and endog_equal and exog_names_equal and endog_names_equal and formula_equal\n\n return models_equal", "def model(x_crop, y_, reuse):\n with tf.variable_scope(\"model\", reuse=reuse):\n net = tl.layers.InputLayer(x_crop, name='input')\n output1 = tl.layers.Conv2d(net, 64, (5, 5), (1, 1), act=tf.nn.relu, padding='SAME', name='cnn1')\n net = tl.layers.MaxPool2d(output1, (3, 3), (2, 2), padding='SAME', name='pool1')\n output2 = tl.layers.Conv2d(net, 64, (5, 5), (1, 1), act=tf.nn.relu, padding='SAME', name='cnn2')\n net = tl.layers.MaxPool2d(output2, (3, 3), (2, 2), padding='SAME', name='pool2')\n net = tl.layers.FlattenLayer(net, name='flatten')\n output3 = tl.layers.DenseLayer(net, 384, act=tf.nn.relu, name='d1relu')\n output4 = tl.layers.DenseLayer(output3, 192, act=tf.nn.relu, name='d2relu')\n output5 = tl.layers.DenseLayer(output4, 10, act=None, name='output')\n\n return output1.outputs, output2.outputs, output3.outputs, output4.outputs, output5.outputs, output5", "def create_model(input_shape=None):\n\n model = Sequential()\n #n,height,width,chennel = input_shape\n height = 146\n width = 243\n chennel = 3\n\n model.add(Conv2D(filters=4, input_shape=(width, height, chennel), kernel_size=(3, 3), padding='same'))\n model.add(Activation('relu'))\n model.add(Conv2D(filters=8, kernel_size=(3, 3)))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.25))\n\n model.add(Conv2D(filters=4,kernel_size=(3, 3)))\n model.add(Activation('relu'))\n model.add(Conv2D(filters=8, kernel_size=(3, 3)))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.25))\n\n model.add(Conv2D(filters=4, kernel_size=(3, 3)))\n model.add(Activation('relu'))\n model.add(Conv2D(filters=8, kernel_size=(3, 3)))\n model.add(Activation('relu'))\n model.add(Conv2D(filters=16, kernel_size=(3, 3)))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.25))\n\n model.add(Conv2D(filters=8, kernel_size=(3, 3)))\n model.add(Activation('relu'))\n model.add(Conv2D(filters=8, kernel_size=(3, 3)))\n model.add(Activation('relu'))\n model.add(Conv2D(filters=16, kernel_size=(5, 5)))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.25))\n\n model.add(Flatten())\n model.add(Dense(64))\n model.add(Activation('relu'))\n model.add(Dropout(0.25))\n model.add(Dense(32))\n model.add(Activation('relu'))\n model.add(Dropout(0.25))\n model.add(Dense(8))\n model.add(Activation('softmax'))\n\n sgd = SGD(lr=1e-3, decay=1e-6, momentum=0.87, nesterov=True)\n model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=[\"accuracy\"])\n return model", "def keras_model_fn_cpu(model_config, vocab_size, embedding_size, embeddings):\n ## hyperparams\n model_name = model_config['model_name']\n num_class = model_config['num_class']\n lstm_hs = model_config['lstm_hs']\n gru_hs = model_config['gru_hs']\n 
learning_rate = model_config['learning_rate']\n \n with tf.device('/cpu:0'):\n ## build model\n inputs = ks.Input(shape=(None,), dtype='int32', name='inputs')\n embedded_sequences_ft1 = layers.Embedding(vocab_size, embedding_size, trainable = False, mask_zero = False)(inputs)\n embedded_sequences_ft2 = layers.Embedding(vocab_size, embedding_size, trainable = False, mask_zero = False)(inputs)\n concat_embed = layers.concatenate([embedded_sequences_ft1 ,embedded_sequences_ft2])\n concat_embed = layers.SpatialDropout1D(0.5)(concat_embed)\n x = layers.Bidirectional(layers.LSTM(lstm_hs,recurrent_activation = 'sigmoid', return_sequences = True))(concat_embed)\n x, x_h, x_c = layers.Bidirectional(layers.GRU(gru_hs, reset_after = True, recurrent_activation = 'sigmoid', return_sequences = True, return_state = True))(x)\n x_1 = layers.GlobalMaxPool1D()(x)\n x_2 = layers.GlobalAvgPool1D()(x)\n x_out = layers.concatenate([x_1 ,x_2, x_h])\n x_out = layers.BatchNormalization()(x_out)\n outputs = layers.Dense(num_class, activation = 'softmax', name = 'outputs')(x_out) # outputs\n model = ks.Model(inputs, outputs, name = model_name)\n\n ## compile\n model.compile(loss = 'categorical_crossentropy', \n optimizer=ks.optimizers.Adam(lr=learning_rate, clipnorm=.25, beta_1=0.7, beta_2=0.99), \n metrics=['categorical_accuracy', ks.metrics.TopKCategoricalAccuracy(k=3)])\n return model", "def test_model_with_upsample2d(self):\n tf.compat.v1.reset_default_graph()\n _ = model_with_upsample2d()\n conn_graph = ConnectedGraph(tf.compat.v1.get_default_graph(), starting_op_names=['input_1'],\n output_op_names=['model_with_upsample2d/Softmax'])\n self.assertTrue(validate_branch_ops(conn_graph))\n self.assertTrue(validate_product_tensor_lists(conn_graph))\n self.assertEqual(0, conn_graph.branch_count)\n self.assertEqual(7, len(conn_graph.get_all_ops()))\n\n # 6 products from inter module connections\n # 6 products from parameters\n self.assertEqual(12, len(conn_graph.get_all_products()))\n found_upsample2d = False\n for op in conn_graph.get_all_ops().values():\n if op.type == 'Upsample2D':\n found_upsample2d = True\n self.assertTrue(found_upsample2d)\n\n tf.compat.v1.reset_default_graph()" ]
[ "0.72829473", "0.72438365", "0.7227518", "0.7076971", "0.6773338", "0.6756114", "0.6642388", "0.6548654", "0.6545828", "0.6540332", "0.65398526", "0.6519682", "0.65037364", "0.6483842", "0.64417934", "0.64361674", "0.6423497", "0.636907", "0.6350753", "0.63441813", "0.6334493", "0.6330024", "0.6327539", "0.62835634", "0.6279862", "0.62551105", "0.6233698", "0.62084", "0.6202684", "0.62004304", "0.61842746", "0.61693585", "0.61668265", "0.61583954", "0.6140384", "0.61366904", "0.6133566", "0.6130021", "0.6119701", "0.60762185", "0.6061099", "0.6060747", "0.6039676", "0.60306466", "0.60210145", "0.5960328", "0.594478", "0.5935395", "0.59343463", "0.5932383", "0.5923989", "0.59146243", "0.5912894", "0.59072155", "0.5885704", "0.5881957", "0.5844758", "0.5841033", "0.58336014", "0.58232903", "0.58219343", "0.5820123", "0.5816991", "0.58107173", "0.58056986", "0.57933223", "0.5783204", "0.57791895", "0.5754154", "0.5751199", "0.57501215", "0.5747529", "0.5743962", "0.57399553", "0.57366854", "0.5731636", "0.57259816", "0.5719436", "0.57175547", "0.56971973", "0.56930596", "0.5687187", "0.56811816", "0.5665776", "0.5655486", "0.5648513", "0.564656", "0.5642885", "0.56265944", "0.5626038", "0.5620101", "0.5618088", "0.561244", "0.56068456", "0.56030625", "0.5577496", "0.5572261", "0.5563715", "0.55603004", "0.5557905" ]
0.7113716
3
two BaseWrapper instances are equal enough
def assert_wrappers_equal(first, second): assert first.sk_params == second.sk_params assert first.history_ == second.history_ if not first.model_ or not second.model_: assert first.model_ == second.model_ else: assert_models_equal(first.model, second.model)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_inheritedClassesEquality(self):\n self.assertTrue(Record(1, 2) == DerivedRecord(1, 2))\n self.assertFalse(Record(1, 2) == DerivedRecord(1, 3))\n self.assertFalse(Record(1, 2) == DerivedRecord(2, 2))\n self.assertFalse(Record(1, 2) == DerivedRecord(3, 4))", "def test_identical(self):\n write this test!", "def testEquality(self):\n pass", "def _is_equal_same_type(self, other):\n return True", "def test_inheritedClassesInequality(self):\n self.assertFalse(Record(1, 2) != DerivedRecord(1, 2))\n self.assertTrue(Record(1, 2) != DerivedRecord(1, 3))\n self.assertTrue(Record(1, 2) != DerivedRecord(2, 2))\n self.assertTrue(Record(1, 2) != DerivedRecord(3, 4))", "def test_baseid_different(self):\n test1 = BaseModel()\n test2 = BaseModel()\n self.assertNotEqual(test1.id, test2, id)", "def same_as(self, other):\n return super().__eq__(other)", "def test_almost_equal(self):\n x = Point(\n lat=23.4,\n lng=23.1,\n author=self.u\n )\n self.assertTrue(self.a == x)\n self.assertFalse(self.a != x)", "def test00(self):\n b_0 = Base()\n b_1 = Base()\n self.assertEqual(b_0.id, 1)\n self.assertEqual(b_1.id, 2)", "def is_type_equivalent(self, other):\n mine = self._replace_defaults()\n theirs = other._replace_defaults()\n\n def remove_base(dct):\n # removes base attributes in the phyiscal layer.\n basekeys = Column._replace_defaults(self).keys()\n for k in basekeys:\n del dct[k]\n\n remove_base(mine)\n remove_base(theirs)\n\n return type(self) == type(other) and mine == theirs", "def is_identical(self, other):\n return (self.compounddatatype == other.compounddatatype and\n self.min_row == other.min_row and\n self.max_row == other.max_row)", "def assert_compatible(self, other):\n assert self.config == other.config, ('configs are not the same self: %s '\n 'other %s') % (self.config,\n other.config)\n\n assert self.hash_functions == other.hash_functions, (\n 'hash functions are not the same')\n return True", "def test_differentClassesEquality(self):\n self.assertFalse(Record(1, 2) == DifferentRecord(1, 2))", "def test_class_eq_method(self, test_instances):\n a, b, _ = test_instances\n\n assert a == b", "def test_differentClassesInequality(self):\n self.assertTrue(Record(1, 2) != DifferentRecord(1, 2))", "def test_equal_on_equal(self):\n a = objects.OpaqueObject(self.bytes_a, enums.OpaqueDataType.NONE)\n b = objects.OpaqueObject(self.bytes_a, enums.OpaqueDataType.NONE)\n self.assertTrue(a == b)\n self.assertTrue(b == a)", "def iexact(self, other):", "def __eq__(self, other):\n return isinstance(other, type(self)) and self.size == other.size", "def is_identical(self, other):\n if self.is_input != other.is_input:\n return False\n\n if self.is_raw() and other.is_raw():\n return True\n if self.is_raw() or other.is_raw():\n return False\n return self.structure.is_identical(other.structure)", "def __eq__(self, other: 'Pool') -> bool:\n if not isinstance(other, self.__class__):\n return False\n return self.__dict__ == other.__dict__", "def almost_equals(self, other, decimal=...): # -> bool:\n ...", "def test_attrs(self):\n for self_attr, wrapper_attr in [(\"reactor\", \"_reactor\"),\n (\"client\", \"_client\")]:\n self.assertIdentical(getattr(self, self_attr),\n getattr(self.wrapper, wrapper_attr))", "def test_compatible(self, other):\n if not self.center.dims == other.center.dims:\n raise ValueError(\"Devices have different dimensionality: {:d} vs {:d}\".format(self.center.dims, other.center.dims))\n\n if not self.center.shape == other.center.shape:\n raise ValueError(\"The shape of the central part does not 
match: {} vs {}\".format(self.center.shape, other.center.shape))\n\n if not len(self.leads) == len(other.leads):\n raise ValueError(\"The number of leads is different: {:d} vs {:d}\".format(len(self.leads), len(other.leads)))\n\n for n, (i,j) in enumerate(zip(self.leads, other.leads)):\n if not i.shape == j.shape:\n raise ValueError(\"The shape of a lead {:d} does not match: {} vs {}\".format(n,i.shape,j.shape))\n\n for n, (i,j) in enumerate(zip(self.connections, other.connections)):\n if not numpy.array_equal(i,j):\n raise ValueError(\"The connections arrays for lead {:d} are not equal\".format(n))", "def test_b(self):\n v1 = versions.Version(version='1.2.3', name='foo')\n v2 = versions.Version(version='1.2', name='bar')\n\n self.assertFalse(v1 == v2)\n self.assertFalse(v2 == v1)", "def _merge_sanity_check(self, other):\n if self._fields is not None and (\n set(self.query.values_select) != set(other.query.values_select)\n or set(self.query.extra_select) != set(other.query.extra_select)\n or set(self.query.annotation_select) != set(other.query.annotation_select)\n ):\n raise TypeError(\n \"Merging '%s' classes must involve the same values in each case.\"\n % self.__class__.__name__\n )", "def test_autocreate(self):\n a = Vector(1, 2)\n b = Vector(a)\n assert b == a", "def __le__(self, other: object) -> bool:\n ...", "def __le__(self, other: object) -> bool:\n ...", "def test_00(self):\n base0 = Base()\n base1 = Base()\n self.assertEqual(base0.id, 1)\n self.assertEqual(base1.id, 2)", "def __eq__(self, other: 'GatewayCollection') -> bool:\n if not isinstance(other, self.__class__):\n return False\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n raise NotImplementedError('must be implemented by subclass')", "def test_instance_equality(self):\r\n class EqualityModel(Model):\r\n pk = columns.Integer(primary_key=True)\r\n\r\n m0 = EqualityModel(pk=0)\r\n m1 = EqualityModel(pk=1)\r\n\r\n self.assertEqual(m0, m0)\r\n self.assertNotEqual(m0, m1)", "def test_b(self):\n v1 = versions.Version(version='1.2.3', name='foo')\n v2 = versions.Version(version='1.2', name='bar')\n\n self.assertTrue(v1 != v2)\n self.assertTrue(v2 != v1)", "def __eq__(self, other):\n raise NotImplementedError(\"must be implemented by subclass\")", "def test_instance_equality(self):\n class EqualityModel(Model):\n pk = columns.Integer(primary_key=True)\n\n m0 = EqualityModel(pk=0)\n m1 = EqualityModel(pk=1)\n\n self.assertEqual(m0, m0)\n self.assertNotEqual(m0, m1)", "def test_lt_self(self):\n self.assertFalse(self.instance < self.instance)", "def test_equal_on_equal_and_empty(self):\n a = Digest()\n b = Digest()\n\n self.assertTrue(a == b)\n self.assertTrue(b == a)", "def _is_equal_same_type(self, other):\n # approximate_online_count\n if self.approximate_online_count != other.approximate_online_count:\n return False\n \n # approximate_user_count\n if self.approximate_user_count != other.approximate_user_count:\n return False\n \n # description\n if self.description != other.description:\n return False\n \n # discovery_splash_hash\n if self.discovery_splash_hash != other.discovery_splash_hash:\n return False\n \n # discovery_splash_type\n if self.discovery_splash_type != other.discovery_splash_type:\n return False\n \n # emojis\n if self.emojis != other.emojis:\n return False\n \n # features\n if self.features != other.features:\n return False\n \n # icon_hash\n if self.icon_hash != other.icon_hash:\n return False\n \n # icon_type\n if self.icon_type != other.icon_type:\n return False\n \n # id\n if 
self.id != other.id:\n return False\n \n # invite_splash_hash\n if self.invite_splash_hash != other.invite_splash_hash:\n return False\n \n # invite_splash_type\n if self.invite_splash_type != other.invite_splash_type:\n return False\n \n # stickers\n if self.stickers != other.stickers:\n return False\n \n # name\n if self.name != other.name:\n return False\n \n return True", "def __eq__(self, other):\n raise NotImplementedError('must be implemented by subclass')", "def __eq__(self, other):\n raise NotImplementedError('must be implemented by subclass')", "def __eq__(self, other):\n raise NotImplementedError('must be implemented by subclass')", "def __eq__(self, other):\n raise NotImplementedError('must be implemented by subclass')", "def is_compatible(self, other):\n return self.intervals == other.intervals and\\\n self.nonderived_directions == other.nonderived_directions", "def almost_equals(self, other):\n if self.__class__ is other.__class__ and len(self) == len(other):\n for a, b in zip(self, other):\n if not a.almost_equals(b):\n return False\n return True\n else:\n return False", "def test_equal(self):\n self.assertTrue(self.a == self.a)\n self.assertFalse(self.a != self.a)", "def __eq__(self, other):\n if not isinstance(other, ModCorpBasicDTO):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n raise NotImplementedError()", "def test_compatible(self, other):\n if self.dims != other.dims:\n raise ValueError(\"Dimension mismatch: {:d} vs {:d}\".format(self.dims, other.dims))\n\n if self.shape != other.shape:\n raise ValueError(\"Block shape mismatch: {} vs {}\".format(self.shape, other.shape))", "def test_ids_maker(self):\n firstins = BaseModel()\n secondins = BaseModel()\n self.assertNotEqual(firstins, secondins)", "def __ge__(self, other: object) -> bool:\n ...", "def __ge__(self, other: object) -> bool:\n ...", "def __eq__(self, other: Any) -> bool:\n # Subclasses should call this as part of their equality checks\n return (\n isinstance(other, BaseField)\n and self._is_nullable == other._is_nullable\n and self._resolve_field_name() == other._resolve_field_name() # may be None == None\n and self._spark_type_class == other._spark_type_class\n and self._metadata == other._metadata # may be None == None\n )", "def __eq__(self, other):\n raise NotImplementedError", "def __eq__(self, other):\n raise NotImplementedError", "def __eq__(self, other):\n raise NotImplementedError", "def test_equal_on_equal(self):\n a = Digest(\n hashing_algorithm=self.hashing_algorithm_b,\n digest_value=self.digest_value_b,\n key_format_type=self.key_format_type_b)\n b = Digest(\n hashing_algorithm=self.hashing_algorithm_b,\n digest_value=self.digest_value_b,\n key_format_type=self.key_format_type_b)\n\n self.assertTrue(a == b)\n self.assertTrue(b == a)", "def __eq__(self, other: 'Gateway') -> bool:\n if not isinstance(other, self.__class__):\n return False\n return self.__dict__ == other.__dict__", "def test__ActivityMetadataBase__eq():\n activity_metadata = ActivityMetadataBase()\n \n vampytest.assert_eq(activity_metadata, activity_metadata)\n vampytest.assert_ne(activity_metadata, object())", "def test_multiple_singleton_are_same():\n single_a = SingletonTest('Loaded with A')\n single_b = SingletonTest('Loaded with B')\n assert single_a is single_b\n assert single_b.store_value == 'Loaded with A'", "def test_eq(self):\n dummy = DummyCryptographicObject()\n self.assertTrue(dummy == dummy)", "def test_basemodel_diff_id(self):\n B1 = BaseModel()\n B2 = BaseModel()\n B3 = 
BaseModel()\n self.assertNotEqual(B1.id, B2.id)\n self.assertNotEqual(B1.id, B3.id)\n self.assertNotEqual(B2.id, B3.id)", "def __eq__(self, other):\n # could use key xor hash here\n return isinstance(other, self.__class__) and \\\n other.hash == self.hash", "def __eq__(self, other):\n if type(other) is type(self):\n # TODO: check that this does not mix Clifford classes without different symmetric bilinear forms,\n # as created with class factories.\n return (\n self.items() == other.items()\n and self.symmetric_bilinear_form.__code__.co_code == other.symmetric_bilinear_form.__code__.co_code\n )\n return NotImplemented", "def is_consistent(self, other):\n return self.name != other.name or self.type is other.type", "def __eq__(self, other):\n return (isinstance(other, self.__class__) and\n self.type == other.type and\n self.data == other.data)", "def __eq__(self, other):\n if self.__repr__() != other.__repr__():\n return False\n\n for attribute in set(\n list(self.__dict__.keys()) + list(other.__dict__.keys())\n ):\n\n value = getattr(self, attribute, None)\n other_value = getattr(other, attribute, None)\n\n if attribute in [\"_random\", \"_seed\"]:\n # Don't compare the random generators.\n continue\n\n if isinstance(value, np.ndarray):\n if not (np.array_equal(value, other_value)):\n return False\n\n elif isinstance(value, types.GeneratorType) or isinstance(\n value, itertools.cycle\n ):\n # Split the original generator so it is not touched\n generator, original_value = itertools.tee(value)\n other_generator, original_other_value = itertools.tee(\n other_value\n )\n\n if isinstance(value, types.GeneratorType):\n setattr(self, attribute, (ele for ele in original_value))\n setattr(\n other, attribute, (ele for ele in original_other_value)\n )\n else:\n setattr(self, attribute, itertools.cycle(original_value))\n setattr(\n other, attribute, itertools.cycle(original_other_value)\n )\n\n for _ in range(200):\n try:\n if next(generator) != next(other_generator):\n return False\n except StopIteration:\n break\n\n # Code for a strange edge case where each strategy points at each\n # other\n elif value is other and other_value is self:\n pass\n else:\n if value != other_value:\n return False\n return True", "def pod_equals(x, y):\n return type(x) == type(y) and x.__dict__ == y.__dict__", "def __eq__(self, other):\n if not isinstance(other, Fiddle):\n return False\n\n return self.__dict__ == other.__dict__", "def test_none(self):\n base1 = Base(None)\n base2 = Base(None)\n base3 = Base(None)\n self.assertEqual(base1.id, base3.id - 2)", "def __eq__(self, other: Any) -> bool:\n if not isinstance(other, Just):\n return False\n return other.get == self.get", "def test_eq_2():\n a = FixedPoint(1, 'Q2.8')\n b = FixedPoint(1, 'Q2.8')\n assert a == b", "def __eq__(self, other):\r\n return self.__dict__ == other.__dict__", "def __ne__(self, other: 'Pool') -> bool:\n return not self == other", "def test_same_models(self):\n\t\t\n\t\t# TODO: finish\n\t\tpass", "def __eq__(self, other) -> bool:\n if not isinstance(other, NilpotentOrbit):\n return False\n if self.my_type != other.my_type:\n return False\n if self.lie_rank != other.lie_rank:\n return False\n if self.decorator != other.decorator:\n return False\n return self.my_diagram == other.my_diagram", "def __eq__(self, other):\n # Ensure same class and values match\n if isinstance(other, self.__class__):\n return self.__dict__ == other.__dict__\n else:\n return False", "def unitset_is_consistent(self, other: \"UnitSet\"):\n return all(getattr(self, q) is 
getattr(other, q) for q in self._base_quantities)", "def __eq__(self, other) -> bool:\n if not isinstance(other, type(self)):\n return False\n for attribute in self.classes:\n if getattr(self, attribute) != getattr(other, attribute):\n return False\n return True", "def test_a(self):\n v1 = versions.Version(version='1.2.3', name='foo')\n v2 = versions.Version(version='1.2.3', name='bar')\n\n self.assertTrue(v1 == v2)\n self.assertTrue(v2 == v1)", "def __eq__(self, other: object) -> bool:\n\n if not isinstance(other, self.__class__):\n return False\n\n if not self.simctl_type == other.simctl_type:\n return False\n\n return self.raw_info == other.raw_info", "def almost_equal(self, other, rtol=1e-05, atol=1e-08):\n\n # float attributes defining the instance\n fkeys = ['x0', 'y0', 'dx', 'dy']\n # unambiguous attributes\n ckeys = ['nx', 'ny', 'origin']\n\n ok = True\n for k in fkeys:\n ok = ok and np.isclose(getattr(self.corner_grid, k),\n getattr(other.corner_grid, k),\n rtol=rtol, atol=atol)\n for k in ckeys:\n _ok = getattr(self.corner_grid, k) == getattr(other.corner_grid, k)\n ok = ok and _ok\n p1 = self.corner_grid.proj\n p2 = other.corner_grid.proj\n return ok and proj_is_same(p1, p2)", "def __eq__(self, other):\n if not isinstance(other, RestInstancePropertiesV1):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, Single2HaObject):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return isinstance(other, self.__class__)", "def __eq__(self, other):\r\n if not isinstance(other, BaseElement): return False\r\n return self.as_dict() == other.as_dict() and self.eid == other.eid", "def test_not_equal_different_class(self):\n test1 = self.Test({ 'id': 1, 'name': 'Poop Head' })\n test2 = self.Test2({ 'id': 1, 'name': 'Poop Head' })\n self.assertNotEqual(test1, test2)", "def __eq__(self, other):\n if not isinstance(other, InlineResponseDefault1):\n return False\n\n return self.__dict__ == other.__dict__", "def are_equal(self, sp1, sp2):\n return", "def __neq__(self, other): \n return not self == other", "def __eq__(self, other):\n if self.__class__ != other.__class__:\n return False\n if self.primary != other.primary:\n return False\n return True", "def __eq__(self, other: object) -> bool:\n # Argument 1 of \"__eq__\" is incompatible with supertype \"object\";\n # supertype defines the argument type as \"object\"\n # So, the type of other shouldn't lower than 'object'. 
For that intention, add the following two line code.\n if not isinstance(other, Dictionary):\n return NotImplemented\n lst_1 = self.to_list()\n lst_2 = other.to_list()\n is_equal = True\n for index in range(len(lst_1)):\n if lst_1[index] != lst_2[index]:\n is_equal = False\n break\n return is_equal", "def test_equal(self):\r\n\r\n a_players = [ZeroPlayer(1), ZeroPlayer(2)]\r\n a_x_dist = 3\r\n a_y_dist = 3\r\n a_num_to_win = 1\r\n a_game = Game(a_players, a_x_dist, a_y_dist, a_num_to_win)\r\n\r\n b_players = [ZeroPlayer(1), ZeroPlayer(2)]\r\n b_x_dist = 3\r\n b_y_dist = 3\r\n b_num_to_win = 1\r\n b_game = Game(b_players, b_x_dist, b_y_dist, b_num_to_win)\r\n\r\n c_players = [ZeroPlayer(1), ZeroPlayer(2)]\r\n c_x_dist = 3\r\n c_y_dist = 3\r\n c_num_to_win = 1\r\n c_game = Game(c_players, c_x_dist, c_y_dist, c_num_to_win)\r\n\r\n self.assertTrue(b_game == a_game == c_game)\r\n\r\n a_game.play_game()\r\n b_game.play_game()\r\n\r\n self.assertTrue(a_game == b_game)\r\n self.assertFalse(c_game == a_game)\r\n\r\n c_game.play_game()\r\n\r\n self.assertTrue(b_game == a_game == c_game)", "def __eq__(self, other: 'GatewayTemplateGatewayTypeDedicatedTemplate') -> bool:\n if not isinstance(other, self.__class__):\n return False\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if isinstance(other, DenseUnit):\n return (Counter(self.dimension) == Counter(other.dimension) and Counter(self.points) == Counter(\n other.points))\n return False", "def __eq__(self, other):\n return hash(self) == hash(other)", "def __eq__(self, other):\n return hash(self) == hash(other)", "def __eq__(self, other):\n return hash(self) == hash(other)", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__" ]
[ "0.6653726", "0.6650997", "0.65729487", "0.64894223", "0.6444425", "0.64114064", "0.63375264", "0.63026255", "0.6292803", "0.6269224", "0.6257633", "0.6256101", "0.62419796", "0.62266093", "0.6213192", "0.6190846", "0.6179785", "0.6126253", "0.61250675", "0.6109466", "0.609552", "0.609192", "0.6090806", "0.6088815", "0.6072727", "0.6072093", "0.6065218", "0.6065218", "0.60543853", "0.60422385", "0.60334295", "0.60311985", "0.6028922", "0.6026463", "0.60232794", "0.6014817", "0.6011269", "0.6010828", "0.601039", "0.601039", "0.601039", "0.601039", "0.6004783", "0.6001256", "0.60002166", "0.5980331", "0.59772664", "0.59748", "0.5969667", "0.5968735", "0.5968735", "0.59626627", "0.5962032", "0.5962032", "0.5962032", "0.59596014", "0.59507227", "0.5937139", "0.5935824", "0.59354484", "0.59162396", "0.59129745", "0.59072435", "0.59057784", "0.5903794", "0.59007305", "0.5892873", "0.58875936", "0.58799124", "0.5879071", "0.58782506", "0.5877855", "0.58708537", "0.58679706", "0.58660185", "0.58617127", "0.58593667", "0.5858515", "0.58456695", "0.5844431", "0.58429676", "0.58358437", "0.5831466", "0.5824265", "0.5820035", "0.5815509", "0.58146274", "0.58116484", "0.5806579", "0.58031964", "0.58018756", "0.58003396", "0.57993525", "0.57985383", "0.5796267", "0.5796267", "0.5796267", "0.5791806", "0.5791806", "0.5791806" ]
0.67075044
0
two BaseWrapper instances return same predictions
def assert_predictions_equal(first, second, x):
    preds1 = first.predict(x, batch_size=batch_size)
    preds2 = second.predict(x, batch_size=batch_size)
    np.testing.assert_array_equal(preds1, preds2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def assert_wrappers_equal(first, second):\n assert first.sk_params == second.sk_params\n assert first.history_ == second.history_\n if not first.model_ or not second.model_:\n assert first.model_ == second.model_\n else:\n assert_models_equal(first.model, second.model)", "def predict(self, instances):\r\n raise NotImplementedError", "def test_instances(self):\n a = CommonMixin()\n a.__dict__.update(**self.data)\n\n b = CommonMixin()\n b.__dict__.update(**self.data)\n\n c = ExampleClass()\n c.__dict__.update(**self.data)\n\n return a, b, c", "def proxy_other(self):\n return None", "def postprocess_model_outputs(self, predictions, expected):\n\n for key, val in predictions.items():\n predictions[key] = val.numpy()\n\n for key, val in expected.items():\n expected[key] = val.numpy()\n\n return predictions, expected", "def reuse(self):\n similarCases = self.r1.similarCases\n self.r2.setQueryCase(self.case)\n self.r2.retrieval = self.r1\n self.r2.reuse(similarCases)\n self.predictionCase = self.r2.predictionCase", "def _get_prediction(self):\n raise NotImplementedError", "def test_predict(mock_classifier, wrapper_arguments, input_data):\n data, labels = input_data\n model = Mock()\n true_prediction = np.random.choice(labels, size=len(data))\n model.predict.return_value = true_prediction\n mock_classifier.return_value = model\n wrapped_model = LinearClassifierModel(*wrapper_arguments)\n\n output_prediction = wrapped_model.predict(data)\n\n model.predict.assert_called_once_with(data)\n np.testing.assert_array_equal(output_prediction, true_prediction)", "def test_same_models(self):\n\t\t\n\t\t# TODO: finish\n\t\tpass", "def predict_only(self):", "def prediction_a(self):\n return self._prediction_a", "def test_multifield_classify(self):\n self.model.fit(self.text_data_train, self.train_targets)\n self.assertTrue(self.model.is_classification)\n predictions = self.model.predict(self.text_data_valid)\n self.model.save(self.save_file)\n model = LanguageModelGeneralAPI.load(self.save_file)\n new_predictions = model.predict(self.text_data_valid)\n for new_pred, old_pred in zip(new_predictions, predictions):\n self.assertEqual(new_pred, old_pred)", "def test_get_predictions(self):\n predictions = self.stop.predictions\n self.assertEqual(type(predictions), type([]))\n [self.assertEqual(type(i), BusPrediction) for i in predictions]\n [self.assertEqual(type(i.route), BusRoute) for i in predictions]\n [self.assertEqual(type(i.run), BusRun) for i in predictions]\n predictions[0].__repr__()\n predictions[0].__str__()\n predictions[0].__unicode__()", "def common(self):", "def match(self, other):", "def _inherit_binary_operation(self, other, op):\n sdata = self.data\n if isinstance(op, basestring) and hasattr(sdata, op):\n bound_op = getattr(sdata, op)\n else:\n def bound_op(odata):\n return op(sdata, odata)\n\n bset = self.bset\n if isinstance(other, type(self)) or isinstance(self, type(other)):\n obset = other.bset\n if not ((bset == obset) or\n bset.shape == () or\n obset.shape == ()):\n raise ValueError(\"instances of {} must be defined over \"\n \"instances of {} that compare equal for \"\n \"binary operations to be defined\"\n .format(self.__class__.__name__,\n bset.__class__.__name__))\n new_data = bound_op(other.data)\n if bset.shape == ():\n bset = obset\n else:\n new_data = bound_op(other)\n\n return type(self)(new_data, bset)", "def __init__(self) :\n self.prediction_ = None", "def __init__(self) :\n self.prediction_ = None", "def __init__(self) :\n self.prediction_ = None", "def 
postprocess_model_outputs(self, predictions, expected):\n\n predictions = {k: t.numpy() for k, t in predictions.items()}\n\n return predictions, expected", "def test_predict(self):\n self.regression_single.predict(self.X_test)\n self.assertTrue(len(self.regression_single.y_pred))\n self.regression_boston.predict(self.boston_x_test)\n self.assertTrue(len(self.regression_boston.y_pred))", "def __matmul__(self, other: 'SampledField'): # values @ representation\n return self.at(other, keep_extrapolation=False)", "def mock_get_actuals_left_outer_join_with_predictions(monkeypatch):\n monkeypatch.setattr(machine_learning, 'get_actuals_left_outer_join_with_predictions',\n get_actuals_left_outer_join_with_predictions)", "def test_shared_objects_wrapper(self):\n input_ = keras.Input(shape=(1,))\n unwrapped = keras.layers.Layer(name='unwrapped')\n wrapped = keras.layers.Wrapper(unwrapped, name='wrapped')\n model = keras.Model(inputs=input_,\n outputs=[unwrapped(input_), wrapped(input_)])\n\n # Test recreating directly from config\n config = model.get_config()\n loaded = keras.Model.from_config(config)\n self.assertIs(loaded.layers[1], loaded.layers[2].layer)\n\n # Test saving and loading to disk\n save_format = testing_utils.get_save_format()\n saved_model_dir = self._save_model_dir()\n keras.models.save_model(model, saved_model_dir, save_format=save_format)\n loaded = keras.models.load_model(saved_model_dir)\n self.assertIs(loaded.layers[1], loaded.layers[2].layer)", "def test_fit_returns_self(self):\n\n df = d.create_df_1()\n\n x = BaseTransformer(columns=\"a\")\n\n x_fitted = x.fit(df)\n\n assert x_fitted is x, \"Returned value from BaseTransformer.fit not as expected.\"", "def prediction_a_all(self):\n return self._prediction_a_all", "def clone_with_updates(self, **kwargs):\n fields_dict = self.to_dict()\n fields_dict.update(kwargs)\n return BindingPrediction(**fields_dict)", "def __rmul__(self, other):\r\n if isinstance(other, tuple):\r\n return self.transform_point(other)\r\n if isinstance(other, LinearTransformation):\r\n return self.right_composition(other)\r\n else:\r\n raise NotImplementedError", "def test_instance(self):\n self.assertIsInstance(self.test1, BaseModel)", "def _from_other(cls, obj):", "def __init__(self,prediction, x1, y1, x2, y2):\n self.prediction = prediction\n self.x1 = x1\n self.y1 = y1\n self.x2 = x2\n self.y2 = y2", "def predict(self, x_val, y1_test=None, y2_test=None):\n if y1_test is None and y2_test is None:\n y1_test = np.ones(len(x_val))\n y2_test = y1_test\n score_model = False\n else:\n score_model = True\n\n self.stage_one.predict(x_val, y1_test)\n if score_model:\n stage_one_score = self.stage_one.score\n\n # This section will not generalize well beyond this specific dataset.\n if score_model:\n df = pd.DataFrame(data=[y1_test, self.stage_one.pred],\n columns=['y1_test', 'stage_one.pred'])\n df = pd.DataFrame(y1_test).rename(columns={'class_second': 'y1_test'})\n df['stage_one.pred'] = self.stage_one.pred\n df['tweets'] = x_val\n df['y2_test'] = y2_test\n\n # x2_val = df['tweets'].loc[df['stage_one.pred'] == 0]\n # y2_test_ = df['y2_test'].loc[df['stage_one.pred'] == 0]\n x2_val = df['tweets'].loc[df['stage_one.pred'] == 1]\n y2_test_ = df['y2_test'].loc[df['stage_one.pred'] == 1]\n else:\n # x2_val = x_val[self.stage_one.pred == 0]\n x2_val = x_val[self.stage_one.pred == 1]\n y2_test_ = np.ones(len(x2_val))\n\n self.stage_two.predict(x2_val, y2_test_)\n if score_model:\n stage_two_score = self.stage_two.score\n return (stage_one_score, stage_two_score, 
stage_one_score*stage_two_score)\n else:\n # return tweets that contain hate speech\n return x2_val[self.stage_two.pred == 0]", "def __call__(self, a, b):\n self.a = a\n self.b = b\n return a.data * b.data", "def iexact(self, other):", "def test_predict(self, pipeline):\n pipeline.fit(X, Y)\n y_out_fit = pipeline.predict(X_TEST_1)\n assert isinstance(y_out_fit, np.ndarray)\n assert y_out_fit.ndim == 1\n pipeline.partial_fit(X, Y)\n y_out_partial_fit = pipeline.predict(X_TEST_2)\n assert isinstance(y_out_partial_fit, np.ndarray)\n assert y_out_partial_fit.ndim == 1", "def prediction_b(self):\r\n return self._prediction_b", "def merge_two_calls(self) -> None:", "def test_ids_maker(self):\n firstins = BaseModel()\n secondins = BaseModel()\n self.assertNotEqual(firstins, secondins)", "def analogies(self, queries):\n pass", "def model_wrapper(self):\n original = self.args.rnn_type\n if(self.args.rnn_type=='DeepCoNN'):\n self.args.rnn_type = 'RAW_MSE_MAX_CNN_FM'\n self.args.base_encoder = 'Flat'\n elif(self.args.rnn_type=='TRANSNET'):\n self.args.rnn_type = 'RAW_MSE_MAX_CNN_FM_TNET'\n self.args.base_encoder = 'Flat'\n elif(self.args.rnn_type=='DATT'):\n self.args.rnn_type ='RAW_MSE_DUAL_DOT'\n self.args.base_encoder = 'Flat'\n elif(self.args.rnn_type=='MPCN'):\n self.args.rnn_type = 'RAW_MSE_MPCN_FN_FM'\n self.args.base_encoder = 'NBOW'\n\n print(\"Conversion to {} | base:{}\".format(\n self.args.rnn_type,\n self.args.base_encoder))", "def make_predictions(self):\n if is_classification(self.model):\n if self.ct == None:\n prediction = self.model.predict(self.input_data.to_numpy())\n probabilities = self.model.predict_proba(self.input_data.to_numpy())\n return prediction, probabilities\n elif self.ct != None: \n prediction = self.model.predict(self.data_into_model())\n probabilities = self.model.predict_proba(self.data_into_model())\n return prediction, probabilities\n else:\n raise Exception((\"{} not supported. Please create an issue on Github\").format(self.model))\n \n else:\n if self.ct == None:\n prediction = self.model.predict(self.input_data)\n return prediction\n elif self.ct != None: \n prediction = self.model.predict(self.data_into_model())\n return prediction\n else:\n raise Exception((\"{} not supported. 
Please create an issue on Github\").format(self.self.model))", "def test_return_self(self):\n\n df = d.create_df_2()\n\n x = ScalingTransformer(columns=[\"a\"], scaler=\"standard\")\n\n x_fitted = x.fit(df)\n\n assert (\n x_fitted is x\n ), \"return value from ScalingTransformer.fit not as expected (self).\"", "def test_predict(self):\n \n\n model ,vec, x_testing=setup_log_reg_classifier(self.training_data, self.training_y, self.testing_data,\"text\", method=\"count\")\n \n model2 ,vec_tfidf, x_testing2=setup_log_reg_classifier(self.training_data, self.training_y, self.testing_data,\"text\", method=\"tfidf\")\n \n \n \"\"\" Test correct data types and corrrect range of predicted values (1,0) for predict with countVectorizer\"\"\" \n \n self.assertIsInstance(predict(model,x_testing),\n np.ndarray)\n \n self.assertTrue(([0,1] ==np.unique(predict(model2,x_testing2))).all())\n\n \n \"\"\" Test correct data types and corrrect range of predicted values (1,0) for predict with tfidfVectorizer\"\"\" \n \n self.assertIsInstance(predict(model,x_testing),\n np.ndarray)\n \n self.assertTrue(([0,1] ==np.unique(predict(model2,x_testing2))).all())", "def __eq__(self, other):\n if not isinstance(other, RuleSchemaFormulaPredict):\n return False\n\n return self.__dict__ == other.__dict__", "def is_type_equivalent(self, other):\n mine = self._replace_defaults()\n theirs = other._replace_defaults()\n\n def remove_base(dct):\n # removes base attributes in the phyiscal layer.\n basekeys = Column._replace_defaults(self).keys()\n for k in basekeys:\n del dct[k]\n\n remove_base(mine)\n remove_base(theirs)\n\n return type(self) == type(other) and mine == theirs", "def test_attrs(self):\n for self_attr, wrapper_attr in [(\"reactor\", \"_reactor\"),\n (\"client\", \"_client\")]:\n self.assertIdentical(getattr(self, self_attr),\n getattr(self.wrapper, wrapper_attr))", "def __ror__(self, other):\n return self._dunder_concat(\n other=other,\n base_class=BaseForecaster,\n composite_class=MultiplexForecaster,\n attr_name=\"forecasters\",\n concat_order=\"right\",\n )", "def test_multipleinstancecreation(self):\n b1 = BaseModel()\n self.assertEqual(type(b1.id), str)\n self.assertEqual(type(b1.created_at), datetime)\n self.assertEqual(type(b1.updated_at), datetime)\n b2 = BaseModel()\n self.assertEqual(type(b2.id), str)\n self.assertEqual(type(b2.created_at), datetime)\n self.assertEqual(type(b2.updated_at), datetime)\n b3 = BaseModel()\n self.assertEqual(type(b3.id), str)\n self.assertEqual(type(b3.created_at), datetime)\n self.assertEqual(type(b3.updated_at), datetime)\n self.assertNotEqual(b1.id, b2.id, b3.id)", "def test_addingnewattributes(self):\n b1 = BaseModel()\n b1.name = \"Holberton\"\n b1.my_number = 89\n dictionary = b1.to_dict()\n self.assertEqual('name' in dictionary, True)\n self.assertEqual('my_number' in dictionary, True)\n b2 = BaseModel()\n dictionary2 = b2.to_dict()\n self.assertEqual('name' in dictionary2, False)\n self.assertEqual('my_number' in dictionary2, False)", "def __eq__(self, other):\n eq = True\n for attr in ['geocode',\n 'geocodeDict',\n 'geolevel',\n 'parentGeocode',\n 'raw',\n 'raw_housing',\n 'dp',\n 'syn',\n 'syn_unrounded',\n # 'cons',\n # 'invar',\n # 'dp_queries',\n # 'congDistGeocode',\n # 'sldlGeocode',\n # 'slduGeocode',\n ]:\n\n eq = eq and self.__getattribute__(attr) == other.__getattribute__(attr)\n\n #eq = eq and (np.array_equal(self.raw.toDense(), other.raw.toDense()))\n return eq", "def b_class_a(self):\n return self._b_class_a", "def __call__(self, images, **kwargs) -> 
Union[Predictions, List[Prediction]]:\n return super().__call__(images, **kwargs)", "def test_generate_leaf_node_predictions(self, mocker):\n\n leaf_nodes_return_value = np.array([1, 0, 1 / 3, 2])\n\n # set return value from _generate_leaf_node_predictions\n mocked = mocker.patch.object(\n DummyLeafNodeScaledConformalPredictor,\n \"_generate_leaf_node_predictions\",\n return_value=leaf_nodes_return_value,\n )\n\n mocked2 = mocker.patch.object(\n DummyLeafNodeScaledConformalPredictor,\n \"_count_leaf_node_visits_from_calibration\",\n return_value=np.array([1]),\n )\n\n # set a dummy value for leaf_node_counts attribute as\n # _count_leaf_node_visits_from_calibration is mocked\n dummy_confo_model = DummyLeafNodeScaledConformalPredictor()\n dummy_confo_model.leaf_node_counts = 1234\n\n data_arg = np.array([0, 1, 3, -9])\n\n dummy_confo_model._calculate_scaling_factors(data_arg)\n\n # test the call to _generate_leaf_node_predictions\n\n assert (\n mocked.call_count == 1\n ), \"incorrect number of calls to _generate_leaf_node_predictions\"\n\n call_args = mocked.call_args_list[0]\n call_pos_args = call_args[0]\n call_kwargs = call_args[1]\n\n assert (\n call_kwargs == {}\n ), \"keyword args incorrect in _generate_leaf_node_predictions call\"\n\n assert len(call_pos_args) == 1, \"incorrect number of positional args\"\n\n np.testing.assert_array_equal(call_pos_args[0], data_arg)\n\n # test _count_leaf_node_visits_from_calibration called with\n # _generate_leaf_node_predictions outputs\n\n assert (\n mocked2.call_count == 1\n ), \"incorrect number of calls to _count_leaf_node_visits_from_calibration\"\n\n call_args = mocked2.call_args_list[0]\n call_pos_args = call_args[0]\n call_kwargs = call_args[1]\n\n assert (\n call_pos_args == ()\n ), \"positional args incorrect in _count_leaf_node_visits_from_calibration call\"\n\n assert list(call_kwargs.keys()) == [\n \"leaf_node_predictions\"\n ], \"incorrect kwargs in _count_leaf_node_visits_from_calibration call\"\n\n np.testing.assert_array_equal(\n call_kwargs[\"leaf_node_predictions\"], leaf_nodes_return_value\n )", "def test_response_datapoint_observation_works(tmp_observe_class, train_Y, y_data, covars_proposed_iter, response_sampled_iter, monkeypatch):\n\n # device for torch tensor definitions\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n # covariates to sample\n resp = [2.2]\n resp_tensor = torch.tensor([resp], dtype=torch.double, device=device)\n\n # temp class to execute the test\n cls = tmp_observe_class\n\n # set proposed_X attribute (required for method to work)\n cls.initial_guess = torch.tensor([[1, 2, 3]], dtype=torch.double, device=device)#resp_tensor\n cls.proposed_X = torch.tensor([[1, 2, 3]], dtype=torch.double, device=device)#resp_tensor\n cls.train_Y = train_Y\n cls.model = {\"covars_proposed_iter\": covars_proposed_iter,\n \"response_sampled_iter\": response_sampled_iter}\n cls.y_data = y_data\n\n # monkeypatch\n def mock_input(x): # mock function to replace 'input' for unit testing purposes\n return \", \".join([str(x) for x in resp])\n monkeypatch.setattr(\"builtins.input\", mock_input)\n\n # set kwarg response to None (so manually provided input is used)\n kwarg_response = None\n\n # run the method being tested\n cls._get_response_datapoint(response=kwarg_response)\n\n # assert the right elements have been added\n assert cls.train_Y[-1].item() == resp[0]\n\n # assert that counter has been updated\n assert cls.model[\"response_sampled_iter\"] == cls.model[\"covars_proposed_iter\"]\n\n # only if 
covars_proposed_iter is ahead of sampled\n if covars_proposed_iter > response_sampled_iter:\n # assert that new row has been added\n assert cls.train_Y.size()[0] == train_Y.size()[0] + 1\n elif train_Y is None:\n # assert that cls.train_X has been initiated\n assert cls.train_Y.size()[0] == 1\n else:\n # assert that no new row has been added\n assert cls.train_Y.size()[0] == train_Y.size()[0]", "def predict_and_update(self, z):", "def assess_perf(self, bases: list, set_type, epoch):\n self.prepare_calc_performance()\n self.base_dfl = dict() # base dict of lists with prediction data frames\n for base in bases:\n self.base_dfl[base] = list()\n ohlcv_list = ad.SplitSets.split_sets(set_type, self.predictor.ohlcv.load_data(base))\n features_list = ad.SplitSets.split_sets(set_type, self.predictor.features.load_data(base))\n targets_list = ad.SplitSets.split_sets(set_type, self.predictor.targets.load_data(base))\n assert len(ohlcv_list) == len(features_list)\n assert len(ohlcv_list) == len(targets_list)\n for ix in range(len(ohlcv_list)):\n odf = ohlcv_list[ix]\n fdf = features_list[ix]\n tdf = targets_list[ix]\n if (fdf is None) or fdf.empty or (tdf is None) or tdf.empty:\n logger.warning(f\"empty data for {base} between {odf.index[0]} and {odf.index[-1]}\")\n continue\n [fdf, tdf] = ccd.common_timerange([fdf, tdf])\n\n if self.predictor.scaler is not None:\n fdf_scaled = self.predictor.scaler.transform(fdf.values)\n pred = self.predictor.kerasmodel.predict_on_batch(fdf_scaled)\n else:\n logger.error(\"missing scaler\")\n pred = self.predictor.kerasmodel.predict_on_batch(fdf.values)\n if pred is None:\n logger.warning(f\"no prediction data for {base} between {odf.index[0]} and {odf.index[-1]}\")\n continue\n pdf = pd.DataFrame(data=pred, index=fdf.index, columns=self.predictor.targets.target_dict().keys())\n pdf.loc[pdf.index[-1], ct.SELL] = 1 # force sell at end of data range\n if pdf.empty:\n logger.warning(f\"empty prediction data for {base} between {odf.index[0]} and {odf.index[-1]}\")\n continue\n pdf = pd.concat([odf.close, tdf.target, pdf], axis=1, join=\"inner\")\n self.base_dfl[base].append(self.calc_performance(pdf))\n logger.info(f\"\\n performance results \\n{self.total}\\n\")\n logger.info(f\"\\n precision results \\n{self.confusion}\\n\")\n return self.find_best()", "def testModel( self, classTest, classPred):", "def _baseDistance(self, b1, b2):\n if b1 > b2:\n b1, b2 = b2, b1\n distance = self.baseDistanceCache.get((b1,b2), None)\n if distance is None:\n distance = self.scoreFunction(\\\n self.baseFeatureArray[b1], self.baseFeatureArray[b2])\n self.baseDistanceCache[(b1,b2)] = distance\n return distance", "def predict(self, *args, **kwargs):\n return self(*args, **kwargs)", "def _update(self, other):\n # NOTE: detail map properties should NEVER be overridden. NEVER. EVER. 
kthx.\n if other.use_alpha:\n self.use_alpha = True\n if other.mipmap:\n self.mipmap = True", "def predict(self, X, a, b):\n pass", "def predict(self):\n raise NotImplementedError", "def get_stored(self):\n return self.__prepare(self.pred), self.__prepare(self.labels)", "def _infer_single(self, description: Description2):\n bindings = self.query_executer.get_arguments_bindings(description,\n restriction_pattern=Description2(body=[Atom('?x',\n self.relation,\n '?z')]))\n head = description.head\n\n # only supports p(?x,CONSTANT)\n predictions = [Prediction((b, head.predicate, head.object), [description]) for b in bindings]\n\n return predictions", "def test_get_value(self):\r\n dtype = self.dtype\r\n if dtype is None:\r\n dtype = theano.config.floatX\r\n\r\n rng = numpy.random.RandomState(utt.fetch_seed())\r\n x_orig = numpy.asarray(rng.uniform(0,1,[2,4]),dtype=dtype)\r\n x_cast = self.cast_value(x_orig)\r\n if self.shared_constructor_accept_ndarray:\r\n x_shared = self.shared_constructor(x_orig, borrow = False)\r\n assert isinstance(x_shared.get_value(), x_orig.__class__)\r\n\r\n x_shared = self.shared_constructor(x_cast, borrow = False)\r\n assert isinstance(x_shared.get_value(), x_cast.__class__)", "def _compare(self, x,y, pr=False):\n batched = self.ex.batched(x, y)\n looped = self.ex.looped(x, y)\n #print(f'batched value {batched}')\n #print(f'looped value {looped}')\n \n self.assertTrue(\n torch.equal(batched, looped)\n )", "def test00(self):\n b_0 = Base()\n b_1 = Base()\n self.assertEqual(b_0.id, 1)\n self.assertEqual(b_1.id, 2)", "def apply_wrappers(self, **wrapper_params):\n return self", "def __init__(self):\n\n self.result = None # To store the result\n self.predictor = None # To store the fit predictor", "def compare_thresholded_data_with_models(self):\n pass", "def evaluate_predictions(self, input_predictions, run_type, override_tags_roles=None):\n out_dict = self.results[run_type]\n # outer evaluation tags: model instances\n outer = [t for t in self.tags if t.startswith(\"model\") and not t.endswith(defs.roles.inputs)]\n inner = [t for t in self.tags if t in [defs.roles.train, defs.roles.val, defs.roles.test]]\n\n # collect indexes to across outer tags to produce, e.g. 
total <train> performance\n total_idxs_inner = defaultdict(list)\n has_multiple_models = len(outer) > 1\n\n for outer_tag in outer:\n out_idx = self.indexes[self.tags.index(outer_tag)]\n out_dict[outer_tag] = {}\n\n for inner_tag in inner:\n # get prediction indexes\n in_idx = self.indexes[self.tags.index(inner_tag)]\n joint_idx = np.intersect1d(out_idx, in_idx)\n current_predictions = input_predictions[joint_idx]\n if len(current_predictions) == 0:\n continue\n\n total_idxs_inner[inner_tag].append(joint_idx)\n\n out_dict[outer_tag][inner_tag] = {}\n\n for measure in self.available_measures:\n result = self.evaluate_measure(current_predictions, joint_idx, measure, tag_info=(outer_tag, inner_tag))\n out_dict[outer_tag][inner_tag][measure] = result\n do_print = (not has_multiple_models) or self.should_print_this(run_type, outer_tag)\n self.compute_additional_info(current_predictions, joint_idx, f\"{run_type}-{outer_tag}-{inner_tag}\", do_print=do_print)\n\n if has_multiple_models:\n # aggregate\n out_dict[\"all_tags\"] = {}\n self.aggregate_tags(outer, inner, out_dict)\n for o in total_idxs_inner:\n self.compute_additional_info(input_predictions, np.concatenate(total_idxs_inner[o]), f\"{run_type}-{o}-all_tags\", do_print=not self.is_baseline_run(run_type))\n print()", "def __call__(self, pred_texture, gt_texture):\n pred_class = self.classifier.predict(pred_texture)\n gt_class = self.classifier.predict(gt_texture)\n if pred_class == gt_class:\n return 0\n else:\n return 1", "def test_type(self):\n self.assertEqual(type(self.base1), BaseModel)\n self.assertEqual(type(self.base2), BaseModel)", "def merged_rep(self,other):\n raise NotImplementedError(\"Abstract method\")", "def __eq__(self, other: Any) -> bool:\n if not isinstance(other, DetectionResult):\n return False\n\n return self.to_pb2().__eq__(other.to_pb2())", "def predict_sync(\n input: predictor.Input = Body(..., example=predictor.factory.mock_input()),\n ):\n return predictor.run(input)", "def test_basemodel_kwargs_to_dict(self):\n B1 = BaseModel()\n dict = B1.to_dict()\n B2 = BaseModel(**dict)\n self.assertEqual(B1.id, B1.id)\n self.assertEqual(B1.created_at, B2.created_at)\n self.assertEqual(B1.updated_at, B2.updated_at)\n self.assertNotEqual(B1, B2)", "def __eq__(self, other):\n return (isinstance(other, type(self)) and (self.get_all_features() == other.get_all_features()))", "def relate(self, other):\n ...", "def test_shared_link(self):\n\n head = L.Linear(2, 2)\n model_a = chainer.ChainList(head.copy(), L.Linear(2, 3))\n model_b = chainer.ChainList(head.copy(), L.Linear(2, 4))\n\n a_arrays = async_.extract_params_as_shared_arrays(model_a)\n b_arrays = async_.extract_params_as_shared_arrays(model_b)\n\n print(('model_a shared_arrays', a_arrays))\n print(('model_b shared_arrays', b_arrays))\n\n head = L.Linear(2, 2)\n model_a = chainer.ChainList(head.copy(), L.Linear(2, 3))\n model_b = chainer.ChainList(head.copy(), L.Linear(2, 4))\n\n async_.set_shared_params(model_a, a_arrays)\n async_.set_shared_params(model_b, b_arrays)\n\n print('model_a replaced')\n a_params = dict(model_a.namedparams())\n for param_name, param in list(a_params.items()):\n print((param_name, param.array.ctypes.data))\n\n print('model_b replaced')\n b_params = dict(model_b.namedparams())\n for param_name, param in list(b_params.items()):\n print((param_name, param.array.ctypes.data))\n\n # Pointers to head parameters must be the same\n self.assertEqual(a_params['/0/W'].array.ctypes.data,\n b_params['/0/W'].array.ctypes.data)\n 
self.assertEqual(a_params['/0/b'].array.ctypes.data,\n b_params['/0/b'].array.ctypes.data)\n\n # Pointers to tail parameters must be different\n self.assertNotEqual(a_params['/1/W'].array.ctypes.data,\n b_params['/1/W'].array.ctypes.data)\n self.assertNotEqual(a_params['/1/b'].array.ctypes.data,\n b_params['/1/b'].array.ctypes.data)", "def _fit_exact_match(self):\n entity_map = self._resource_loader.get_entity_map(self.type)\n self._exact_match_mapping = self._process_entity_map(\n self.type, entity_map, self._normalizer\n )", "def fold_prediction_result(x_train, y_train, x_test, y_test, classification_types, basic_classifier):\n metrics_dict = {}\n for metric in METRICS:\n metrics_dict[metric] = {}\n training_time = {}\n test_time = {}\n for classification in classification_types:\n # logger.info(\"*****************************\")\n logger.info(classification)\n if classification in ENCODING_TYPES:\n classifier = EncodedClassifier(basic_classifier, encoding_type=classification)\n elif classification == \"meta_binary_tree_classifier\":\n classifier = MetaBinaryTreeClassifier(basic_classifier)\n elif classification == \"standard-classifier\":\n classifier = basic_classifier\n else:\n raise Exception(\"The Classification Method is not a valid one\")\n start_time = time.time()\n if isinstance(classifier, h2o.estimators.H2OEstimator):\n classifier = fit_h2o(x_train, y_train, classifier)\n else:\n classifier.fit(x_train, y_train)\n train_time = time.time() - start_time\n if isinstance(classifier, h2o.estimators.H2OEstimator):\n column_types = get_h2o_column_types(x_test.columns)\n x_test = H2OFrame(x_test, column_types=column_types)\n prediction = classifier.predict(x_test)\n y_pred = np.concatenate(prediction['predict'].as_data_frame().values)\n else:\n y_pred = classifier.predict(x_test)\n prediction_time = time.time() - train_time - start_time\n # Calculate metrics\n for metric, f in METRICS.items():\n metrics_dict[metric][classification] = f(y_test, y_pred)\n training_time[classification] = train_time\n test_time[classification] = prediction_time\n\n return metrics_dict, training_time, test_time", "def test_baseid_different(self):\n test1 = BaseModel()\n test2 = BaseModel()\n self.assertNotEqual(test1.id, test2, id)", "def test_torch_original_layer(self):\n # This tests the forward pass\n model = self.get_digital_layer(in_channels=2, out_channels=3, kernel_size=4, padding=2)\n x = randn(3, 2, 4, 5, 6)\n\n if self.use_cuda:\n x = x.cuda()\n\n y = model(x)\n\n analog_model = self.get_layer(in_channels=2, out_channels=3, kernel_size=4, padding=2)\n self.set_weights_from_digital_model(analog_model, model)\n\n y_analog = analog_model(x)\n self.assertTensorAlmostEqual(y_analog, y)", "def test_torch_original_layer(self):\n # This tests the forward pass\n model = self.get_digital_layer(in_channels=2, out_channels=3, kernel_size=4, padding=2)\n x = randn(3, 2, 4)\n\n if self.use_cuda:\n x = x.cuda()\n\n y = model(x)\n\n analog_model = self.get_layer(in_channels=2, out_channels=3, kernel_size=4, padding=2)\n self.set_weights_from_digital_model(analog_model, model)\n\n y_analog = analog_model(x)\n self.assertTensorAlmostEqual(y_analog, y)", "def test_article_subclass_behavior(self):\n print self.base_article.defined_attributes\n print self.sub_article.defined_attributes\n self.assertEqual(self.sub_article.first().attributes,\n self.base_article.first().attributes)", "def matchModelPose(self):\n\n pass", "def real_result(self, other):\r\n self_in_game_skill = np.random.normal(self.skill,self.var)\r\n 
other_in_game_skill = np.random.normal(other.skill,other.var)\r\n if self_in_game_skill > other_in_game_skill:\r\n return 1\r\n else:\r\n return 0", "def test_torch_original_layer(self):\n # This tests the forward pass\n model = self.get_digital_layer(in_channels=2, out_channels=3, kernel_size=4, padding=2)\n x = randn(3, 2, 4, 4)\n\n if self.use_cuda:\n x = x.cuda()\n\n y = model(x)\n\n analog_model = self.get_layer(in_channels=2, out_channels=3, kernel_size=4, padding=2)\n self.set_weights_from_digital_model(analog_model, model)\n\n y_analog = analog_model(x)\n self.assertTensorAlmostEqual(y_analog, y)", "def prediction_b_all(self):\r\n return self._prediction_b_all", "def __matmul__(self, other: 'ModelParameters') -> 'ModelParameters':\n raise NotImplementedError()", "def test_inheritedClassesEquality(self):\n self.assertTrue(Record(1, 2) == DerivedRecord(1, 2))\n self.assertFalse(Record(1, 2) == DerivedRecord(1, 3))\n self.assertFalse(Record(1, 2) == DerivedRecord(2, 2))\n self.assertFalse(Record(1, 2) == DerivedRecord(3, 4))", "def __eq__(self, other):\n if not isinstance(other, InlineResponseDefault1):\n return False\n\n return self.__dict__ == other.__dict__", "def __call__(a, b):", "def __isub__(self, other):\n\t\t#print(\"isub\")\n\t\t# merge other branch\n\t\tself.graph.update(other.graph)\n\t\tself.bottoms.update(other.bottoms)\n\t\tself.output_shape.update(other.output_shape)\n\t\tlayer_name = \"sub_{}\".format(len(self.graph))\n\t\tself.graph[layer_name] = layer_name\n\t\tself.bottoms[layer_name] = [self.cur_id, other.cur_id]\n\t\tself.output_shape[layer_name] = self.cur_tensor.size()\n\t\tself.cur_id = layer_name\n\t\t# save memory\n\t\tdel other\n\t\treturn self", "def evaluate(self, X1, X2):\r\n raise NotImplementedError()", "def _get_target_single(self,\n flat_anchors,\n valid_flags,\n cls_scores,\n bbox_preds,\n num_level_anchors,\n gt_bboxes,\n gt_bboxes_ignore,\n gt_labels,\n img_meta,\n label_channels=1,\n unmap_outputs=True,\n is_cls_assigner=True):\n inside_flags = anchor_inside_flags(flat_anchors, valid_flags,\n img_meta['img_shape'][:2],\n self.train_cfg.allowed_border)\n if not inside_flags.any():\n return (None, ) * 7\n # assign gt and sample anchors\n anchors = flat_anchors[inside_flags, :]\n\n num_level_anchors_inside = self.get_num_level_anchors_inside(\n num_level_anchors, inside_flags)\n bbox_preds_valid = bbox_preds[inside_flags, :]\n cls_scores_valid = cls_scores[inside_flags, :]\n\n assigner = self.cls_assigner if is_cls_assigner else self.reg_assigner\n\n # decode prediction out of assigner\n bbox_preds_valid = self.bbox_coder.decode(anchors, bbox_preds_valid)\n assign_result = assigner.assign(anchors, num_level_anchors_inside,\n gt_bboxes, gt_bboxes_ignore, gt_labels,\n cls_scores_valid, bbox_preds_valid)\n sampling_result = self.sampler.sample(assign_result, anchors,\n gt_bboxes)\n\n num_valid_anchors = anchors.shape[0]\n bbox_targets = torch.zeros_like(anchors)\n bbox_weights = torch.zeros_like(anchors)\n labels = anchors.new_full((num_valid_anchors, ),\n self.num_classes,\n dtype=torch.long)\n label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float)\n\n pos_inds = sampling_result.pos_inds\n neg_inds = sampling_result.neg_inds\n if len(pos_inds) > 0:\n if hasattr(self, 'bbox_coder'):\n pos_bbox_targets = self.bbox_coder.encode(\n sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes)\n else:\n # used in VFNetHead\n pos_bbox_targets = sampling_result.pos_gt_bboxes\n bbox_targets[pos_inds, :] = pos_bbox_targets\n 
bbox_weights[pos_inds, :] = 1.0\n if gt_labels is None:\n # Only rpn gives gt_labels as None\n # Foreground is the first class since v2.5.0\n labels[pos_inds] = 0\n else:\n labels[pos_inds] = gt_labels[\n sampling_result.pos_assigned_gt_inds]\n if self.train_cfg.pos_weight <= 0:\n label_weights[pos_inds] = 1.0\n else:\n label_weights[pos_inds] = self.train_cfg.pos_weight\n if len(neg_inds) > 0:\n label_weights[neg_inds] = 1.0\n\n # map up to original set of anchors\n if unmap_outputs:\n num_total_anchors = flat_anchors.size(0)\n anchors = unmap(anchors, num_total_anchors, inside_flags)\n labels = unmap(\n labels, num_total_anchors, inside_flags, fill=self.num_classes)\n label_weights = unmap(label_weights, num_total_anchors,\n inside_flags)\n bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags)\n bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags)\n\n return (anchors, labels, label_weights, bbox_targets, bbox_weights,\n pos_inds, neg_inds)", "def fit(\n self,\n base_models_predictions: np.ndarray,\n true_targets: np.ndarray,\n model_identifiers: List[Tuple[int, int, float]],\n ) -> 'SingleBest':\n return self", "def _compare_attributes_of_interpolate1(self, first: Node, second: Node) -> bool:\n # If some of attributes 'mode', 'align_corners', 'antialias', 'pads_begin', 'pads_end' are different,\n # then attributes of nodes are not identical.\n op = Interpolate(graph=first.graph, attrs={})\n for attr in ['mode', 'align_corners', 'antialias', 'pads_begin', 'pads_end']:\n if first.soft_get(attr, default=op.attrs[attr]) != second.soft_get(attr, default=op.attrs[attr]):\n return False\n return True", "def __eq__(self, other: Any) -> bool:\n if not isinstance(other, Detection):\n return False\n\n return self.to_pb2().__eq__(other.to_pb2())" ]
[ "0.5750074", "0.57188416", "0.5443609", "0.5316026", "0.52933276", "0.5275967", "0.52627426", "0.5258058", "0.5199715", "0.51438063", "0.5142573", "0.51336676", "0.51235074", "0.5116406", "0.5077794", "0.5052014", "0.50518346", "0.50518346", "0.50518346", "0.5041546", "0.50404185", "0.5038786", "0.5036414", "0.50239336", "0.5020099", "0.49958786", "0.49761665", "0.4976132", "0.4973257", "0.4971351", "0.4968483", "0.4965209", "0.4935644", "0.4923026", "0.49217418", "0.49183074", "0.49177343", "0.4913695", "0.4913363", "0.49029702", "0.4900121", "0.4898883", "0.48979995", "0.48947412", "0.4890053", "0.48897398", "0.48895672", "0.48693225", "0.4868151", "0.4867039", "0.48648378", "0.48636538", "0.48609334", "0.48575145", "0.48521647", "0.4851989", "0.4850634", "0.48498505", "0.4842638", "0.48396534", "0.48337734", "0.48304847", "0.48272783", "0.48258364", "0.48075658", "0.48046568", "0.47990736", "0.47978556", "0.47955006", "0.4793304", "0.47901404", "0.47867617", "0.47854835", "0.4784101", "0.47837225", "0.47812343", "0.47798848", "0.4778292", "0.47756568", "0.47749823", "0.47644", "0.47628358", "0.47588524", "0.47524527", "0.47524166", "0.4750412", "0.4750251", "0.47494465", "0.47486454", "0.47480163", "0.47446486", "0.4743132", "0.4743129", "0.47410652", "0.47396228", "0.4739365", "0.4736553", "0.47355333", "0.4733466", "0.47301632" ]
0.5298379
4
The name of the variable in which the list of all arguments to the function is stored
def name(self):
    return self._name
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_arg_name(self, arg, variable_name):", "def _name(self):\n return self.arguments[0].split('(')[0]", "def _name(self):\n return self._arguments[0].split('(')[0]", "def func_var_names(func):\n names = func.__code__.co_varnames[:func.__code__.co_argcount]\n return names", "def build_stkvar_name(*args):\n return _ida_frame.build_stkvar_name(*args)", "def variable_argument(self):\n if self.is_variadic():\n if self.args[-1] == '...':\n # An unnamed variable argument replaces __VA_ARGS__\n return \"__VA_ARGS__\"\n else:\n # Strip '...' from argument name\n return self.args[-1][:-3]\n else:\n return None", "def parameter_names(self) -> List[str]:", "def get_arg_name(args):\n names = []\n for arg in args:\n if type(arg).__name__ == 'ID':\n names.append(arg.name)\n elif type(arg).__name__ == 'UnaryOp':\n names.append(arg.expr.name)\n elif type(arg).__name__ == 'StructRef':\n #############################################\n # So far, we don't care about this situation:\n # fun(a->b)\n # POSSIBLE CODE HERE\n #############################################\n names.append(None)\n return names", "def _getArgStr(self):\n return \"name=%r\" % (self.name)", "def variables_used (self) :\r\n\t\treturn [i[0] for i in self.parameters]", "def variables(self):\n return tuple(flatten([a.variables for a in self.args]))", "def getvarname(self,j_): # 3\n sizename_ = (1 + self.getvarnamelen((j_)))\n arr_name = array.array(\"b\",[0]*((sizename_)))\n memview_arr_name = memoryview(arr_name)\n res,resargs = self.__obj.getvarname(j_,sizename_,memview_arr_name)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n retarg_name = resargs\n retarg_name = arr_name.tobytes()[:-1].decode(\"utf-8\",errors=\"ignore\")\n return retarg_name", "def argument_list(self):\n answer = self._call('argument_list')\n return answer.names", "def retrieve_name(self, var):\r\n\t\tfor fi in reversed(inspect.stack()):\r\n\t\t\tnames = [var_name for var_name, var_val in fi.frame.f_locals.items() if var_val is var]\r\n\t\t\tif len(names) > 0:\r\n\t\t\t\treturn names[0]\r\n\t\treturn \"<unknown>\"", "def get_variable_names(self):\n return [var[1] for var in self.variables]", "def get_id_args(func, arg):\n\n return \"{} {}\".format(func.__name__, arg)", "def retrieve_name(var):\n for fi in reversed(inspect.stack()):\n names = [var_name for var_name, var_val in fi.frame.f_locals.items() if var_val is var]\n if len(names) > 0:\n return names[0]", "def var_name(i, j):\n return \"x_\" + str(i) + \",\" + str(j)", "def argnames(method):\n return [arg for arg in method.__code__.co_varnames if arg != \"self\"]", "def this_func_input_name():\n\treturn input_name_from_func_name(inspect.stack()[1][3])", "def varNames(self):\n return self.__varNames", "def variables(self):\n return [i.name for i in self.inputs + self.outputs]", "def varfunc(self, fields=[]):\n self.func_arguments = fields", "def function_name(parameters):", "def name(self):\n\t\treturn self.args[0]", "def get_layer_var_names(self):\n return(self.params)", "def varname(p):\n for line in inspect.getframeinfo(inspect.currentframe().f_back)[3]:\n m = re.search(r'\\bvarname\\s*\\(\\s*([A-Za-z_][A-Za-z0-9_]*)\\s*\\)', line)\n if m:\n return m.group(1)", "def func_args(self) -> str:\n\n return self.call_data[10:]", "def get_variable_full_name(var):\n if var._save_slice_info:\n return var._save_slice_info.full_name\n else:\n return var.op.name", "def variables_used (self) :\r\n\t\t## These names do not contain dimension specification (everything in 
brackets\r\n\t\t## that comes after a name is am array index - either the arry was declared\r\n\t\t## correctly or it is wrong anyway, there is no implicit declaration of arrays) !\r\n\r\n\t\tresult = []\r\n\r\n\t\tfor l in self.equ_lists :\r\n\t\t\tfor var_name in l :\r\n\t\t\t\tresult.append(var_name[0])\r\n\t\treturn result", "def _var(self, name=None, context=None):\n\t\tif name is None: name = None\n\t\tif context is None: context = self.context\n\t\tif (not name):\n\t\t\treturn context.getVariables().keys()\n\t\telif True:\n\t\t\treturn context.getVariables().get(name)", "def _name_from_args(func, _, params):\n return \"{}_{}\".format(func.__name__, \"_\".join(str(arg) for arg in params.args))", "def get_variable_names(self):\n return [VariableString(s) for s in\n self._design.GetVariables()+self._design.GetPostProcessingVariables()]", "def get_input_var_names(self):\n return self._input_var_names", "def names(self):\n result = []\n result.extend(self.positional_arguments)\n if self.arbitary_positional_arguments is not None:\n result.append(self.arbitary_positional_arguments)\n if self.arbitary_keyword_arguments is not None:\n result.append(self.arbitary_keyword_arguments)\n result.extend(self.keyword_arguments)\n return result", "def parameterNames(self, p_int): # real signature unknown; restored from __doc__\n return []", "def get_stkvar(*args):\n return _ida_frame.get_stkvar(*args)", "def var_names(self):\n return self._var_names", "def _get_param_names(self):\n temp_params = {'function': self.function, 'target': self.target}\n\n temp_params.update(self.kwargs)\n\n return temp_params", "def arg_names(self):\n return self._arg_names", "def arg_names(self):\n return self._arg_names", "def get_argument_module_name(arg, dim):\n return \"arg_%s_dim%s\" % (arg.name, dim)", "def get_parameter_names(self):\n parNames = []\n # for par in self.variables: # TODO: LIKELY A BUG! 
DOES THE SAME AS get_variable_names()\n for par in self.parameters: # TRYING TO SOLVE THE ISSUE\n # EstimationVariable\n parNames.append(par.name)\n return parNames", "def variable_names(self):\n \n return [x['variable'] for x in self.variable_dicts()]", "def _get_name(var):\n lcls = inspect.stack()[2][0].f_locals\n for name in lcls:\n if id(var) == id(lcls[name]):\n return name\n return None", "def variable_names(self):\n\n status, stdout, stderr = self.__xcall__(['--print-variables'])\n\n if status != 0:\n raise RuntimeError(\"error querying --print-variables for package `%s': %s\" % (self.name, stderr))\n\n return stdout.strip().split()", "def get_variables_func(arguments, exclude):\n names = [name for name in arguments.keys() if name not in exclude]\n return lambda obj: {name: getattr(obj, name) for\n name in names}", "def GetFunctionParametersAndValues():\n frame = inspect.currentframe().f_back\n args, _, _, values = inspect.getargvalues(frame)\n return ([(i, values[i]) for i in args])", "def variables_used (self) :\r\n\t\t## These names possibly contain dimension specification!\r\n\t\treturn self.variable_names", "def getVariableInfo(self, variables, name):\r\n\r\n return [var.return_variable_dict() for var in variables if var.name == name][0]", "def get_name_list(msh, varname):\n return [str(chartostring(v)) for v in msh.variables[varname]]", "def get_input_names(self):\n inputNames = []\n for inVar in self.inputs:\n # inVar is of type InOutVar and the object that it contains is a PyFMI variable\n inputNames.append(inVar.get_object().name)\n return inputNames", "def get_arg(self, name):\n return getattr(self.args, f\"{self.key}_{self.alias}_{name}\")", "def getargvalues(frame):\r\n args, varargs, varkw = getargs(frame.f_code)\r\n return ArgInfo(args, varargs, varkw, frame.f_locals)", "def getbarvarname(self,i_): # 3\n sizename_ = (1 + self.getbarvarnamelen((i_)))\n arr_name = array.array(\"b\",[0]*((sizename_)))\n memview_arr_name = memoryview(arr_name)\n res,resargs = self.__obj.getbarvarname(i_,sizename_,memview_arr_name)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n retarg_name = resargs\n retarg_name = arr_name.tobytes()[:-1].decode(\"utf-8\",errors=\"ignore\")\n return retarg_name", "def punkte(self):\n return self.args", "def extract_keywords(func):\n if hasattr(func, 'im_func'):\n func = func.im_func\n\n try:\n return func.func_code.co_varnames[-len(func.func_defaults):]\n except (TypeError, ValueError, IndexError):\n return tuple()", "def get_all_variables_names(self):\n return self.project.get_variable_names() + self.design.get_variable_names()", "def args_str(self):", "def _var_name_sub(self, sprintf, quote=False):\n q = ''\n if quote:\n q = \"'\"\n name_list = map(lambda x: q + self.cdict[x][0] + q, sprintf[\"vars\"] )\n return sprintf[\"text\"] % tuple(name_list)", "def inspect_args_func(frame):\n args, _, _, values = inspect.getargvalues(frame)\n return {key: values[key] for key in args if key != 'self'}", "def getElementName(self):\n return _libsbml.LocalParameter_getElementName(self)", "def getElementName(self):\n return _libsbml.ListOfLocalParameters_getElementName(self)", "def getPositionalArgs():", "def name(self):\n\t\treturn self._func_name", "def variables(s):\n result = set([])\n def walk(s):\n if is_variable(s):\n result.add(s)\n else:\n for arg in s.args:\n walk(arg)\n walk(s)\n return result", "def getBindedNames(self):\n names = []\n for function in self.functions:\n names.append(function.__name__)\n return \", 
\".join(names)", "def getInfoVariableNames(self, product):\r\n return []", "def return_parameter_names():\n return list(titles), list(labels)", "def argnames(self):\n if self.get_key is None:\n return set()\n return set(self.get_key.names)", "def getLinIterVarNames( self ):\n\n self.updateAdb( )\n\n return self.iterNames.keys()", "def name(cls):\n return arg.s()(cls.func).func.__name__", "def __parameters__(self) -> tuple[TypeVar, ...]:\n return super().__getattribute__(\"_parameters\")", "def parameter_names_from_model(model):\n variables = model.getVariables()\n itvar = variables.iterator()\n names = []\n for i in xrange(len(variables)):\n currentvar = itvar.Next()\n names.append(currentvar.GetName())\n return names", "def var():\n def _var(quoted_name):\n name = quoted_name.subexpression.name\n if (value := get_name(name)) is not None:\n return value\n else:\n raise TypeError(f\"Binding {name} not found\")\n yield (\"(λ &[name] . any)\", _var)", "def getElementName(self):\n return _libsbml.ListOfParameters_getElementName(self)", "def getVisitableNodesNamed(self):\n\n return ((\"locals_arg\", self.subnode_locals_arg),)", "def get_name(self) -> str:\n # read the original value passed by the command\n name = self.raw_param.get(\"name\")\n\n # this parameter does not need dynamic completion\n # this parameter does not need validation\n return name", "def retr():\n stack = currentframe().f_back.f_locals.setdefault(SN, [])\n return stack[-1]", "def getDataVariableNames(self, product):\r\n return []", "def vars(self):\n return self.v", "def name(self):", "def name(self):", "def name(self):", "def name(self):", "def args(self) -> List[str]:\n return self.__args", "def get_arg_list(sv, nam):\r\n cond, value=sv.Object[nam].clauses[0] # in first clause: (condition, value), value is a node with list of args\r\n if type(value)!=tuple or value[0]!=Comma or not value[1]: # expecting (',', [(argname1, None, None), (argname2, None, None)...], None)\r\n print(\"\\n\", Anom_no_args) # *** Anomaly: cannot find function arguments *** \r\n print(nam)\r\n raise ReferenceError\r\n argu_list = [x[0] for x in value[1] if x is not None] # extract just the names\r\n return argu_list", "def getElementName(self):\n return _libsbml.Parameter_getElementName(self)", "def get_variables(self):\n return [self.g_t, self.m_t]", "def getName(self):\n return _libsbml.Parameter_getName(self)", "def get_variable_names(self):\n varNames = []\n for var in self.variables:\n # EstimationVariable\n varNames.append(var.name)\n return varNames", "def step_param(self):\n if self.variable_name is None:\n return self.step_name\n elif self.step_name is None:\n return self.variable_name\n else:\n return '{step}__{var}'.format(\n step=self.step_name, var=self.variable_name)", "def visit_Identifier(self, node):\n if node.args is None:\n # Not a function.\n return node.name\n elif node.name == \"size\":\n # size(arg)\n if len(node.args) > 2:\n raise RuntimeError(\n \"{}:Too many arguments to 'size': {}\".format(\n self.context.linenumber, self.expr)\n )\n # isinstance(node.args[0], declalst.Identifier)\n argname = node.args[0].name\n arg = declast.find_arg_by_name(self.decls, argname)\n if arg is None:\n raise RuntimeError(\n \"{}:Unknown argument '{}': {}\".format(\n self.context.linenumber, argname, self.expr)\n )\n return \"size\"\n elif node.name in [\"len\", \"len_trim\"]:\n # len(arg) len_trim(arg)\n if len(node.args) != 1:\n raise RuntimeError(\n \"{}:Too many arguments to '{}': {}\".format(\n self.context.linenumber, node.name, 
self.expr)\n )\n argname = node.args[0].name\n arg = declast.find_arg_by_name(self.decls, argname)\n if arg is None:\n raise RuntimeError(\n \"{}:Unknown argument '{}': {}\".format(\n self.context.linenumber, argname, self.expr)\n )\n # XXX - Make sure character\n# if arg.attrs[\"dimension\"] is None:\n# raise RuntimeError(\n# \"Argument '{}' must have dimension attribute: {}\".format(\n# argname, self.expr\n# )\n# )\n return node.name\n else:\n # Assume a user defined function.\n return self.param_list(node)", "def function_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"function_name\")", "def function_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"function_name\")", "def variables(self):\r\n return self.get_field('variable')", "def get_name():", "def get_global_variable_names(self):\n return [b\"\".join(_i).strip().decode()\n for _i in self._f.variables[\"name_glo_var\"][:]]", "def _getArgStr(self):\n return \"name=%s, host=%s, port=%s\" % (self.name, self.host, self.port)", "def arguments_from_call_funccode(f):\n fc = fc_or_c(f.__call__)\n argcount = fc.co_argcount\n args = list(fc.co_varnames[1:argcount])\n if not args:\n raise RuntimeError('Function has variable number of arguments')\n return args", "def __name__(self):\n return '_'.join([function.__name__ for function in self.functions])" ]
[ "0.74990004", "0.69113594", "0.6881187", "0.67148364", "0.6657547", "0.6623787", "0.6606068", "0.6556822", "0.65215355", "0.6428388", "0.64085597", "0.63898975", "0.6327654", "0.6321967", "0.63197297", "0.6268201", "0.6252624", "0.6248424", "0.6245552", "0.6207349", "0.62011933", "0.6200882", "0.619044", "0.6155514", "0.6123595", "0.61233425", "0.61204296", "0.6113574", "0.6078315", "0.60621715", "0.6051216", "0.60472846", "0.6030483", "0.6015061", "0.6007557", "0.5995664", "0.59797573", "0.59732765", "0.5973139", "0.5944687", "0.5944687", "0.59400886", "0.5917472", "0.5910327", "0.5895279", "0.58693254", "0.58651304", "0.58502984", "0.5824936", "0.5824758", "0.5823479", "0.5821181", "0.58198714", "0.58174723", "0.58069724", "0.5803482", "0.5800264", "0.5772607", "0.576019", "0.5756857", "0.57486856", "0.5743096", "0.5732448", "0.57292545", "0.57165146", "0.57137614", "0.57030994", "0.56975526", "0.5689205", "0.56884986", "0.56873196", "0.56852144", "0.5679534", "0.5676958", "0.5669165", "0.566566", "0.56507045", "0.5643198", "0.5640349", "0.56382436", "0.56137747", "0.5607401", "0.5607401", "0.5607401", "0.5607401", "0.5580074", "0.5564933", "0.55503553", "0.5546599", "0.55440956", "0.5540349", "0.55316013", "0.55223554", "0.55218", "0.55218", "0.55170906", "0.5512946", "0.55015486", "0.54942137", "0.5493715", "0.5492744" ]
0.0
-1
The names of the arguments to the function which are contained in the PyArgKeywords list
def arg_names(self):
    return self._arg_names
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def names(self):\n result = []\n result.extend(self.positional_arguments)\n if self.arbitary_positional_arguments is not None:\n result.append(self.arbitary_positional_arguments)\n if self.arbitary_keyword_arguments is not None:\n result.append(self.arbitary_keyword_arguments)\n result.extend(self.keyword_arguments)\n return result", "def argument_list(self):\n answer = self._call('argument_list')\n return answer.names", "def _getargs(fn_sig):\n params = fn_sig.parameters\n args = []\n for k, v in params.items():\n if (v.kind & v.POSITIONAL_OR_KEYWORD) == v.POSITIONAL_OR_KEYWORD:\n args.append(k)\n else:\n msg = \"%s argument type unsupported in jitclass\" % v.kind\n raise errors.UnsupportedError(msg)\n return args", "def extract_keywords(func):\n if hasattr(func, 'im_func'):\n func = func.im_func\n\n try:\n return func.func_code.co_varnames[-len(func.func_defaults):]\n except (TypeError, ValueError, IndexError):\n return tuple()", "def argnames(method):\n return [arg for arg in method.__code__.co_varnames if arg != \"self\"]", "def get_keyword_args(function):\n argspec = inspect.getargspec(function)\n kwargs = argspec.args[len(argspec.args) - len(argspec.defaults):]\n kwargs = {arg: value for arg, value in zip(kwargs, argspec.defaults)}\n return kwargs", "def argnames(self):\n if self.get_key is None:\n return set()\n return set(self.get_key.names)", "def list_kwargs(func):\n \n details = inspect.getargspec(func)\n nopt = len(details.defaults)\n \n return details.args[-nopt:]", "def getPositionalArgs():", "def parameter_names(self) -> List[str]:", "def inspect_args_func(frame):\n args, _, _, values = inspect.getargvalues(frame)\n return {key: values[key] for key in args if key != 'self'}", "def _get_args(function, varargs=False):\n\n try:\n params = signature(function).parameters\n except ValueError:\n # Error on builtin C function\n return []\n args = [\n key\n for key, param in params.items()\n if param.kind not in (param.VAR_POSITIONAL, param.VAR_KEYWORD)\n ]\n if varargs:\n varargs = [\n param.name\n for param in params.values()\n if param.kind == param.VAR_POSITIONAL\n ]\n if len(varargs) == 0:\n varargs = None\n return args, varargs\n else:\n return args", "def args(self):\n\t\tret = []\n\t\tfor argname in self._arg_names:\n\t\t\tret += [self._args[argname]]\n\t\treturn ret", "def args(self) -> List[str]:\n return self.__args", "def parameterNames(self, p_int): # real signature unknown; restored from __doc__\n return []", "def func_args(self) -> str:\n\n return self.call_data[10:]", "def _get_param_names(self):\r\n return sorted([p\r\n for p in self.__dict__\r\n if p != 'additional_args'])", "def _validate_arglist_and_kwlist(self, p, items, keywords):\n kwnames = set()\n args = []\n kws = []\n self._validate_arglist_list(items, p.lexer.lexer)\n for arg in items:\n if isinstance(arg, ast.keyword):\n kws.append(arg)\n kwnames.add(arg.arg)\n else:\n args.append(arg)\n for kw in keywords:\n if not isinstance(kw, ast.keyword):\n msg = 'only named arguments may follow *expression'\n tok = FakeToken(p.lexer.lexer, p.lineno(2))\n syntax_error(msg, tok)\n if kw.arg in kwnames:\n msg = 'keyword argument repeated'\n tok = FakeToken(p.lexer.lexer, kw.lineno)\n syntax_error(msg, tok)\n kwnames.add(kw.arg)\n kws.extend(keywords)\n\n return args, kws", "def get_argument_as_keywords(self):\n status = True\n arg_kv = self.get_values_for_mandatory_args()\n if len(arg_kv) != len(self.req_args_list):\n msg = 'could not execute %s without mandatory arguments' % (object)\n self.data_repository = 
skip_and_report_status(self.data_repository, msg)\n status = False\n arg_kv = self.get_values_for_optional_args(arg_kv)\n return arg_kv, status", "def get_json_argument_list():\n list_of_arguments_to_get = [\"finish_time\", \"segmentation_training_samples\", \"patch_count_per_image\", \"learning_rate\", \"batch_k\",\n \"batch_p\", \"flip_augment\", \"standardize\", \"margin\", \"metric\"]\n\n return list_of_arguments_to_get", "def derive_args(func):\n args = inspect.getfullargspec(func).args\n if args and is_selfish_name(args[0]):\n del args[0]\n return args", "def getargspec(self,obj):\n\n if inspect.isfunction(obj):\n func_obj = obj\n elif inspect.ismethod(obj):\n func_obj = obj.im_func\n else:\n raise TypeError, 'arg is not a Python function'\n args, varargs, varkw = inspect.getargs(func_obj.func_code)\n return args, varargs, varkw, func_obj.func_defaults", "def getargvalues(frame):\r\n args, varargs, varkw = getargs(frame.f_code)\r\n return ArgInfo(args, varargs, varkw, frame.f_locals)", "def get_arg_name(args):\n names = []\n for arg in args:\n if type(arg).__name__ == 'ID':\n names.append(arg.name)\n elif type(arg).__name__ == 'UnaryOp':\n names.append(arg.expr.name)\n elif type(arg).__name__ == 'StructRef':\n #############################################\n # So far, we don't care about this situation:\n # fun(a->b)\n # POSSIBLE CODE HERE\n #############################################\n names.append(None)\n return names", "def get_args( self, **kwargs ):\n args = []\n for at in self.arg_types:\n args.append( kwargs[at] )\n return args", "def format_args(self, **kwargs: Any) -> str:\n decl = self.declaration\n\n # The logic allows this to be used for both function like and non\n # function like macros.\n # 'SOME_DEFINE'.partition('(')\n # >>> 'SOME_DEFINE', '', ''\n #\n # 'FUNCTION_LIKE(_a, _b)'.partition('(')\n # >>> 'FUNCTION_LIKE', '(', '_a, _b)'\n _, part, args = decl.partition(\"(\")\n return part + args", "def getArgs(func):\n # exclude the defaults at the end (hence the [:-1])\n args = list(utils.flatten(inspect.getargspec(func)[:-1]))\n return set(args).difference(set([None]))", "def GetFunctionParametersAndValues():\n frame = inspect.currentframe().f_back\n args, _, _, values = inspect.getargvalues(frame)\n return ([(i, values[i]) for i in args])", "def build_arg_list(fn, env):\r\n kw = {}\r\n argspec = inspect.getargspec(fn)\r\n\r\n # if there is a **kw argument in the fn definition,\r\n # just pass along the environment\r\n if argspec[2]:\r\n kw = env\r\n #else for each entry in the arglist set the value from the environment\r\n else:\r\n #skip self\r\n argnames = argspec[0][1:]\r\n for name in argnames:\r\n if name in env:\r\n kw[name] = env[name]\r\n return kw", "def get_kwd_args(func):\n try:\n sig = inspect.signature(func)\n except AttributeError:\n args, _, _, defaults = inspect.getargspec(func)\n if defaults:\n kwonlyargs = args[-len(defaults):]\n else:\n kwonlyargs = []\n else:\n kwonlyargs = {p.name:p.default for p in sig.parameters.values()\n if p.default is not p.empty}\n\n return kwonlyargs", "def get_num_positional_args(fun):\n sig = inspect.signature(fun)\n return len([\n name for name, param in sig.parameters.items() if param.kind in\n [inspect.Parameter.POSITIONAL_OR_KEYWORD, inspect.Parameter.POSITIONAL_ONLY]\n ])", "def get_id_args(func, arg):\n\n return \"{} {}\".format(func.__name__, arg)", "def get_python_function_arguments(f):\n # Note that we only return non-optional arguments (we assume that any optional args are not specified).\n # This allows to, 
e.g., accept max(a, b, *more, name='') as a binary function\n param_specs = inspect.getfullargspec(f)\n annotations = param_specs.annotations\n arg_names = param_specs.args\n defaults = param_specs.defaults # \"if this tuple has n elements, they correspond to the last n elements listed\n # in args\"\n if defaults:\n arg_names = arg_names[:-len(defaults)]\n return (arg_names, annotations)", "def fetch_arguments(op_def, arg, ws):\n return [fetch_argument(op_def, desc, ws) for desc in arg.strings]", "def test_kw_args_with_keywords():\n assert arguments.fun_opt_kw_params(visited_color='blue',\n link_color='red',\n back_color='yellow',\n fore_color='orange') == ('orange',\n 'yellow',\n 'red', 'blue')", "def pykwarg(self):\n return self._pykwarg", "def get_xx_args_dict(self):\n return self.__xx_args", "def get_all_arguments(self):\n args, varargs, keyword, defaults = inspect.getargspec(self.exec_obj)\n if args.count('self') > 0:\n args.remove('self')\n return args", "def invalid_args(func, argdict):\r\n args, _, keywords, _ = inspect.getargspec(func)\r\n if keywords:\r\n return set() # All accepted\r\n return set(argdict) - set(args)", "def print_args():\n for key, value in vars(ARGS).items():\n print(key + ' : ' + str(value))", "def __arg_list(self):\n args = []\n try:\n arg = self.__arg()\n args.append(arg)\n if arg.token.endswith(\"...\"):\n return args\n\n while True:\n self.match_value(Punctuator, \",\")\n\n arg = self.__arg()\n if arg.token.endswith(\"...\"):\n return args\n\n args.append(arg)\n except ParseError:\n return args", "def _get_param_names(self):\n temp_params = {'function': self.function, 'target': self.target}\n\n temp_params.update(self.kwargs)\n\n return temp_params", "def _extract_args(self, func):\n sig = inspect.signature(func)\n\n # Backwards compatibility\n if len(sig.parameters) == 1:\n ((name, parameter),) = sig.parameters.items()\n if (\n parameter.kind is parameter.POSITIONAL_OR_KEYWORD\n and parameter.annotation in (parameter.empty, argparse.Namespace)\n ):\n self._require_namespace = name\n return\n\n for name, parameter in sig.parameters.items():\n if parameter.annotation is argparse.Namespace:\n self._require_namespace = name\n else:\n arg = Argument.from_parameter(name, parameter)\n action = arg.register_with_proxy(self)\n self._args.append((name, action.dest))", "def python_func_kw_matches(self,text):\n\n if \".\" in text: # a parameter cannot be dotted\n return []\n try: regexp = self.__funcParamsRegex\n except AttributeError:\n regexp = self.__funcParamsRegex = re.compile(r'''\n '.*?' | # single quoted strings or\n \".*?\" | # double quoted strings or\n \\w+ | # identifier\n \\S # other characters\n ''', re.VERBOSE | re.DOTALL)\n # 1. find the nearest identifier that comes before an unclosed\n # parenthesis e.g. for \"foo (1+bar(x), pa\", the candidate is \"foo\"\n tokens = regexp.findall(self.get_line_buffer())\n tokens.reverse()\n iterTokens = iter(tokens); openPar = 0\n for token in iterTokens:\n if token == ')':\n openPar -= 1\n elif token == '(':\n openPar += 1\n if openPar > 0:\n # found the last unclosed parenthesis\n break\n else:\n return []\n # 2. Concatenate any dotted names (e.g. 
\"foo.bar\" for \"foo.bar(x, pa\" )\n ids = []\n isId = re.compile(r'\\w+$').match\n while True:\n try:\n ids.append(iterTokens.next())\n if not isId(ids[-1]):\n ids.pop(); break\n if not iterTokens.next() == '.':\n break\n except StopIteration:\n break\n # lookup the candidate callable matches either using global_matches\n # or attr_matches for dotted names\n if len(ids) == 1:\n callableMatches = self.global_matches(ids[0])\n else:\n callableMatches = self.attr_matches('.'.join(ids[::-1]))\n argMatches = []\n for callableMatch in callableMatches:\n try: namedArgs = self._default_arguments(eval(callableMatch,\n self.namespace))\n except: continue\n for namedArg in namedArgs:\n if namedArg.startswith(text):\n argMatches.append(\"%s=\" %namedArg)\n return argMatches", "def test_star_args_with_keywords():\n assert arguments.fun_star_params(visited_color='orange',\n link_color='yellow',\n back_color='red',\n fore_color='blue') == ('orange',\n 'yellow',\n 'red', 'blue')", "def args(self):\n return self._args", "def args(self):\n return self._args", "def args(self):\n return self._args", "def func_var_names(func):\n names = func.__code__.co_varnames[:func.__code__.co_argcount]\n return names", "def get_kwargs():\n\treturn get_kwargs_raw(sys.argv)", "def arguments_from_call_funccode(f):\n fc = fc_or_c(f.__call__)\n argcount = fc.co_argcount\n args = list(fc.co_varnames[1:argcount])\n if not args:\n raise RuntimeError('Function has variable number of arguments')\n return args", "def get_fn_arg_contexts(cls, ctx: AntlrTelParser.FnContext) -> List[Any]:\n if len(ctx.children) <= 3:\n # [fn_name,(,)] => 3 children means no args, return empty array\n return []\n else:\n # Skip fnname and '(', step 2 to skip ','\n return ctx.children[2::2]", "def get_input_arguments(kwargs, function, warn=True):\n np.set_printoptions(threshold=20)\n print('\\narguments to {}:'.format(function.__qualname__))\n params = inspect.signature(function)\n input_kwargs = {}\n not_arguments = {}\n for k, v in kwargs.items():\n if k in params.parameters:\n input_kwargs[k] = v\n print_item(k, v)\n else:\n not_arguments[k] = v\n if warn:\n print('\\nother arguments:')\n for k, v in not_arguments.items():\n #print('{}: {}'.format(k, v))\n print_item(k, v)\n print('\\n')\n return input_kwargs", "def get_input_arguments(kwargs, function, warn=True):\n np.set_printoptions(threshold=20)\n print('\\narguments to {}:'.format(function.__qualname__))\n params = inspect.signature(function)\n input_kwargs = {}\n not_arguments = {}\n for k, v in kwargs.items():\n if k in params.parameters:\n input_kwargs[k] = v\n print_item(k, v)\n else:\n not_arguments[k] = v\n if warn:\n print('\\nother arguments:')\n for k, v in not_arguments.items():\n #print('{}: {}'.format(k, v))\n print_item(k, v)\n print('\\n')\n return input_kwargs", "def extractArguments(frame):\n\n\targuments = ([], None, None)\n\ttry:\n\t\tsource = textwrap.dedent(str().join(inspect.getsourcelines(frame)[0]).replace(\"\\\\\\n\", str()))\n\texcept (IOError, TypeError) as error:\n\t\treturn arguments\n\n\ttry:\n\t\tnode = ast.parse(source)\n\texcept:\n\t\treturn arguments\n\n\tif not node.body:\n\t\treturn arguments\n\n\tnode = node.body[0]\n\tif not isinstance(node, ast.FunctionDef):\n\t\treturn arguments\n\n\treturn [arg.id for arg in node.args.args], node.args.vararg, node.args.kwarg", "def args(self):\n if not self.arg_names:\n return []\n modes = self.arg_modes or [\"i\"] * len(self.arg_names)\n args = [\n (name, typ)\n for name, typ, mode in zip(self.arg_names, self.arg_types, 
modes)\n if mode in (\"i\", \"b\", \"v\") # IN, INOUT, VARIADIC\n ]\n\n def arg(name, typ, num):\n num_args = len(args)\n num_defaults = len(self.arg_defaults)\n has_default = num + num_defaults >= num_args\n default = (\n self.arg_defaults[num - num_args + num_defaults]\n if has_default\n else None\n )\n return ColumnMetadata(name, typ, [], default, has_default)\n\n return [arg(name, typ, num) for num, (name, typ) in enumerate(args)]", "def _get_arg_help(docstring):\r\n arg_help = {}\r\n\r\n if docstring is None:\r\n return arg_help\r\n\r\n last = None\r\n import re\r\n for line in docstring.split('\\n'):\r\n if line == '':\r\n continue\r\n match = re.search('^\\s*:param[\\w ]* (\\w+):\\s(.*)$', line)\r\n if match:\r\n last = match.group(1)\r\n arg_help[last] = match.group(2)\r\n else:\r\n arg_help[last] += ' %s' % line.strip()\r\n return arg_help", "def _get_required_args(func):\n module_logger.debug(f\"_get_required_args: func={func}\")\n fas = inspect.getfullargspec(func)\n module_logger.debug(f\"_get_required_args: fas={fas}\")\n len_args = len(fas.args)\n len_args += len(fas.kwonlyargs)\n if fas.kwonlydefaults is not None:\n len_args -= len(fas.kwonlydefaults)\n if fas.defaults is not None:\n len_args -= len(fas.defaults)\n return len_args", "def test_020_kwargs(self):\n caller = self.get_caller([KwargsTaskOverride])\n self.assertEqual([\"A\", \"B\"], caller(\"A\", \"B\"))", "def punkte(self):\n return self.args", "def _get_arguments(self) -> str:\n func = self.node\n\n # Early logic used to iterate over, `func.get_arguments()`, however when there\n # is an unknown type clang will sometimes fail to provide tokens for that\n # argument. For example in \"unknown_type foo[]\" the brackets will cause clang\n # to return back no tokens for the argument.\n start = func.location\n end = func.extent.end\n if func.is_definition():\n # When a function is a definition the last child is the compound statement\n # so we need to move prior to the compound statement\n children = list(func.get_children())\n body_start = children[-1].extent.start.offset\n end = cindex.SourceLocation.from_offset(func.tu, start.file, body_start - 1)\n\n extent = cindex.SourceRange.from_locations(start, end)\n non_comment_tokens = (\n t\n for t in cindex.TokenGroup.get_tokens(func.tu, extent=extent)\n if t.kind != cindex.TokenKind.COMMENT\n )\n\n # Even though this will place spaces around all the tokens, the sphinx C domain\n # will provide some formatting to make it look nicer in the final output.\n full_signature = \" \".join(t.spelling for t in non_comment_tokens)\n\n _, _, arguments = full_signature.partition(\"(\")\n arguments = arguments.rstrip(\")\")\n arguments = arguments.strip()\n\n return arguments", "def arg_err(self,func):\n print 'Error in arguments:'\n print inspect.getdoc(func)", "def get_args(self):\r\n return self.args", "def preprocess_arguments(self, *args, **kwargs):\n return (args, kwargs)", "def args(self) -> tuple[Basic, ...]:\n return self._args", "def get_param_names(self):\n return list(self.params.keys())", "def get_list_cmd_args(self):\r\n return self.get_args(OSPL.list)", "def get_args(self) -> List[str]:\n return self.content.split()[1:]", "def getListOfParameters(self, *args):\n return _libsbml.KineticLaw_getListOfParameters(self, *args)", "def pyarg(self):\n return self._pyarg", "def args(cls):\n try:\n args = getfullargspec(cls.__init__)\n except TypeError:\n return []\n return args[0]", "def keywords(self):\n return list(self._kw)", "def get_required_kwargs(fun, skip_positional=0):\n 
sig = inspect.signature(fun)\n # the params from signature with up to skip_positional filtered out\n # (less only if there is not enough of positional args)\n params = [(name, param) for i, (name, param) in enumerate(sig.parameters.items())\n if i >= skip_positional or param.kind not in\n [inspect.Parameter.POSITIONAL_OR_KEYWORD, inspect.Parameter.POSITIONAL_ONLY]]\n return [\n name for name, param in params if param.default is inspect.Parameter.empty\n and param.kind in [inspect.Parameter.POSITIONAL_OR_KEYWORD, inspect.Parameter.KEYWORD_ONLY]\n ]", "def _formal_params(self, doclet):\n name, paren, params = self.arguments[0].partition('(')\n return ('(%s' % params) if params else '(%s)' % ', '.join(doclet['meta']['code']['paramnames'])", "def extract_arguments(args, method):\n intersection = lambda list1, list2: [x for x in list1 if x in list2]\n filterByKey = lambda keys, data: {x: data[x] for x in keys if x in data }\n keys = intersection(signature(method).parameters.keys(), args.keys())\n params = filterByKey(keys, args)\n return params", "def get_args(inst):\n if is_estimator(inst):\n args = inspect.getargspec(inst.update).args\n args = [arg for arg in args if arg != 'self' and arg != 'X']\n else:\n args = inspect.getargspec(inst).args\n ignore_args = {'self', 'X', 'y', 'pattern', 'normalizer', 'coef'}\n args = [arg for arg in args if arg not in ignore_args]\n\n return args", "def generateKwargsAsString(self):\n args = \"\"\n axisList = self.tabWidget.currentWidget()\n\n for axisWidget in axisList.getAxisWidgets():\n args += \"%s = %s, \" % (axisWidget.axis.id,\n axisWidget.getCurrentValuesAsStr())\n\n # Generate additional args\n args += 'squeeze = 0'\n args += \", order = '%s' \" % axisList.getAxesOrderString()\n return args", "def __arg_list(self):\n arg = self.__arg()\n args = [arg]\n try:\n while not self.eol():\n self.match_value(Punctuator, \",\")\n arg = self.__arg()\n args.append(arg)\n except ParseError:\n pass\n return args", "def arguments_from_funccode(f):\n fc = fc_or_c(f)\n vnames = fc.co_varnames\n nargs = fc.co_argcount\n # bound method and fake function will be None\n args = vnames[1 if is_bound(f) else 0:nargs]\n if not args:\n raise RuntimeError('Function has variable number of arguments')\n return list(args)", "def allkeywords(f):\n @_fntools.wraps(f)\n def wrapper(*a, **k):\n a = list(a)\n for idx, arg in enumerate(_inspect.getargspec(f).args, -_inspect.ismethod(f)): # or [0] in 2.5\n if arg in k:\n if idx < len(a):\n a.insert(idx, k.pop(arg))\n else:\n break\n return f(*a, **k)\n return wrapper", "def generate_arg_and_kwags():\n def gen_func(\n #df: DataSource,\n option: List[list],\n style: List[dict]\n )->List[Tuple[list, dict]]:\n\n if len(option) != len(style):\n raise SystemError(\"option and style must be same size list.\")\n\n arg_and_kwarg = []\n for o, s in zip(option, style):\n arg = [*o]\n kwargs = s\n arg_and_kwarg.append((arg, kwargs))\n return arg_and_kwarg\n return gen_func", "def get_num_args(function):\n import inspect\n args = inspect.getfullargspec(function)\n num_args = 0\n if args[0] is not None:\n num_args += len(args[0])\n if 'self' in args[0]:\n num_args -= 1\n if args[1] is not None:\n num_args += len(args[1])\n if args[2] is not None:\n num_args += len(args[2])\n # do not count defaults of keywords conatined in args[3]\n # if args[3] is not None:\n # num_args += len(args[3])\n return num_args", "def parameter_names(self):\n return [x for x in self.transformations.values() if isinstance(x, str)]", "def determine_arg_locations(self, 
arg_types): # pragma: no cover\n raise NotImplementedError(\"Implement this\")", "def args(self):\n return self._parse_args", "def _represent_args(*args, **kwargs):\n argument_strings = [repr(a) for a in args]\n keyword_strings = [\"=\".join((k, repr(v))) for k, v in kwargs.items()]\n return \", \".join(argument_strings + keyword_strings)", "def list_args(args):\n run_list_args(args)", "def test_kw_args_with_positional():\n assert arguments.fun_opt_kw_params('blue', 'red', 'yellow',\n 'orange') == ('blue', 'red', 'yellow',\n 'orange')", "def arglist(self) -> List:\n return self.argv[1:]", "def help_args():\n pass", "def parseArgs(self, args, **vars):\n argList = []\n for token in self.argLexer.finditer(args):\n for tokenType, tokenValue in list(token.groupdict().items()):\n if tokenValue is not None:\n argList.append(getattr(self, 'argtoken_' +\n tokenType)(tokenValue, vars))\n return argList", "def arguments(**kw):\n return export_arguments('cc', _all_arguments, _groups, **kw)", "def get_args(self):\n rqst = self.request\n args = rqst.arguments()\n resp = {}\n for arg in args:\n resp[arg] = repr(rqst.get_all(arg))\n return resp", "def return_parameter_names():\n return list(titles), list(labels)", "def _get_init_args(self):\n signature = inspect.signature(self.__init__)\n parameters = signature.parameters\n args = [arg for arg, p in parameters.items()\n if p.kind is p.POSITIONAL_OR_KEYWORD]\n\n return {arg: getattr(self, arg) for arg in args if arg != 'self'}", "def args(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"args\")", "def args(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"args\")", "def args(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"args\")", "def args(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"args\")" ]
[ "0.7080962", "0.69528246", "0.6950511", "0.6814684", "0.68055576", "0.67487407", "0.6712614", "0.6712274", "0.6702668", "0.6666874", "0.6628423", "0.66158634", "0.6563383", "0.64373004", "0.64143133", "0.6394061", "0.6304303", "0.6285481", "0.6278419", "0.6258944", "0.62564945", "0.62373346", "0.62220895", "0.62058085", "0.61846083", "0.61682487", "0.61665857", "0.61631507", "0.613867", "0.6131295", "0.60727614", "0.60621136", "0.60570085", "0.60485625", "0.60462487", "0.6045869", "0.604582", "0.60454273", "0.6043131", "0.6037238", "0.6018369", "0.6016439", "0.59786224", "0.59618086", "0.59570616", "0.593966", "0.593966", "0.593966", "0.59291834", "0.5911822", "0.59103715", "0.59088427", "0.5892072", "0.5892072", "0.58743095", "0.585125", "0.58433604", "0.5836764", "0.57995415", "0.5796815", "0.57900107", "0.57781", "0.57715684", "0.57698655", "0.57618994", "0.5757583", "0.57528836", "0.57472986", "0.57321143", "0.5726112", "0.57160294", "0.57120955", "0.57047147", "0.57004887", "0.56998605", "0.56967354", "0.56941307", "0.56837434", "0.5681774", "0.56737614", "0.56685174", "0.56661606", "0.56594527", "0.56515837", "0.5644538", "0.56394064", "0.5637382", "0.5633658", "0.5628106", "0.56233424", "0.5618186", "0.5616272", "0.5613565", "0.5612031", "0.5611602", "0.5608092", "0.5608092", "0.5608092", "0.5608092" ]
0.6993237
1
Return the needed flag to parse or build value
def get_pytype(self, c_arg, parse_arg):
    if isinstance(c_arg, FunctionAddress):
        return 'O'
    else:
        try:
            return pytype_parse_registry[(parse_arg.dtype, parse_arg.precision)]
        except KeyError as e:
            raise NotImplementedError("Type not implemented for argument collection : "+str(type(parse_arg))) from e
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getOption(arg):\n return (False, \"\", \"\")", "def get_parsed_flags():\n return Flags.parsed_args", "def _parse_env_value(val):\n if val.lower() == \"false\":\n return False\n elif val.lower() == \"true\":\n return True\n try:\n return int(val)\n except ValueError:\n pass\n try:\n return float(val)\n except ValueError:\n pass\n return val", "def getFlag(self, flag) -> bool:\n ...", "def _parse_task_open_option(self, value):\n if value == \"true\":\n return False\n elif value == \"false\":\n return True\n else:\n return None", "def parseFlags(self):\n # Blank return value.\n retVal = \"\"\n \n try:\n # Store flags as we parse them.\n allFlags = []\n \n # Get the accumulator flag.\n accFlag = self.__flags & self.f_accum\n trendFlag = self.__flags & self.f_trend\n modeFlag = self.__flags & self.f_mode\n \n # Complete set of readings?\n if accFlag == self.f_accum_complete:\n # Completed loading values into the accumulator.\n allFlags.append('C')\n elif accFlag == self.f_accum_accum:\n # Still accumulating.\n allFlags.append('A')\n elif accFlag == self.f_accum_unk:\n # Unknown.\n allFlags.append('?')\n else:\n # Bad value.\n allFlags.append('!')\n \n # Trend?\n if (trendFlag) == self.f_trend_stable:\n # Readings stable.\n allFlags.append('S')\n elif (trendFlag) == self.f_trend_up:\n # Still accumulating.\n allFlags.append('U')\n elif (trendFlag) == self.f_trend_dn:\n # Still accumulating.\n allFlags.append('D')\n elif (trendFlag) == self.f_trend_unk:\n # Still accumulating.\n allFlags.append('?')\n else:\n # Bad value.\n allFlags.append('!')\n \n # Mode?\n if modeFlag == self.f_mode_fast:\n # Fast\n allFlags.append('F')\n elif modeFlag == self.f_mode_slow:\n # Slow\n allFlags.append('S')\n elif modeFlag == self.f_mode_counter:\n # Stream\n allFlags.append('C')\n elif modeFlag == self.f_mode_scaler:\n # Roll\n allFlags.append('L')\n else:\n # Bad value.\n allFlags.append('!')\n \n # Build a nice string.\n retVal = ''.join(allFlags)\n \n \n except:\n raise\n \n # Return value.\n return retVal", "def get(flag=\"rainbow\"):\n return flags[flag]", "def get_flag(self):\n return self.long_flag", "def _parse_flags(self, flags):\n s = ''\n for flag in flags:\n if len(s):\n s += ' | '\n s += 'gf.sim.VariableFlag.%s' % (flag)\n if len(s):\n return s\n else:\n return '0'", "def test_true(self):\n result = self.flag.parseString('Y')\n self.assertEqual('Y', result[0])", "def test_false(self):\n result = self.flag.parseString('N')\n self.assertEqual('N', result[0])", "def get_flag(self):\n price_data = self.get_price_data()\n if price_data.get('flag'):\n return price_data.get('flag')\n return None", "def test_unknown(self):\n result = self.flag.parseString('U')\n self.assertEqual('U', result[0])", "def _compute_value(self, dest, kwargs, flag_val_strs, passthru_arg_strs):\n type_arg = kwargs.get(\"type\", str)\n member_type = kwargs.get(\"member_type\", str)\n\n def to_value_type(val_str):\n return self.to_value_type(val_str, type_arg, member_type)\n\n # Helper function to expand a fromfile=True value string, if needed.\n # May return a string or a dict/list decoded from a json/yaml file.\n def expand(val_or_str):\n if (\n kwargs.get(\"fromfile\", True)\n and isinstance(val_or_str, str)\n and val_or_str.startswith(\"@\")\n ):\n if val_or_str.startswith(\"@@\"): # Support a literal @ for fromfile values via @@.\n return val_or_str[1:]\n else:\n fromfile = val_or_str[1:]\n try:\n contents = Path(get_buildroot(), fromfile).read_text()\n if fromfile.endswith(\".json\"):\n return json.loads(contents)\n elif 
fromfile.endswith(\".yml\") or fromfile.endswith(\".yaml\"):\n return yaml.safe_load(contents)\n else:\n return contents.strip()\n except (OSError, ValueError, yaml.YAMLError) as e:\n raise FromfileError(\n f\"Failed to read {dest} in {self._scope_str()} from file {fromfile}: {e!r}\"\n )\n else:\n return val_or_str\n\n # Helper function to merge multiple values from a single rank (e.g., multiple flags,\n # or multiple config files).\n def merge_in_rank(vals):\n if not vals:\n return None\n expanded_vals = [to_value_type(expand(x)) for x in vals]\n if is_list_option(kwargs):\n return ListValueComponent.merge(expanded_vals)\n if is_dict_option(kwargs):\n return DictValueComponent.merge(expanded_vals)\n return expanded_vals[-1] # Last value wins.\n\n # Get value from config files, and capture details about its derivation.\n config_details = None\n config_section = GLOBAL_SCOPE_CONFIG_SECTION if self._scope == GLOBAL_SCOPE else self._scope\n config_default_val = merge_in_rank(self._config.get(DEFAULT_SECTION, dest))\n config_val = merge_in_rank(self._config.get(config_section, dest))\n config_source_files = self._config.get_sources_for_option(config_section, dest)\n if config_source_files:\n config_details = f\"from {', '.join(config_source_files)}\"\n\n # Get value from environment, and capture details about its derivation.\n env_vars = self.get_env_var_names(self._scope, dest)\n env_val = None\n env_details = None\n if self._env:\n for env_var in env_vars:\n if env_var in self._env:\n env_val = merge_in_rank([self._env.get(env_var)])\n env_details = f\"from env var {env_var}\"\n break\n\n # Get value from cmd-line flags.\n flag_vals = list(flag_val_strs)\n if kwargs.get(\"passthrough\") and passthru_arg_strs:\n # NB: Passthrough arguments are either of type `str` or `shell_str`\n # (see self._validate): the former never need interpretation, and the latter do not\n # need interpretation when they have been provided directly via `sys.argv` as the\n # passthrough args have been.\n flag_vals.append(\n ListValueComponent(ListValueComponent.MODIFY, [*passthru_arg_strs], [])\n )\n if len(flag_vals) > 1 and not (is_list_option(kwargs) or is_dict_option(kwargs)):\n raise ParseError(\n f\"Multiple cmd line flags specified for option {dest} in {self._scope_str()}\"\n )\n flag_val = merge_in_rank(flag_vals)\n flag_details = None if flag_val is None else \"from command-line flag\"\n\n # Rank all available values.\n values_to_rank = [\n (flag_val, flag_details),\n (env_val, env_details),\n (config_val, config_details),\n (config_default_val, config_details),\n (to_value_type(kwargs.get(\"default\")), None),\n (None, None),\n ]\n # Note that ranked_vals will always have at least one element, and all elements will be\n # instances of RankedValue (so none will be None, although they may wrap a None value).\n ranked_vals = list(reversed(list(RankedValue.prioritized_iter(*values_to_rank))))\n\n def group(value_component_type, process_val_func) -> list[RankedValue]:\n # We group any values that are merged together, so that the history can reflect\n # merges vs. replacements in a useful way. 
E.g., if we merge [a, b] and [c],\n # and then replace it with [d, e], the history will contain:\n # - [d, e] (from command-line flag)\n # - [a, b, c] (from env var, from config)\n # And similarly for dicts.\n grouped: list[list[RankedValue]] = [[]]\n for ranked_val in ranked_vals:\n if ranked_val.value and ranked_val.value.action == value_component_type.REPLACE:\n grouped.append([])\n grouped[-1].append(ranked_val)\n return [\n RankedValue(\n grp[-1].rank,\n process_val_func(\n value_component_type.merge(\n rv.value for rv in grp if rv.value is not None\n ).val\n ),\n \", \".join(rv.details for rv in grp if rv.details),\n )\n for grp in grouped\n if grp\n ]\n\n if is_list_option(kwargs):\n\n def process_list(lst):\n return [self._convert_member_type(member_type, val) for val in lst]\n\n historic_ranked_vals = group(ListValueComponent, process_list)\n elif is_dict_option(kwargs):\n historic_ranked_vals = group(DictValueComponent, lambda x: x)\n else:\n historic_ranked_vals = ranked_vals\n\n value_history = OptionValueHistory(tuple(historic_ranked_vals))\n\n # Helper function to check various validity constraints on final option values.\n def check_scalar_value(val):\n if val is None:\n return\n choices = kwargs.get(\"choices\")\n if choices is None and \"type\" in kwargs:\n if inspect.isclass(type_arg) and issubclass(type_arg, Enum):\n choices = list(type_arg)\n if choices is not None and val not in choices:\n raise ParseError(\n softwrap(\n f\"\"\"\n `{val}` is not an allowed value for option {dest} in {self._scope_str()}.\n Must be one of: {choices}\n \"\"\"\n )\n )\n elif type_arg == file_option:\n check_file_exists(val)\n elif type_arg == dir_option:\n check_dir_exists(val)\n\n def check_file_exists(val) -> None:\n error_prefix = f\"File value `{val}` for option `{dest}` in `{self._scope_str()}`\"\n try:\n path = Path(val)\n path_with_buildroot = Path(get_buildroot(), val)\n except TypeError:\n raise ParseError(f\"{error_prefix} cannot be parsed as a file path.\")\n if not path.is_file() and not path_with_buildroot.is_file():\n raise ParseError(f\"{error_prefix} does not exist.\")\n\n def check_dir_exists(val) -> None:\n error_prefix = f\"Directory value `{val}` for option `{dest}` in `{self._scope_str()}`\"\n try:\n path = Path(val)\n path_with_buildroot = Path(get_buildroot(), val)\n except TypeError:\n raise ParseError(f\"{error_prefix} cannot be parsed as a directory path.\")\n if not path.is_dir() and not path_with_buildroot.is_dir():\n raise ParseError(f\"{error_prefix} does not exist.\")\n\n # Validate the final value.\n final_val = value_history.final_value\n if isinstance(final_val.value, list):\n for component in final_val.value:\n check_scalar_value(component)\n if inspect.isclass(member_type) and issubclass(member_type, Enum):\n if len(final_val.value) != len(set(final_val.value)):\n raise ParseError(f\"Duplicate enum values specified in list: {final_val.value}\")\n elif isinstance(final_val.value, dict):\n for component in final_val.value.values():\n check_scalar_value(component)\n else:\n check_scalar_value(final_val.value)\n\n return value_history", "def getArg(flag):\n try:\n a = sys.argv[sys.argv.index(flag) + 1]\n except:\n return \"\"\n else:\n return a", "def help_for(self, flag: str) -> Tuple[str, str]:\n # Obtain arg obj\n if flag not in self.flags:\n err = \"{!r} is not a valid flag for this context! 
Valid flags are: {!r}\" # noqa\n raise ValueError(err.format(flag, self.flags.keys()))\n arg = self.flags[flag]\n # Determine expected value type, if any\n value = {str: \"STRING\", int: \"INT\"}.get(arg.kind)\n # Format & go\n full_names = []\n for name in self.names_for(flag):\n if value:\n # Short flags are -f VAL, long are --foo=VAL\n # When optional, also, -f [VAL] and --foo[=VAL]\n if len(name.strip(\"-\")) == 1:\n value_ = (\"[{}]\".format(value)) if arg.optional else value\n valuestr = \" {}\".format(value_)\n else:\n valuestr = \"={}\".format(value)\n if arg.optional:\n valuestr = \"[{}]\".format(valuestr)\n else:\n # no value => boolean\n # check for inverse\n if name in self.inverse_flags.values():\n name = \"--[no-]{}\".format(name[2:])\n\n valuestr = \"\"\n # Tack together\n full_names.append(name + valuestr)\n namestr = \", \".join(sorted(full_names, key=len))\n helpstr = arg.help or \"\"\n return namestr, helpstr", "def _parse_value(self, data, flags):\n\n\t\tif flags & Client._FLAG_COMPRESSED:\n\t\t\tdata = decompress(data)\n\n\t\tif flags == 0 or flags == Client._FLAG_COMPRESSED:\n\t\t\t# Either a bare string or a compressed string now decompressed...\n\t\t\tvalue = data\n\t\telif flags & Client._FLAG_INTEGER:\n\t\t\tvalue = int(data)\n\t\telif flags & Client._FLAG_LONG:\n\t\t\tvalue = long(data)\n\t\telif flags & Client._FLAG_PICKLE:\n\t\t\ttry:\n\t\t\t\tvalue = pickle.loads(data)\n\t\t\texcept Exception:\n\t\t\t\tself._debuglog('Pickle error...\\n')\n\t\t\t\tvalue = None\n\t\telse:\n\t\t\tself._debuglog(\"unknown flags on get: %x\\n\" % flags)\n\n\t\treturn value", "def _get_flag_value(event):\n flags = event.modifierFlags()\n if flags == 0x100:\n value = 0\n else:\n value = 1\n return value", "def getValue(self, value=None):\n if self.data and self.source & COMMANDLINE:\n return self.data\n\n if self.environ and str(self.environ) in os.environ:\n self.source = ENVIRONMENT\n self.file = None\n return self.cast(os.environ[str(self.environ)])\n\n if self.data:\n return self.data\n\n if self.default:\n self.source = BUILTIN\n self.file = None\n return self.default\n\n self.source = CODE\n self.file = None\n\n if value is None:\n return []\n\n return value", "def get(key: 'int | str', default: 'Optional[int]' = -1) -> 'Flags':\n if isinstance(key, int):\n return Flags(key)\n return Flags[key] # type: ignore[misc]", "def read_flags():\n return flag_args", "def post_formatter(self, value):\n if isinstance(value, bool):\n return value and 'true' or None\n return value", "def _parse_value(self, write_token=True, override=None):\n v_str = self.prior_token\n\n # Construct the complex string\n if v_str == '(':\n v_re = self.token\n\n self._update_tokens(write_token)\n assert self.token == ','\n\n self._update_tokens(write_token)\n v_im = self.token\n\n self._update_tokens(write_token)\n assert self.token == ')'\n\n self._update_tokens(write_token, override)\n v_str = '({0}, {1})'.format(v_re, v_im)\n\n recast_funcs = [int, pyfloat, pycomplex, pybool, pystr]\n\n for f90type in recast_funcs:\n try:\n # Unclever hack.. 
integrate this better\n if f90type == pybool:\n value = pybool(v_str, self.strict_logical)\n else:\n value = f90type(v_str)\n return value\n except ValueError:\n continue", "def get_flag(flagname):\n if flagname in commands.keys():\n return commands[flagname]\n else:\n for cmdflag in commands.keys():\n if flagname in commands[cmdflag]['aliases']:\n return commands[cmdflag]", "def processOption (self, line) :\n ll = line.split ('=')\n if len (ll) < 2:\n print \"Cannot parse option \" , line\n sys.exit()\n result = (ll[0].strip() , ll[1].strip())\n return result", "def value(self):\n return self.flags.value", "def _flag():\n current_flag = _flag.flag\n _flag.flag <<= 1\n return current_flag", "def get_short_flag(self):\n return self.short_flag", "def validate_value_flag(self):\n if not self.app.args.value is None or self.app.args.value == '':\n return True\n else:\n return False", "def has_flag(compiler, flag, ext=None):\n return try_compile(compiler, flags=[flag], ext=ext)", "def _get_build_flags(cmdline: str) -> Tuple[Tuple[str, ...], Tuple[str, ...]]:\n cmdlist = cmdline.split()\n labels = [arg for arg in cmdlist if arg.startswith(\"//\")]\n build_flags = [arg for arg in cmdlist if not arg.startswith(\"//\")]\n return (tuple(labels), tuple(build_flags))", "def _get_optlevel(\n target, fc, cc, debug, fflags, cflags, osname=None, verbose=False\n):\n # remove target extension, if necessary\n target = _get_base_app_name(target)\n\n # get lower case OS string\n if osname is None:\n osname = _get_osname()\n\n # remove .exe extension from compiler if necessary\n if fc is not None:\n fc = _get_base_app_name(fc)\n if cc is not None:\n cc = _get_base_app_name(cc)\n\n compiler = None\n if fc is not None:\n compiler = fc\n if compiler is None:\n compiler = cc\n\n # get - or / to prepend for compiler switches\n prepend = _get_prepend(compiler, osname)\n\n # set basic optimization level\n if debug:\n if osname == \"win32\":\n optlevel = \"O0\"\n else:\n optlevel = \"O0\"\n else:\n optlevel = \"O2\"\n\n # look for optimization levels in fflags\n for flag in fflags:\n if flag[:2] == \"-O\" or flag == \"-fast\":\n if not debug:\n optlevel = flag[1:]\n break # after first optimization (O) flag\n\n # look for optimization levels in cflags\n for flag in cflags:\n if flag[:2] == \"-O\":\n if not debug:\n optlevel = flag[1:]\n break # after first optimization (O) flag\n\n # reset optlevel with specified flags from setters\n if compiler == fc:\n tval = _set_fflags(target, fc, argv=False, osname=osname)\n else:\n tval = _set_cflags(target, cc, argv=False, osname=osname)\n\n # look for for optimization levels in compiler flags from setters\n if tval is not None:\n for flag in tval:\n if flag[:2] == \"-O\":\n if not debug:\n optlevel = flag[1:]\n break # after first optimization (O) flag\n\n # prepend optlevel\n optlevel = prepend + optlevel\n\n return optlevel", "def _flag_to_arg(flag: str) -> str:\n arg = flag.split('--')[1].split('-')\n arg = '_'.join(arg)\n return arg", "def _arg_to_flag(name: str) -> str:\n arg = '-'.join(name.split('_'))\n return f'--{arg}'", "def _str_to_val(self, value):\n kind, value = value.split(': ', 1)\n\n # Lists and dictionaries are special case\n if kind in ('L', 'D'):\n return eval(value)\n\n if kind in TYPE_MAPPING.keys():\n if kind == 'B':\n if value != 'True':\n return False\n\n value = TYPE_MAPPING[kind](value)\n\n return value\n else:\n raise ValueError(\"An Unknown type of setting was found!\")", "def getBool(string):\n return (True)", "def parse_debug_value(value):\r\n if 
isinstance(value, bool):\r\n return value\r\n try:\r\n from webassets.env import parse_debug_value\r\n return parse_debug_value(value)\r\n except ValueError:\r\n raise template.TemplateSyntaxError(\r\n '\"debug\" argument must be one of the strings '\r\n '\"true\", \"false\" or \"merge\", not \"%s\"' % value)", "def flag(self) -> str:\n return pulumi.get(self, \"flag\")", "def get_opt(self):\n return self.parser.parse_args()", "def get_opt(self):\n return self.parser.parse_args()", "def get_opt(self):\n return self.parser.parse_args()", "def encode_flag_for_optimizer(val, flagdef):\n if flagdef.choices:\n return [c.value for c in flagdef.choices]\n elif flagdef.min is not None and flagdef.max is not None:\n return _encode_function(flagdef, val)\n return val", "def get_option_generic(pytest_config: pytest.Config, flag: str, default):\n cli_flag = flag.replace(\"-\", \"_\")\n ini_flag = flag\n\n # Lowest priority\n use = default\n\n # Middle priority\n if pytest_config.getini(ini_flag) is not None:\n use = pytest_config.getini(ini_flag)\n\n # Top priority\n if pytest_config.getoption(cli_flag) is not None:\n use = pytest_config.getoption(cli_flag)\n\n return use", "def GetJflag(cmdline):\n\n for i in range(len(cmdline)):\n if (cmdline[i] == '-j' and i + 1 < len(cmdline)\n and cmdline[i + 1].isdigit()):\n return int(cmdline[i + 1])\n\n if (cmdline[i].startswith('-j') and cmdline[i][len('-j'):].isdigit()):\n return int(cmdline[i][len('-j'):])", "def _do_get(self, name, group=None, namespace=None):\n if group is None and name in self._groups:\n return (self.GroupAttr(self, self._get_group(name)), None)\n\n info = self._get_opt_info(name, group)\n opt = info['opt']\n if 'location' in info:\n loc = info['location']\n else:\n loc = opt._set_location\n\n if isinstance(opt, SubCommandOpt):\n return (self.SubCommandAttr(self, group, opt.dest), None)\n\n if 'override' in info:\n return (self._substitute(info['override']), loc)\n\n def convert(value):\n return self._convert_value(\n self._substitute(value, group, namespace), opt)\n\n group_name = group.name if group else None\n key = (group_name, name)\n\n # If use_env is true, get a value from the environment but don't use\n # it yet. 
We will look at the command line first, below.\n env_val = (sources._NoValue, None)\n if self._use_env:\n env_val = self._env_driver.get(group_name, name, opt)\n\n if opt.mutable and namespace is None:\n namespace = self._mutable_ns\n if namespace is None:\n namespace = self._namespace\n if namespace is not None:\n try:\n alt_loc = None\n try:\n val, alt_loc = opt._get_from_namespace(namespace,\n group_name)\n # Try command line first\n if (val != sources._NoValue\n and alt_loc.location == Locations.command_line):\n return (convert(val), alt_loc)\n # Environment source second\n if env_val[0] != sources._NoValue:\n return (convert(env_val[0]), env_val[1])\n # Default file source third\n if val != sources._NoValue:\n return (convert(val), alt_loc)\n except KeyError: # nosec: Valid control flow instruction\n alt_loc = LocationInfo(\n Locations.environment,\n self._env_driver.get_name(group_name, name),\n )\n # If there was a KeyError looking at config files or\n # command line, retry the env_val.\n if env_val[0] != sources._NoValue:\n return (convert(env_val[0]), env_val[1])\n except ValueError as ve:\n message = \"Value for option %s from %s is not valid: %s\" % (\n opt.name, alt_loc, str(ve))\n # Preserve backwards compatibility for file-based value\n # errors.\n if alt_loc.location == Locations.user:\n raise ConfigFileValueError(message)\n raise ConfigSourceValueError(message)\n\n try:\n return self.__drivers_cache[key]\n except KeyError: # nosec: Valid control flow instruction\n pass\n\n for source in self._sources:\n val = source.get(group_name, name, opt)\n if val[0] != sources._NoValue:\n result = (convert(val[0]), val[1])\n self.__drivers_cache[key] = result\n return result\n\n if 'default' in info:\n return (self._substitute(info['default']), loc)\n\n if self._validate_default_values:\n if opt.default is not None:\n try:\n convert(opt.default)\n except ValueError as e:\n raise ConfigFileValueError(\n \"Default value for option %s is not valid: %s\"\n % (opt.name, str(e)))\n\n if opt.default is not None:\n return (convert(opt.default), loc)\n\n return (None, None)", "def _parse(self, val):\n if self.type == \"integer\":\n return int(val)\n elif self.type == \"number\":\n return float(val)\n elif self.type == \"boolean\":\n lower_val = str(val).lower()\n if lower_val not in {\"true\", \"false\"}:\n msg = \"Boolean parameter '{}' only accept True/False, got {}.\"\n raise ValidationException(\n message=msg.format(self.name, val),\n no_personal_data_message=msg.format(\"[self.name]\", \"[val]\"),\n error_category=ErrorCategory.USER_ERROR,\n target=ErrorTarget.PIPELINE,\n )\n return True if lower_val == \"true\" else False\n return val", "def parse_bool(arg):\n if arg == 'True':\n return True\n elif arg == 'False':\n return False\n else:\n raise argparse.ArgumentTypeError(\"Expected 'True' or 'False'.\")", "def parse_value(self, value):\n\t\t\n\t\tif goodies.is_float(value):\n\t\t\treturn float(value)\n\t\telif goodies.is_int(value):\n\t\t\treturn int(value)\n\t\telif goodies.is_bool(value):\n\t\t\treturn bool(value.capitalize())\n\t\telse:\n\t\t\treturn value", "def _create_flag_value_map(flags: Iterable[str]) -> DefaultDict[str, list[str | None]]:\n flag_value_map: DefaultDict[str, list[str | None]] = defaultdict(list)\n for flag in flags:\n flag_val: str | None\n key, has_equals_sign, flag_val = flag.partition(\"=\")\n if not has_equals_sign:\n if not flag.startswith(\"--\"): # '-xfoo' style.\n key = flag[0:2]\n flag_val = flag[2:]\n if not flag_val:\n # Either a short option with no 
value or a long option with no equals sign.\n # Important so we can distinguish between no value ('--foo') and setting to an empty\n # string ('--foo='), for options with an implicit_value.\n flag_val = None\n flag_value_map[key].append(flag_val)\n return flag_value_map", "def aria_bool(value: Optional[bool]) -> Optional[str]:\n\n if value is None:\n return None\n elif value is True:\n return \"true\"\n elif value is False:\n return \"false\"\n else:\n raise ValueError(str(value))", "def target_option(s):\n return s", "def get_value(arg):\n if arg in self.args_repository:\n return self.args_repository[arg]\n if arg in self.data_repository:\n return self.data_repository[arg]\n print_error(\"value for mandatory argument '{0}' not available in \"\n \"data_repository/args_repository\".format(args))\n return None", "def _get_parameter(self, name):\n for parameter in self.parameters:\n if name in parameter.names:\n if isinstance(parameter, _Switch):\n return parameter.is_set\n else:\n return parameter.value\n raise ValueError(\"Option name %s was not found.\" % name)", "def translate(value):\n if re.match(r\"true\",value,re.IGNORECASE) or re.match(r\"t\",value,re.IGNORECASE):\n return True\n if re.match(r\"false\",value,re.IGNORECASE) or re.match(r\"f\",value,re.IGNORECASE):\n return False\n if re.match(r\"none\",value,re.IGNORECASE):\n return None\n try:\n return int(value)\n except:\n pass\n try:\n return float(value)\n except:\n pass\n return value", "def fetch_val_for_key(key):\r\n\r\n # first try to find it in the FLAGS\r\n try:\r\n return THEANO_FLAGS_DICT[key]\r\n except KeyError:\r\n pass\r\n\r\n # next try to find it in the config file\r\n\r\n # config file keys can be of form option, or section.option\r\n key_tokens = key.split('.')\r\n if len(key_tokens) > 2:\r\n raise KeyError(key)\r\n\r\n if len(key_tokens) == 2:\r\n section, option = key_tokens\r\n else:\r\n section, option = 'global', key\r\n try:\r\n try:\r\n return theano_cfg.get(section, option)\r\n except ConfigParser.InterpolationError:\r\n return theano_raw_cfg.get(section, option)\r\n except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):\r\n raise KeyError(key)", "def parse_boolean(val: str) -> str | bool:\n val = val.lower()\n if val in (\"y\", \"yes\", \"t\", \"true\", \"on\", \"1\"):\n return True\n if val in (\"n\", \"no\", \"f\", \"false\", \"off\", \"0\"):\n return False\n return val", "def cpp_flag(compiler):\n flags = ['-std=c++17', '-std=c++14', '-std=c++11']\n\n for flag in flags:\n if has_flag(compiler, flag):\n return flag\n\n raise RuntimeError('Unsupported compiler -- at least C++11 support '\n 'is needed!')", "def cpp_flag(compiler):\n flags = ['-std=c++17', '-std=c++14', '-std=c++11']\n\n for flag in flags:\n if has_flag(compiler, flag):\n return flag\n\n raise RuntimeError('Unsupported compiler -- at least C++11 support '\n 'is needed!')", "def _arg_to_command(k: str, v: Optional[Union[str, int, float]] = None):\n command = _arg_to_flag(k)\n if v is not None:\n command += f' {v}'\n return command", "def get_bool(options, name, default=False):\n value = options.get(name)\n if not value:\n return default\n if value.lower() == 'true':\n return True\n elif value.lower() == 'false':\n return False\n else:\n raise zc.buildout.UserError(\n \"Invalid value for %s option: %s\" % (name, value))", "def parse_value(cls, value):\n return bool(value)", "def human_readable(self):\n if self.no_flags_set():\n return \"no flags set\"\n else:\n flag_desc = []\n for name in (\"bookmarked\", \"for_validation\", 
\"candidate\", \"final causative\"):\n if getattr(self, \"flag_%s\" % name.replace(\" \", \"_\")):\n flag_desc.append(name)\n for name in (\"visual\", \"validation\", \"molecular\", \"phenotype_match\", \"summary\"):\n field = getattr(self, \"flag_%s\" % name)\n if field and field != \"empty\":\n flag_desc.append(\"%s rating is %s\" % (name.split(\"_\")[0], field))\n return \", \".join(flag_desc)", "def get_boolval(record, field_name):\n val = recordval(record, field_name)\n if val.lower() not in [\"y\", \"yes\", \"n\", \"no\", \"\"]:\n # TODO: support these alternates in the datahub!\n parser_error(\"bad value in \"+field_name+\": '\"+val+\"'-- try 'Yes' or 'No'\")\n return val", "def get_alternative_flag(_image_path : str) -> str:\n image_extless = os.path.splitext(os.path.basename(_image_path))[0]\n\n #if the final character is an integer, then there's no alternative flag\n if re.search(ONLY_INTEGERS_REGEX, image_extless[-1]):\n alternative_flag = ''\n else:\n alternative_flag = image_extless[-1]\n\n return alternative_flag", "def parse_bool(value):\n if value in (\"true\", \"True\", \"yes\", \"1\", \"on\"):\n return True\n if value in (\"false\", \"False\", \"None\", \"no\", \"0\", \"off\"):\n return False\n return bool(int(value))", "def get_opt(self):\n opts, args = self.parser.parse_args()\n if opts.path is not None:\n opts.path = os.path.abspath(os.path.expandvars(os.path.expanduser(opts.path)))\n if opts.output == \"-\":\n opts.output = sys.__stdout__\n else:\n filepath = os.path.dirname(os.path.realpath(os.path.expanduser(opts.output)))\n if not os.access(filepath,os.W_OK):\n self.parser.error(\"Cannot write to %s\"%filepath)\n if os.path.isfile(opts.output):\n self.parser.error(\"File already exists: %s\"%opts.output) \n if not opts.dryrun:\n try: \n opts.output = open(opts.output,\"w\")\n except:\n self.parser.error(\"Cannot write to %s\"%opts.output)\n else:\n opts.output = sys.__stdout__\n try:\n opts.whitelist = open(opts.whitelist)\n except:\n self.parser.error(\"Cannot open whitelist.\")\n return opts", "def _parse_boolean(node, key):\n element = node.get(key)\n if element is not None:\n return bool(element)\n else:\n return None", "def mock_gitlab_get_flag_value(monkeypatch):\n mock_flag_value = mock.Mock()\n mock_flag_value.return_value = None\n monkeypatch.setattr(\"libgitlab._get_flag_value\", mock_flag_value)\n return mock_flag_value", "def _flags_to_try(source, flags, auto_flags, mode):\n flags = CompilerFlags(flags)\n if sys.version_info >= (3, 8):\n if re.search(r\"# *type:\", source):\n flags = flags | CompilerFlags('type_comments')\n yield flags\n return\n if not auto_flags:\n yield flags\n return\n if PY3:\n yield flags\n return\n if mode == \"eval\":\n if re.search(r\"\\bprint\\b\", source):\n flags = flags | CompilerFlags(\"print_function\")\n yield flags\n return\n yield flags\n if re.search(r\"\\bprint\\b\", source):\n yield flags ^ CompilerFlags(\"print_function\")", "def _fromflagname(cls, name:str, default=...) 
-> enum.Enum:\n if default is not Ellipsis:\n return cls._LOOKUP.get(name, default)\n return cls._LOOKUP[name]", "def getParameter(self, value):\n if value in self.commandLineDefaults:\n return self.commandLineDefaults[value]\n if value in self.defaults:\n return self.defaults[value]\n return None", "def check_flag ( params, string, delete ) :\n i = 0\n value = None\n size = len(string)\n for line in params :\n tmp = line.find(string)\n if tmp != -1 :\n start = tmp + size\n sel_string = line[start:]\n if delete :\n params.pop(i)\n value = sel_string\n i += 1\n return value", "def c_flag(opt, test_not=False):\n if test_not:\n if opt: return \"FALSE\"\n else: return \"TRUE\"\n else:\n if opt: return \"TRUE\"\n else: return \"FALSE\"", "def extract_bool_arg(request, key, default=False):\n if key in request.args:\n value = request.args[key][0].lower()\n if value == 'true':\n return True\n elif value == 'false':\n return False\n else:\n raise InvalidQueryArgument(\n 'Invalid \"{}\" query argument: \"{}\". '\n 'Must be \"true\" or \"false\". '\n 'Defaults to \"{}\" if not provided'\n .format(key, value, str(default).lower()))\n else:\n return default", "def parse_value(self, value_name, default=None):\n\t\treturn self.cfg_root.find(value_name).text", "def parse_args(self, parse_args_request: ParseArgsRequest) -> OptionValueContainer:\n\n flag_value_map = parse_args_request.flag_value_map\n namespace = parse_args_request.namespace\n\n mutex_map: DefaultDict[str, list[str]] = defaultdict(list)\n for args, kwargs in self._option_registrations:\n self._validate(args, kwargs)\n dest = self.parse_dest(*args, **kwargs)\n\n # Compute the values provided on the command line for this option. Note that there may be\n # multiple values, for any combination of the following reasons:\n # - The user used the same flag multiple times.\n # - The user specified a boolean flag (--foo) and its inverse (--no-foo).\n # - The option has multiple names, and the user used more than one of them.\n #\n # We also check if the option is deprecated, but we only do so if the option is explicitly\n # specified as a command-line flag, so we don't spam users with deprecated option values\n # specified in config, which isn't something they control.\n implicit_value = kwargs.get(\"implicit_value\")\n if implicit_value is None and self.is_bool(kwargs):\n implicit_value = True # Allows --foo to mean --foo=true.\n\n flag_vals: list[int | float | bool | str] = []\n\n def add_flag_val(v: int | float | bool | str | None) -> None:\n if v is None:\n if implicit_value is None:\n raise ParseError(\n f\"Missing value for command line flag {arg} in {self._scope_str()}\"\n )\n flag_vals.append(implicit_value)\n else:\n flag_vals.append(v)\n\n for arg in args:\n # If the user specified --no-foo on the cmd line, treat it as if the user specified\n # --foo, but with the inverse value.\n if self.is_bool(kwargs):\n inverse_arg = self._inverse_arg(arg)\n if inverse_arg in flag_value_map:\n flag_value_map[arg] = [self._invert(v) for v in flag_value_map[inverse_arg]]\n implicit_value = self._invert(implicit_value)\n del flag_value_map[inverse_arg]\n\n if arg in flag_value_map:\n for v in flag_value_map[arg]:\n add_flag_val(v)\n del flag_value_map[arg]\n\n # Get the value for this option, falling back to defaults as needed.\n try:\n value_history = self._compute_value(\n dest, kwargs, flag_vals, parse_args_request.passthrough_args\n )\n self._history[dest] = value_history\n val = value_history.final_value\n except ParseError as e:\n # Reraise a new 
exception with context on the option being processed at the time of error.\n # Note that other exception types can be raised here that are caught by ParseError (e.g.\n # BooleanConversionError), hence we reference the original exception type as type(e).\n args_str = \", \".join(args)\n raise type(e)(\n softwrap(\n f\"\"\"\n Error computing value for {args_str} in {self._scope_str()} (may also be\n from PANTS_* environment variables). Caused by:\n\n {e}\n \"\"\"\n )\n )\n\n # If the option is explicitly given, check deprecation and mutual exclusion.\n if val.rank > Rank.HARDCODED:\n self._check_deprecated(dest, kwargs)\n mutex_dest = kwargs.get(\"mutually_exclusive_group\")\n mutex_map_key = mutex_dest or dest\n mutex_map[mutex_map_key].append(dest)\n if len(mutex_map[mutex_map_key]) > 1:\n raise MutuallyExclusiveOptionError(\n softwrap(\n f\"\"\"\n Can only provide one of these mutually exclusive options in\n {self._scope_str()}, but multiple given:\n {', '.join(mutex_map[mutex_map_key])}\n \"\"\"\n )\n )\n\n setattr(namespace, dest, val)\n\n if not parse_args_request.allow_unknown_flags and flag_value_map:\n # There were unconsumed flags.\n raise UnknownFlagsError(tuple(flag_value_map.keys()), self.scope)\n return namespace.build()", "def get_bool(section, option, default=False):\n\tres = get(section, option, default)\n\n\tif res == default:\n\t\treturn default\n\n\tif res.lower() == \"true\" or res == \"1\":\n\t\treturn True\n\n\treturn default", "def option(number, default='no'):\n return answer(number).get('options', default)", "def process_check_input_argument():\n\n try:\n input_argv = sys.argv[1]\n if input_argv == \"0\":\n stand_alone_flag = 0\n else:\n stand_alone_flag = 0\n except IndexError:\n stand_alone_flag = 1\n\n return stand_alone_flag", "def __getitem__(self, name):\n assert self._unparsed is not None, \\\n ('Flags have not been parsed yet: cannot access flag %r' % name)\n try:\n return self._defs[name].value\n except KeyError as err:\n if self._parent is not None:\n return self._parent[name]\n raise err", "def strtobool(value: str) -> Any:\n\n if isinstance(value, str):\n if value.lower() == \"true\":\n return True\n elif value.lower() == \"false\":\n return False\n return value", "def _parse_option_value(line, option_name):\n try:\n option_value = line.split('=')[1].strip()\n except IndexError:\n option_value = ''\n if not option_value:\n raise ValueError(\"No value specified for {} option.\".format(option_name))\n return option_value", "def _adaptConfigurationValue (cls, value : String) -> Object:\n\n Logging.trace(\">>: %r\", value)\n uppercasedValue = value.upper()\n\n if uppercasedValue in cls._validBooleanValueNames:\n result = (uppercasedValue in cls._trueBooleanValueNames)\n elif (cls._integerRegExp.match(value)\n or cls._hexIntegerRegExp.match(value)): \n result = int(value)\n elif cls._realRegExp.match(value):\n result = float(value)\n else:\n result = value\n \n Logging.trace(\"<<: %r\", result)\n return result", "def parseCmd():\n\n flaged = False\n for arg in sys.argv:\n #ie, if it's the program name\n if arg.endswith(\".py\"):\n continue\n \n #If arg matches one of useFlags, print usage and return\n for f in useFlags:\n if f==arg: \n print(usageText)\n return None\n \n #If we get here, it's a config file\n confArg = arg \n if not confArg.endswith(\".txt\"):\n confArg = arg+\".txt\"\n if confArg.startswith(\"use/\"):\n confArg = confArg[4:]\n \n f = open(configsLocation+confArg)\n theThingToReturn = parseConfig(f)\n \n #If we get here, args are done with no 
early returns. Return the thing.\n return theThingToReturn", "def __get(self, option=None):\n\n general = self.parser.options('general')\n\n gen = {}\n if not general:\n if option:\n return None\n return gen\n\n for item in general:\n value = self.parser.get('general', item).strip()\n if value:\n gen[item] = value\n\n if option:\n if gen.has_key(option):\n return gen[option]\n return None\n return gen", "def getStdSwitches( targetPlatform, targetName ):\n Any.requireIsTextNonEmpty( targetPlatform )\n Any.requireIsTextNonEmpty( targetName )\n\n # We need defaults because the macro parser needs the switch to\n # correctly parse c++ code.\n\n\n fileName = os.path.join( 'build/%s/CMakeFiles/%s.dir/flags.make' %\n ( targetPlatform, targetName ) )\n\n Any.requireIsDirNonEmpty( 'build/%s' % targetPlatform )\n Any.requireIsFileNonEmpty( fileName )\n\n # read-in ground truth information\n logging.debug( 'parsing %s', fileName )\n content = FastScript.getFileContent( fileName, splitLines=True )\n raw_C_CFLAGS = ''\n raw_CPP_CFLAGS = ''\n regexp_C_CFLAGS = re.compile( r'^C_FLAGS\\s=\\s+(.*)$' )\n regexp_CPP_CFLAGS = re.compile( r'^CXX_FLAGS\\s=\\s+(.*)$' )\n\n for line in content:\n tmp = regexp_C_CFLAGS.search( line )\n\n if tmp:\n raw_C_CFLAGS = tmp.group( 1 )\n\n tmp = regexp_CPP_CFLAGS.search( line )\n\n if tmp:\n raw_CPP_CFLAGS = tmp.group( 1 )\n\n # get the default language standards\n standards = Compilers.getDefaultLanguageStandard(targetPlatform)\n cStdSwitch = '-std={}'.format( standards[ 'c' ] )\n cppStdSwitch = '-std={}'.format( standards[ 'c++' ] )\n\n # look if the user specified different standards in the C_FLAGS/CPP_FLAGS\n # CMake variables\n candidates = shlex.split( raw_C_CFLAGS )\n for candidate in candidates:\n if candidate.startswith( '-std=' ):\n cStdSwitch = candidate\n\n candidates = shlex.split( raw_CPP_CFLAGS )\n for candidate in candidates:\n if candidate.startswith( '-std=' ):\n cppStdSwitch = candidate\n\n return Switches( c=cStdSwitch, cpp=cppStdSwitch )", "def read_item ( input_arr, item, default, message = True ) :\n tmp = check_flag(input_arr, item,True)\n if tmp == None :\n value = default\n if message:\n print (\"The value for \",item,\" was not declared. 
It will be set to\", value, \"by default.\")\n else :\n if isinstance(default,bool) :\n value = bool(tmp)\n elif isinstance(default,int) :\n value = int(tmp)\n elif isinstance(default,float) :\n value = float(tmp)\n else :\n value = tmp\n return value", "def _parseFeature(self, name, value=None):\n supported = self._parse([(name, value)])\n return supported.getFeature(name)", "def mdlValue(value):\n if isinstance(value, bool):\n return \"yes\" if value else \"no\"\n elif isinstance(value, list):\n value = \" \".join(map(mdlValue, value))\n value = str(value)\n if value == '':\n return '\"\"'\n if \"\\n\" in value:\n return '\"*%s*\"' % value.replace('\\\\', '\\\\\\\\').replace('\"*', '\"\\\\*')\n if re.search('[ \\t{}=]', value) or '//' in value:\n return '\"%s\"' % re.sub(r'([\"\\\\])', r'\\\\\\1', value)\n return value", "def get_flag(self, flag_name):\n flags = {'C':0, # Carry\n 'Z':1, # Zero\n 'I':2, # Interrctrl_upt mask\n 'D':3, # Decimal\n 'B':4, # Break\n 'V':6, # Overflow\n 'N':7} # Negative\n\n flags_reg = self.get_register('P')\n flag_index = flags[flag_name]\n return (flags_reg >> flag_index) & 1", "def get_feature_flag(self, account, flag, signing_account=None):\n account = Account(account, hive_instance=self.hive)\n return self._conveyor_method(account, signing_account,\n \"conveyor.get_feature_flag\",\n [account['name'], flag])", "def get_setting_value(self, title, setting):\r\n return self.parser.get(title, setting)", "def _get_option(self, arg_name: str) -> Any:\n try:\n return getattr(self, f\"__{arg_name}\")\n except AttributeError as ex:\n raise AnalysisError(\n f\"The argument {arg_name} is selected but not defined. \"\n \"This key-value pair should be defined in the analysis option.\"\n ) from ex", "def get_commandlinearg(self, keyname, defaultval=None):\n if (hasattr(self.commandlineargs,keyname)):\n val = getattr(self.commandlineargs,keyname)\n if (val != None):\n return val\n try:\n # try to access commandline args as dictionary\n return self.commandlineargs[keyname]\n except:\n pass\n # return default val\n return defaultval", "def test_get_build_number(self):\n pass", "def boolean(val):\n\tif val == \"True\" or val == \"1\":\n\t\treturn True\n\telse:\n\t\treturn False", "def get_flags(self):\n return self.short_flag, self.long_flag", "def process_bool_arg(arg):\n if isinstance(arg, bool):\n return arg\n elif isinstance(arg, basestring):\n if arg.lower() in [\"true\", \"1\"]:\n return True\n elif arg.lower() in [\"false\", \"0\"]:\n return False", "def castOutputToBuiltInType(key, value):\n\n if isinstance(value, bool):\n return 'yes' if value else 'no'\n if isinstance(value, Enum):\n value = value.name\n if key in ['bind_npi', 'dst_npi', 'src_npi']:\n return addr_npi_name_map[value]\n if key in ['bind_ton', 'dst_ton', 'src_ton']:\n return addr_ton_name_map[value]\n if key == 'ripf':\n return replace_if_present_flap_name_map[value]\n if key == 'priority':\n return priority_flag_name_map[value]\n else:\n return value", "def castInputToBuiltInType(key, value):\n\n try:\n if key in ['bind_npi', 'dst_npi', 'src_npi']:\n return addr_npi_value_map[value]\n elif key in ['bind_ton', 'dst_ton', 'src_ton']:\n return addr_ton_value_map[value]\n elif key == 'ripf':\n return replace_if_present_flap_value_map[value]\n elif key == 'priority':\n return priority_flag_value_map[value]\n elif key in ['con_fail_retry', 'con_loss_retry', 'ssl']:\n if value == 'yes':\n return True\n elif value == 'no':\n return False\n else:\n raise KeyError('Boolean value must be expressed by yes or 
no.')\n elif (key == 'loglevel' and\n value not in [logging.DEBUG, logging.INFO, logging.WARNING, logging.ERROR, logging.CRITICAL]):\n raise KeyError('loglevel must be numeric value of 10, 20, 30, 40 or 50.')\n elif isinstance(value, str) and value.lower() == 'none':\n value = None\n except KeyError:\n raise UnknownValue('Unknown value for key %s: %s' % (key, value))\n\n return value", "def to_python(self, value):\n if value is None:\n return value\n value = super(BitOptionsField, self).to_python(value)\n return BitOptions(self.options.flags, value)" ]
[ "0.6345531", "0.62959224", "0.6244244", "0.6242397", "0.6095011", "0.60254776", "0.5950006", "0.5894679", "0.58870685", "0.5881222", "0.5880513", "0.57614744", "0.57475334", "0.57326", "0.5719314", "0.5702136", "0.5597625", "0.55545485", "0.55461335", "0.55367213", "0.54923135", "0.5464997", "0.54585445", "0.5441802", "0.5435559", "0.5425351", "0.5423785", "0.5418251", "0.54103285", "0.5378774", "0.53632313", "0.5361407", "0.5360665", "0.5360503", "0.53540605", "0.5350304", "0.5340185", "0.5331629", "0.5322973", "0.5322973", "0.5322973", "0.53159255", "0.53030634", "0.5273624", "0.5247039", "0.523785", "0.52344537", "0.52317744", "0.5225567", "0.522127", "0.5216462", "0.52060276", "0.5202673", "0.51995957", "0.51950365", "0.51939625", "0.5179365", "0.5179365", "0.51532704", "0.5137737", "0.5137169", "0.5130668", "0.51299274", "0.5129082", "0.51160043", "0.51115596", "0.5106526", "0.51022243", "0.5088813", "0.50887823", "0.5085785", "0.5085429", "0.5082955", "0.5081273", "0.50763255", "0.50735515", "0.506358", "0.50635314", "0.5056884", "0.50525373", "0.5051567", "0.50473696", "0.50452274", "0.5044467", "0.50305873", "0.5025166", "0.5017133", "0.5015097", "0.50134724", "0.5010375", "0.5006469", "0.5002704", "0.50003165", "0.499706", "0.4996098", "0.49874413", "0.49869347", "0.49793637", "0.4978095", "0.49755022", "0.497073" ]
0.0
-1
The variable containing all positional arguments passed to the function
def pyarg(self): return self._pyarg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getPositionalArgs():", "def get_args(self):\r\n return self.args", "def punkte(self):\n return self.args", "def args(self) -> tuple[Basic, ...]:\n return self._args", "def args(self):\n return self._args", "def args(self):\n return self._args", "def args(self):\n return self._args", "def variables(self):\n return tuple(flatten([a.variables for a in self.args]))", "def GetFunctionParametersAndValues():\n frame = inspect.currentframe().f_back\n args, _, _, values = inspect.getargvalues(frame)\n return ([(i, values[i]) for i in args])", "def args(self):\n return self._args.copy()", "def getargvalues(frame):\r\n args, varargs, varkw = getargs(frame.f_code)\r\n return ArgInfo(args, varargs, varkw, frame.f_locals)", "def func_args(self) -> str:\n\n return self.call_data[10:]", "def get_all_arguments(self):\n args, varargs, keyword, defaults = inspect.getargspec(self.exec_obj)\n if args.count('self') > 0:\n args.remove('self')\n return args", "def get_assign_args(self, arguments):\n pass", "def arguments_from_call_funccode(f):\n fc = fc_or_c(f.__call__)\n argcount = fc.co_argcount\n args = list(fc.co_varnames[1:argcount])\n if not args:\n raise RuntimeError('Function has variable number of arguments')\n return args", "def get_partial_arguments(self):\n return (), {}", "def __parameters__(self) -> tuple[TypeVar, ...]:\n return super().__getattribute__(\"_parameters\")", "def get_xx_args_dict(self):\n return self.__xx_args", "def filter_args(fn, args_tuple):\n sig = inspect.signature(fn)\n flag_var_positional = any([\n inspect.Parameter.VAR_POSITIONAL == value.kind for\n value in sig.parameters.values()])\n if flag_var_positional:\n return args_tuple\n else:\n num_args = len(sig.parameters.items())\n return args_tuple[:num_args]", "def variable_argument(self):\n if self.is_variadic():\n if self.args[-1] == '...':\n # An unnamed variable argument replaces __VA_ARGS__\n return \"__VA_ARGS__\"\n else:\n # Strip '...' 
from argument name\n return self.args[-1][:-3]\n else:\n return None", "def args(self) -> List[str]:\n return self.__args", "def inspect_args_func(frame):\n args, _, _, values = inspect.getargvalues(frame)\n return {key: values[key] for key in args if key != 'self'}", "def getArgument(self, *args):\n return _libsbml.FunctionDefinition_getArgument(self, *args)", "def args(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"args\")", "def args(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"args\")", "def args(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"args\")", "def args(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"args\")", "def args(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"args\")", "def args(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"args\")", "def args(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"args\")", "def args(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"args\")", "def args(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"args\")", "def args(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"args\")", "def args(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"args\")", "def args(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"args\")", "def args(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"args\")", "def args(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"args\")", "def args(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"args\")", "def args(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"args\")", "def args(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"args\")", "def args(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"args\")", "def args(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"args\")", "def args(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"args\")", "def args(cls):\n try:\n args = getfullargspec(cls.__init__)\n except TypeError:\n return []\n return args[0]", "def get_x_args_dict(self):\n return self.__x_args", "def _get_args(function, varargs=False):\n\n try:\n params = signature(function).parameters\n except ValueError:\n # Error on builtin C function\n return []\n args = [\n key\n for key, param in params.items()\n if param.kind not in (param.VAR_POSITIONAL, param.VAR_KEYWORD)\n ]\n if varargs:\n varargs = [\n param.name\n for param in params.values()\n if param.kind == param.VAR_POSITIONAL\n ]\n if len(varargs) == 0:\n varargs = None\n return args, varargs\n else:\n return args", "def get(self):\n return self.args, self.kwargs", "def preprocess_arguments(self, *args, **kwargs):\n return (args, kwargs)", "def _get_arg_name(self, arg, variable_name):", "def varfunc(self, fields=[]):\n self.func_arguments = fields", "def args(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"args\")", "def args(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"args\")", "def args(self):\n\t\tret = []\n\t\tfor argname in self._arg_names:\n\t\t\tret += [self._args[argname]]\n\t\treturn ret", "def derive_args(func):\n args = inspect.getfullargspec(func).args\n if args and is_selfish_name(args[0]):\n del args[0]\n return args", "def extractArguments(frame):\n\n\targuments = ([], None, None)\n\ttry:\n\t\tsource = textwrap.dedent(str().join(inspect.getsourcelines(frame)[0]).replace(\"\\\\\\n\", str()))\n\texcept 
(IOError, TypeError) as error:\n\t\treturn arguments\n\n\ttry:\n\t\tnode = ast.parse(source)\n\texcept:\n\t\treturn arguments\n\n\tif not node.body:\n\t\treturn arguments\n\n\tnode = node.body[0]\n\tif not isinstance(node, ast.FunctionDef):\n\t\treturn arguments\n\n\treturn [arg.id for arg in node.args.args], node.args.vararg, node.args.kwarg", "def test_args(self):\n args = forge.args\n assert isinstance(args, forge._signature.VarPositional)\n assert args.name == 'args'\n assert args.converter is None\n assert args.validator is None", "def __call__(self, *args):\n return args[self.i_dim]", "def args_str(self):", "def _extract_args(self, func):\n sig = inspect.signature(func)\n\n # Backwards compatibility\n if len(sig.parameters) == 1:\n ((name, parameter),) = sig.parameters.items()\n if (\n parameter.kind is parameter.POSITIONAL_OR_KEYWORD\n and parameter.annotation in (parameter.empty, argparse.Namespace)\n ):\n self._require_namespace = name\n return\n\n for name, parameter in sig.parameters.items():\n if parameter.annotation is argparse.Namespace:\n self._require_namespace = name\n else:\n arg = Argument.from_parameter(name, parameter)\n action = arg.register_with_proxy(self)\n self._args.append((name, action.dest))", "def function(args):\n pass", "def args(self) -> Optional[str]:\n return pulumi.get(self, \"args\")", "def args(self):\n return self._args_[: self.nargs()]", "def arguments(args_to_pop=None) :\n posname, kwname, args = inspect.getargvalues(inspect.stack()[1][0])[-3:]\n posargs = args.pop(posname, [])\n args.update(args.pop(kwname, []))\n if args_to_pop is not None :\n for arg in args_to_pop :\n args.pop(arg)\n return args, posargs", "def args(self) -> typing.Tuple[str, typing.List[str]]:\n func = inspect.stack()[1][3]\n command = func[len(self.CMD_PREFIX):]\n return ('{} {}'.format(sys.argv[0], command),\n sys.argv[2:])", "def __getnewargs__(self):\n return ()", "def argument_list(self):\n answer = self._call('argument_list')\n return answer.names", "def __len__(self):\n if self.args is None:\n return 0\n return len(vars(self.args))", "def getArgs():\n cache = _arg_cache\n if not cache:\n cache.append(parseArgs())\n return cache[0]", "def args(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"args\")", "def get_args( self, **kwargs ):\n args = []\n for at in self.arg_types:\n args.append( kwargs[at] )\n return args", "def get_arguments(self):\n if self.arguments is not None:\n return self.arguments\n elif self.parent is not None:\n return self.parent.get_arguments()\n else:\n return []", "def _get_arguments(self) -> str:\n func = self.node\n\n # Early logic used to iterate over, `func.get_arguments()`, however when there\n # is an unknown type clang will sometimes fail to provide tokens for that\n # argument. 
For example in \"unknown_type foo[]\" the brackets will cause clang\n # to return back no tokens for the argument.\n start = func.location\n end = func.extent.end\n if func.is_definition():\n # When a function is a definition the last child is the compound statement\n # so we need to move prior to the compound statement\n children = list(func.get_children())\n body_start = children[-1].extent.start.offset\n end = cindex.SourceLocation.from_offset(func.tu, start.file, body_start - 1)\n\n extent = cindex.SourceRange.from_locations(start, end)\n non_comment_tokens = (\n t\n for t in cindex.TokenGroup.get_tokens(func.tu, extent=extent)\n if t.kind != cindex.TokenKind.COMMENT\n )\n\n # Even though this will place spaces around all the tokens, the sphinx C domain\n # will provide some formatting to make it look nicer in the final output.\n full_signature = \" \".join(t.spelling for t in non_comment_tokens)\n\n _, _, arguments = full_signature.partition(\"(\")\n arguments = arguments.rstrip(\")\")\n arguments = arguments.strip()\n\n return arguments", "def positional_args(func):\n def inner(s, parser, *args, **kwargs):\n clargs = parser.run()\n return func(s, clargs.posn, *args, **kwargs)\n \n if (func.__doc__ != None): inner.__doc__=func.__doc__+\"\\n\\n[decorated by @positional_arguments]\\n\"\n inner.__name__=func.__name__\n return inner", "def args(self):\n return self._parse_args", "def _get_call_argument(self, bind_c_arg):\n original_arg = bind_c_arg.original_function_argument_variable\n arg_var = self.scope.find(original_arg.name, category='variables')\n if original_arg.is_ndarray:\n start = LiteralInteger(1) # C_F_Pointer leads to default Fortran lbound\n stop = None\n indexes = [Slice(start, stop, step) for step in bind_c_arg.strides]\n return IndexedElement(arg_var, *indexes)\n else:\n return arg_var", "def dataargs(self):\n return self.argsbytype(Data)", "def arg_names(self):\n return self._arg_names", "def arg_names(self):\n return self._arg_names", "def get_Callable_args_res(clb):\n try:\n return clb.__args__, clb.__result__\n except AttributeError:\n # Python 3.6\n return clb.__args__[:-1], clb.__args__[-1]", "def variadic_args(self, /, *args, **kwargs):\n return self._func(args, **kwargs)", "def get_arguments_string(self):\n result = self.__get_client_server_arg_string('')\n result = self.__get_x_args_string(result)\n result = self.__get_xx_args_string(result)\n result = self.__get_system_property_args_string(result)\n result = self.__get_unsorted_args_string(result)\n return result", "def get_num_positional_args(fun):\n sig = inspect.signature(fun)\n return len([\n name for name, param in sig.parameters.items() if param.kind in\n [inspect.Parameter.POSITIONAL_OR_KEYWORD, inspect.Parameter.POSITIONAL_ONLY]\n ])", "def parameters(self):\n return self.vars", "def names(self):\n result = []\n result.extend(self.positional_arguments)\n if self.arbitary_positional_arguments is not None:\n result.append(self.arbitary_positional_arguments)\n if self.arbitary_keyword_arguments is not None:\n result.append(self.arbitary_keyword_arguments)\n result.extend(self.keyword_arguments)\n return result", "def parameters(self):", "def extra_args(self):\n return []", "def __getinitargs__(self):\n return (self.cutout,)", "def getargspec(self,obj):\n\n if inspect.isfunction(obj):\n func_obj = obj\n elif inspect.ismethod(obj):\n func_obj = obj.im_func\n else:\n raise TypeError, 'arg is not a Python function'\n args, varargs, varkw = inspect.getargs(func_obj.func_code)\n return args, varargs, varkw, 
func_obj.func_defaults", "def get_id_args(func, arg):\n\n return \"{} {}\".format(func.__name__, arg)", "def args(self, default_args=(), diff=()):\n args = self._args\n if not args:\n args = default_args\n\n return self.expand_vars(args, diff=diff)", "def get_mandatory_arguments(self):\n args, varargs, keyword, defaults = inspect.getargspec(self.exec_obj)\n\n if defaults is not None:\n args = args[:-len(defaults)]\n\n if args.count('self') > 0:\n args.remove('self')\n return args", "def argnames(method):\n return [arg for arg in method.__code__.co_varnames if arg != \"self\"]", "def __get_x_args_string(self, incremental_result):\n _method_name = '__get_x_args_string'\n\n self._logger.entering(incremental_result, class_name=self._class_name, method_name=_method_name)\n result = incremental_result\n result = self.__get_x_size_args(result)\n result = self.__get_x_value_args(result)\n result = self.__get_x_other_args(result)\n self._logger.exiting(class_name=self._class_name, method_name=_method_name, result=result)\n return result", "def getRequiredArguments(self):\n if self._initValue.needsArgument:\n return [self._initValue.getArgument()]\n else:\n return []", "def variables_used (self) :\r\n\t\treturn [i[0] for i in self.parameters]", "def _sorted_args(self):\n return self.args", "def arguments(self):\n return parse_arguments(self['data'])", "def _get_args(idx, *args):\n new_args = []\n for arg in list(args[0]):\n if isinstance(arg, Iterable):\n new_args.append(arg[idx])\n else:\n new_args.append(arg)\n\n return new_args", "def arglist(self) -> List:\n return self.argv[1:]", "def countParam(self):\n return self.decl.args[mpi_array_calls[self.decl.name][self.pos]]" ]
[ "0.785117", "0.7138741", "0.7081171", "0.6903433", "0.6763262", "0.6763262", "0.6763262", "0.6745838", "0.67380923", "0.6622345", "0.65949064", "0.65869516", "0.65081775", "0.64930344", "0.64645815", "0.6435059", "0.6429822", "0.6423793", "0.6422557", "0.6418105", "0.6416629", "0.63916314", "0.638471", "0.6368917", "0.6368917", "0.6368917", "0.6368917", "0.6368917", "0.6368917", "0.6368917", "0.6368917", "0.6368917", "0.6368917", "0.6368917", "0.6368917", "0.6368917", "0.6368917", "0.6368917", "0.6368917", "0.6368917", "0.6368917", "0.6368917", "0.6368917", "0.63629526", "0.63625073", "0.63459355", "0.6325823", "0.6318515", "0.6292775", "0.6248152", "0.62339526", "0.62339526", "0.6221283", "0.6202718", "0.6198589", "0.6195021", "0.6181336", "0.6177034", "0.61619425", "0.61454463", "0.61450577", "0.61321896", "0.6123793", "0.6119264", "0.6118163", "0.6104679", "0.6104498", "0.610374", "0.60958004", "0.60872805", "0.6065377", "0.60513437", "0.6028913", "0.60232896", "0.6017203", "0.60151505", "0.600506", "0.600506", "0.5990372", "0.59811455", "0.597316", "0.59250265", "0.5919955", "0.5913436", "0.5913385", "0.5912351", "0.59111273", "0.5909145", "0.5906257", "0.59052753", "0.59031695", "0.5900731", "0.5896582", "0.5885271", "0.5873769", "0.5870865", "0.5864205", "0.5845889", "0.5836876", "0.5835512" ]
0.61285037
62
The variable containing all keyword arguments passed to the function
def pykwarg(self): return self._pykwarg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_kwargs():\n\treturn get_kwargs_raw(sys.argv)", "def get_used_kwargs(self):\n return self._used_kwargs", "def get_keyword_args(function):\n argspec = inspect.getargspec(function)\n kwargs = argspec.args[len(argspec.args) - len(argspec.defaults):]\n kwargs = {arg: value for arg, value in zip(kwargs, argspec.defaults)}\n return kwargs", "def get(self):\n return self.args, self.kwargs", "def getPositionalArgs():", "def kwargs(self):\n return self._kwargs", "def kwargs(self):\n return self._kwargs", "def get_unused_kwargs(self):\n return self._unused_kwargs", "def get_partial_arguments(self):\n return (), {}", "def get_kwargs(self):\n return {}", "def params(self, **kwargs):\n return kwargs", "def get_args(self):\r\n return self.args", "def varfunc(self, fields=[]):\n self.func_arguments = fields", "def extract_keywords(func):\n if hasattr(func, 'im_func'):\n func = func.im_func\n\n try:\n return func.func_code.co_varnames[-len(func.func_defaults):]\n except (TypeError, ValueError, IndexError):\n return tuple()", "def get_x_args_dict(self):\n return self.__x_args", "def getArgs(useKwargFormat=True, includeVariableArgs=True, numFramesAgo=1, excludeList=[]):\n\tframe = inspect.getouterframes(inspect.currentframe())[numFramesAgo][0]\n\targNames, varArgs_name, varKwargs_name, locals_ = inspect.getargvalues(frame)\n\tvarArgs = locals_[varArgs_name] if varArgs_name != None else tuple()\n\tvarKwargs = locals_[varKwargs_name] if varKwargs_name != None else {}\n\tnotArgs = set(locals_.iterkeys()) - set(argNames)\n\t\n\tfor notArg in notArgs:\tdel locals_[notArg]\n\texcludeList.append(\"self\")\n\texcludeList.append(\"cls\")\n\tmixedKwargsArgs = OrderedDict((argName, locals_[argName]) for argName in argNames if argName not in excludeList)\n\t\n\tif useKwargFormat == True:\n\t\tkwargs = dict(mixedKwargsArgs)\n\t\tif includeVariableArgs:\n\t\t\tkwargs.update(varKwargs)\n\t\treturn kwargs\n\telif useKwargFormat == False:\n\t\targs = tuple(mixedKwargsArgs.values())\n\t\tif includeVariableArgs:\n\t\t\targs += varArgs\n\t\treturn args\n\telif useKwargFormat == None:\n\t\tkwargs = dict(mixedKwargsArgs)\n\t\tif includeVariableArgs:\n\t\t\tkwargs.update(varKwargs)\n\t\treturn varArgs, kwargs\n\telse:\n\t\traise Exception(\"Invalid useKwargFormat\")", "def get_parameters(**kwargs):\r\n parameters = vars(global_file.params)\r\n for key, value in kwargs.items():\r\n parameters[str(key)] = value\r\n return parameters", "def get_input_arguments(kwargs, function, warn=True):\n np.set_printoptions(threshold=20)\n print('\\narguments to {}:'.format(function.__qualname__))\n params = inspect.signature(function)\n input_kwargs = {}\n not_arguments = {}\n for k, v in kwargs.items():\n if k in params.parameters:\n input_kwargs[k] = v\n print_item(k, v)\n else:\n not_arguments[k] = v\n if warn:\n print('\\nother arguments:')\n for k, v in not_arguments.items():\n #print('{}: {}'.format(k, v))\n print_item(k, v)\n print('\\n')\n return input_kwargs", "def get_input_arguments(kwargs, function, warn=True):\n np.set_printoptions(threshold=20)\n print('\\narguments to {}:'.format(function.__qualname__))\n params = inspect.signature(function)\n input_kwargs = {}\n not_arguments = {}\n for k, v in kwargs.items():\n if k in params.parameters:\n input_kwargs[k] = v\n print_item(k, v)\n else:\n not_arguments[k] = v\n if warn:\n print('\\nother arguments:')\n for k, v in not_arguments.items():\n #print('{}: {}'.format(k, v))\n print_item(k, v)\n print('\\n')\n return input_kwargs", "def __parameters__(self) -> tuple[TypeVar, 
...]:\n return super().__getattribute__(\"_parameters\")", "def parameters(self):", "def get_xx_args_dict(self):\n return self.__xx_args", "def _get_reproducing_arguments(self):\n reproducing_arguments = {\n 'include': self.include,\n 'exclude': self.exclude,\n 'copy': self.copy,\n }\n args_names = {name: getattr(self, name) for name in self.args_names}\n reproducing_arguments.update(args_names)\n return reproducing_arguments", "def test_keyword(self):\n varargs = ()\n kwargs = {'default' : 12}\n method = getattr(self.foo,'f_default')\n var_dict = reassign_function_arguments(method, varargs, kwargs)\n self.assert_(var_dict['default'] == 12)\n self.assert_(len(var_dict) == 1)", "def get_arguments(self, **include):\n d = dict(self._kwargs)\n\n for k in include:\n if not include[k] and k in d:\n d.pop(k)\n return d", "def preprocess_arguments(self, *args, **kwargs):\n return (args, kwargs)", "def punkte(self):\n return self.args", "def extra_target_arguments(self):\n return {}", "def test_kwargs(self):\n kwargs = forge.kwargs\n assert isinstance(kwargs, forge._signature.VarKeyword)\n assert kwargs.name == 'kwargs'\n assert kwargs.converter is None\n assert kwargs.validator is None", "def _get_param_names(self):\n temp_params = {'function': self.function, 'target': self.target}\n\n temp_params.update(self.kwargs)\n\n return temp_params", "def test_onearg_and_keyword(self):\n varargs = (12,)\n kwargs = {'default' : 13}\n method = getattr(self.foo,'f_onearg_and_default')\n var_dict = reassign_function_arguments(method, varargs, kwargs)\n self.assert_(var_dict['arg1'] == 12)\n self.assert_(var_dict['default'] == 13)\n self.assert_(len(var_dict) == 2)", "def build_arg_list(fn, env):\r\n kw = {}\r\n argspec = inspect.getargspec(fn)\r\n\r\n # if there is a **kw argument in the fn definition,\r\n # just pass along the environment\r\n if argspec[2]:\r\n kw = env\r\n #else for each entry in the arglist set the value from the environment\r\n else:\r\n #skip self\r\n argnames = argspec[0][1:]\r\n for name in argnames:\r\n if name in env:\r\n kw[name] = env[name]\r\n return kw", "def parameters(self) -> Dict[str, Any]:\n return self.data[\"args\"].get(\"parameters\", {})", "def as_kwargs(self) -> Dict[str, Any]:\n ret = {}\n for arg in self.args.values():\n ret[arg.name] = arg.value\n return ret", "def extra_args(self):\n return []", "def params(self):\n pass", "def get_params(self):", "def get_all_arguments(self):\n args, varargs, keyword, defaults = inspect.getargspec(self.exec_obj)\n if args.count('self') > 0:\n args.remove('self')\n return args", "def get_argument_as_keywords(self):\n status = True\n arg_kv = self.get_values_for_mandatory_args()\n if len(arg_kv) != len(self.req_args_list):\n msg = 'could not execute %s without mandatory arguments' % (object)\n self.data_repository = skip_and_report_status(self.data_repository, msg)\n status = False\n arg_kv = self.get_values_for_optional_args(arg_kv)\n return arg_kv, status", "def get_args(self):\n rqst = self.request\n args = rqst.arguments()\n resp = {}\n for arg in args:\n resp[arg] = repr(rqst.get_all(arg))\n return resp", "def args(self):\n return self._args", "def args(self):\n return self._args", "def args(self):\n return self._args", "def args(self) -> tuple[Basic, ...]:\n return self._args", "def get_dict(**kwargs):\n return kwargs", "def inspect_args_func(frame):\n args, _, _, values = inspect.getargvalues(frame)\n return {key: values[key] for key in args if key != 'self'}", "def func_args(self) -> str:\n\n return self.call_data[10:]", "def 
getargvalues(frame):\r\n args, varargs, varkw = getargs(frame.f_code)\r\n return ArgInfo(args, varargs, varkw, frame.f_locals)", "def get_filter_kwargs(self, *_, **__) -> Dict[str, Any]:", "def args(self):\n return self._args.copy()", "def names(self):\n result = []\n result.extend(self.positional_arguments)\n if self.arbitary_positional_arguments is not None:\n result.append(self.arbitary_positional_arguments)\n if self.arbitary_keyword_arguments is not None:\n result.append(self.arbitary_keyword_arguments)\n result.extend(self.keyword_arguments)\n return result", "def get_arguments(self, args=(), kwargs=None, onlykeys=False, onlyused=False,\n func=None):\n if func is None:\n func = self.__init__\n\n # check what parameters to add\n adds, params, kwargs = _helper_parameters(func=func, args=args, kwargs=kwargs,\n onlykeys=onlykeys, onlyused=onlyused)\n\n _map_parameters = getattr(self, \"_map_parameters\", None)\n for add, key in zip(adds, params):\n if add and key not in kwargs:\n try:\n if _map_parameters is not None and key in _map_parameters:\n mapped_key = _map_parameters[key]\n # if mapped_key is None then it means variable is not\n # assigned in the __init__ of the instance so ignore it\n if mapped_key is not None:\n kwargs[key] = getattr(self, mapped_key)\n else:\n kwargs[key] = getattr(self, key)\n except AttributeError:\n e, msg, traceback = sys.exc_info()\n msg.args = (\n msg.args[0] + \". Review @copy_support decorator or \"\n \"BaseCopySupporter class for more info.\",)\n raise_(e, msg, traceback)\n\n if onlykeys:\n return kwargs\n return args, kwargs", "def args_str(self):", "def parameters(self):\n return self.vars", "def _invocation_params(self) -> Dict[str, Any]:\n return self._default_params", "def GetFunctionParametersAndValues():\n frame = inspect.currentframe().f_back\n args, _, _, values = inspect.getargvalues(frame)\n return ([(i, values[i]) for i in args])", "def parameters(self):\n pass", "def __getnewargs__(self):\n return ()", "def list_kwargs(func):\n \n details = inspect.getargspec(func)\n nopt = len(details.defaults)\n \n return details.args[-nopt:]", "def variables_used (self) :\r\n\t\treturn [i[0] for i in self.parameters]", "def variables(self):\n return tuple(flatten([a.variables for a in self.args]))", "def __getinitargs__(self):\n\n return (self.name, self.value)", "def __getnewargs__(self):\n return ({'pairs': self.__pairs,\n 'app': self.__app,\n 'namespace': self.__namespace},)", "def _prepare_args(local_vars):\n return {k: v for k, v in local_vars.items() if k != 'self'}", "def get_assign_args(self, arguments):\n pass", "def get_args( self, **kwargs ):\n args = []\n for at in self.arg_types:\n args.append( kwargs[at] )\n return args", "def get_kwd_args(func):\n try:\n sig = inspect.signature(func)\n except AttributeError:\n args, _, _, defaults = inspect.getargspec(func)\n if defaults:\n kwonlyargs = args[-len(defaults):]\n else:\n kwonlyargs = []\n else:\n kwonlyargs = {p.name:p.default for p in sig.parameters.values()\n if p.default is not p.empty}\n\n return kwonlyargs", "def params():\n raise NotImplementedError", "def add_kwargs():\n pass", "def pyarg(self):\n return self._pyarg", "def get_params(self):\n pass", "def _helper_parameters(func, args=(), kwargs=None, onlykeys=False, onlyused=False):\n if kwargs is None:\n kwargs = {}\n # params = list(inspect.signature(self.__init__).parameters.keys())\n params = inspect.getargspec(func).args[1:] # TODO replace deprecated getargspec to work with py2 and py3, perhaps by getfullargspec\n\n if onlykeys 
and not onlyused: # only add to keywords\n covered = 0 # simulate no args\n else:\n covered = len(args)\n\n if onlyused and onlykeys: # only add modified by user\n adds = [(True if i < covered or key in kwargs else False) for i, key in\n enumerate(params)]\n # add keys from args\n for i, val in enumerate(args):\n kwargs[params[i]] = val\n elif onlyused:\n adds = [(True if i >= covered and key in kwargs else False) for i, key\n in\n enumerate(params)]\n else:\n adds = [(True if i >= covered else False) for i, key in\n enumerate(params)]\n return adds, params, kwargs", "def all_the_kwargs(**kwargs):\n kwargs_counter = len(kwargs)\n return kwargs_counter", "def extract_captured_arguments(func):\n captured_arguments = getattr(func, ATTR_NAME)\n if type(captured_arguments) is not _CapturedArguments: # pylint: disable=unidiomatic-typecheck\n # The attribute was not set by tcm, so effectively it does not exist.\n raise AttributeError\n delattr(func, ATTR_NAME)\n return captured_arguments", "def _get_args(function, varargs=False):\n\n try:\n params = signature(function).parameters\n except ValueError:\n # Error on builtin C function\n return []\n args = [\n key\n for key, param in params.items()\n if param.kind not in (param.VAR_POSITIONAL, param.VAR_KEYWORD)\n ]\n if varargs:\n varargs = [\n param.name\n for param in params.values()\n if param.kind == param.VAR_POSITIONAL\n ]\n if len(varargs) == 0:\n varargs = None\n return args, varargs\n else:\n return args", "def test_kw_args_with_keywords():\n assert arguments.fun_opt_kw_params(visited_color='blue',\n link_color='red',\n back_color='yellow',\n fore_color='orange') == ('orange',\n 'yellow',\n 'red', 'blue')", "def get_params(self):\n return {}", "def test_020_kwargs(self):\n caller = self.get_caller([KwargsTaskOverride])\n self.assertEqual([\"A\", \"B\"], caller(\"A\", \"B\"))", "def __getinitargs__(self):\n return (self.cutout,)", "def get_params(self, deep=...):\n ...", "def kwargs(self):\n return self.environ.get('router.kwargs', {})", "def _get_params(self):\r\n return self.k._get_params()", "def _getargs(fn_sig):\n params = fn_sig.parameters\n args = []\n for k, v in params.items():\n if (v.kind & v.POSITIONAL_OR_KEYWORD) == v.POSITIONAL_OR_KEYWORD:\n args.append(k)\n else:\n msg = \"%s argument type unsupported in jitclass\" % v.kind\n raise errors.UnsupportedError(msg)\n return args", "def getParameters(self): #$NON-NLS-1$\r", "def get_kwargs(d):\n raise NotImplementedError(\"subclass must implement get_kwargs()\")", "def args(self) -> List[str]:\n return self.__args", "def get_mandatory_arguments(self):\n args, varargs, keyword, defaults = inspect.getargspec(self.exec_obj)\n\n if defaults is not None:\n args = args[:-len(defaults)]\n\n if args.count('self') > 0:\n args.remove('self')\n return args", "def param(self):\r\n\r\n return []", "def _get_context(argspec, kwargs):\n if argspec.has_kwargs:\n return kwargs\n return dict((arg, kwargs[arg]) for arg in argspec.args if arg in kwargs)", "def my_func(a: 'a string',\n b: int = 1,\n *args: 'additional positional args',\n kw1: 'first keyword-only arg',\n kw2: 'second keyword-only arg' = 10,\n **kwargs: 'additional keyword-only args') -> str:\n pass", "def format_arguments(self, **kwargs):\n return kwargs", "def parameters(self):\n return self._params", "def param(self):\r\n return []", "def _get_parameters(self):\n return None", "def kwargs(kwargs):\n run_kwargs(kwargs)", "def get_argdict(cls, toolchain, args):\n return {} # Empty must be overloaded (if required)", "def 
define_parameters(self):", "def get_variables_func(arguments, exclude):\n names = [name for name in arguments.keys() if name not in exclude]\n return lambda obj: {name: getattr(obj, name) for\n name in names}", "def variable_argument(self):\n if self.is_variadic():\n if self.args[-1] == '...':\n # An unnamed variable argument replaces __VA_ARGS__\n return \"__VA_ARGS__\"\n else:\n # Strip '...' from argument name\n return self.args[-1][:-3]\n else:\n return None", "def _make_args(self, args, defaults=[], vararg=None, kwonlyargs=[],\n kw_defaults=[], kwarg=None):\n # On Python 2 convert vararg and kwarg to raw name, raise error using\n # lineno stored on the node and lexer from self.\n # On Python 3.3 extract name and annotation\n # After should be straight forward\n raise NotImplementedError()" ]
[ "0.6939219", "0.6754662", "0.66321796", "0.659612", "0.6564183", "0.64470226", "0.64470226", "0.64356077", "0.6427776", "0.64146596", "0.63882744", "0.63835824", "0.6379224", "0.6308331", "0.62969697", "0.62953955", "0.62880987", "0.62775856", "0.62775856", "0.6273936", "0.6250937", "0.62402236", "0.62225306", "0.621984", "0.62055194", "0.62026393", "0.618893", "0.6148354", "0.61439735", "0.61415476", "0.61373997", "0.61358505", "0.61358094", "0.6116249", "0.61147255", "0.61123747", "0.6083915", "0.60822254", "0.6062359", "0.60395217", "0.60392123", "0.60392123", "0.60392123", "0.60120875", "0.60025364", "0.59992427", "0.59911674", "0.5988696", "0.5981716", "0.59772", "0.59738827", "0.5971754", "0.5952845", "0.5951618", "0.5948206", "0.5931184", "0.5927307", "0.59215796", "0.5919297", "0.59179527", "0.59080917", "0.5900614", "0.5900058", "0.58990115", "0.58961284", "0.5872504", "0.5867296", "0.5852849", "0.5848699", "0.5836492", "0.58335185", "0.5832028", "0.5827197", "0.5817659", "0.58116394", "0.5811117", "0.5810446", "0.5810334", "0.5807969", "0.5806792", "0.5806087", "0.5797197", "0.5796535", "0.5796227", "0.5795233", "0.57918125", "0.5771077", "0.5763464", "0.5749166", "0.57425034", "0.57407695", "0.57396543", "0.57356215", "0.5735295", "0.57340646", "0.57328385", "0.57320803", "0.5721852", "0.5721543", "0.5716666" ]
0.6603397
3
The flags indicating the types of the objects to be collected from the python arguments passed to the function
def flags(self): return self._flags
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_check_types():", "def format_flags(self):\n flags = []\n if self.is_unique:\n flags.append('Unique')\n if self.is_weak:\n flags.append('Weak')\n if self.is_ctor:\n flags.append('Constructor')\n if self.is_warning:\n flags.append('Warning')\n if self.is_ref:\n flags.append('Indirect reference')\n if self.is_reloc:\n flags.append('Reloc function')\n if self.is_debug:\n flags.append('Debug')\n if self.is_dynamic:\n flags.append('Dynamic')\n if self.is_func:\n flags.append('Function')\n if self.is_file:\n flags.append('File')\n if self.is_object:\n flags.append('Object')\n return flags", "def __repr_args__(self):\n args = dict(super().__repr_args__())\n try:\n del args['type_hint']\n except KeyError:\n pass\n return args.items()", "def set_arg_types( self ):\n if self.mode == 'grad':\n self.function = terms.dw_grad\n use_method_with_name( self, self.get_fargs_grad, 'get_fargs' )\n elif self.mode == 'div':\n self.function = terms.dw_div\n use_method_with_name( self, self.get_fargs_div, 'get_fargs' )\n else:\n self.function = self.d_eval\n use_method_with_name( self, self.get_fargs_eval, 'get_fargs' )\n self.use_caches = {'state_in_volume_qp' : [['parameter_s']],\n 'div_vector' : [['parameter_v']]}", "def get_flags(self, args):\n\n\t\tpositional = []\n\n\t\tfor argument in args:\n\t\t\t# A flag is an instance of a subclass of\n\t\t\t# flags.Flags if it was passed alone\n\t\t\tif isinstance(argument, flags.Flags):\n\t\t\t\tpositional.append(argument)\n\n\t\t\t# or is an integer if it was (bitwise) OR'd\n\t\t\t# with another flag (a \"flag combination\")\n\t\t\telif isinstance(argument, int):\n\t\t\t\tif argument < 0 or argument >= flags.LIMIT:\n\t\t\t\t\traise errors.FlagError(\"Flag value '{0}' is out of range \"\n\t\t\t\t\t\t\t\t\t\t \"!\".format(argument))\n\t\t\t\tpositional.append(argument)\n\n\t\t\t# Dictionaries store 'always'-arguments\n\t\t\telif isinstance(argument, dict):\n\t\t\t\tfor key, value in argument.items():\n\t\t\t\t\t# Simple 'always'-argument where one string\n\t\t\t\t\t# is mapped to one formatting flag-combination\n\t\t\t\t\tif isinstance(key, str):\n\t\t\t\t\t\tself.always[key] = value\n\n\t\t\t\t\t# Complex 'always'-argument with a\n\t\t\t\t\t# tuple containing strings, each with the same\n\t\t\t\t\t# flag-combination (same value)\n\t\t\t\t\telif isinstance(key, tuple):\n\t\t\t\t\t\tfor i in key:\n\t\t\t\t\t\t\tself.always[i] = value\n\t\t\t\t\telse:\n\t\t\t\t\t\traise errors.EcstasyError(\"Key '{0}' in dictionary \"\n\t\t\t\t\t\t\t\t\t\t\t\t \"argument passed is neither \"\n\t\t\t\t\t\t\t\t\t\t\t\t \"a string nor a tuple \"\n\t\t\t\t\t\t\t\t\t\t\t\t \"of strings!\".format(key))\n\n\t\t\telif isinstance(argument, collections.Iterable):\n\t\t\t\tpositional += self.get_flags(argument)\n\n\t\t\telse:\n\t\t\t\traise errors.EcstasyError(\"Argument '{0}' is neither a flag, a \"\n\t\t\t\t\t\t\t\t\t\t \"(bitwise) OR'd flag-combination, a \"\n\t\t\t\t\t\t\t\t\t\t \"dictionary nor an iterable of \"\n\t\t\t\t\t\t\t\t\t\t \"positional arguments \"\n\t\t\t\t\t\t\t\t\t\t \"!\".format(argument))\n\n\t\treturn positional", "def get_parsed_flags():\n return Flags.parsed_args", "def argument_types(self):\r\n class ArgumentsIterator(collections.Sequence):\r\n def __init__(self, parent):\r\n self.parent = parent\r\n self.length = None\r\n\r\n def __len__(self):\r\n if self.length is None:\r\n self.length = conf.lib.clang_getNumArgTypes(self.parent)\r\n\r\n return self.length\r\n\r\n def __getitem__(self, key):\r\n # FIXME Support slice objects.\r\n if not isinstance(key, 
int):\r\n raise TypeError(\"Must supply a non-negative int.\")\r\n\r\n if key < 0:\r\n raise IndexError(\"Only non-negative indexes are accepted.\")\r\n\r\n if key >= len(self):\r\n raise IndexError(\"Index greater than container length: \"\r\n \"%d > %d\" % ( key, len(self) ))\r\n\r\n result = conf.lib.clang_getArgType(self.parent, key)\r\n if result.kind == TypeKind.INVALID:\r\n raise IndexError(\"Argument could not be retrieved.\")\r\n\r\n return result\r\n\r\n assert self.kind == TypeKind.FUNCTIONPROTO\r\n return ArgumentsIterator(self)", "def cmd_type(args):", "def read_flags():\n return flag_args", "def extra_args(self):\n return []", "def _collect_repr_args(self, poargs, kwargs):", "def flags(self) -> UserFlag:", "def _clean_objects(*args):\n\n for arg in args:\n attrs = dir(arg)\n # QuadContourSet\n if 'collections' in attrs:\n for item in arg.collections: item.remove()\n # Wind barbs and contour labels\n elif '__len__' in attrs:\n for item in arg:\n item.remove()\n # Text strings and everything else\n elif 'get_text' in attrs: arg.remove()", "def get_args( self, **kwargs ):\n args = []\n for at in self.arg_types:\n args.append( kwargs[at] )\n return args", "def _getargs(fn_sig):\n params = fn_sig.parameters\n args = []\n for k, v in params.items():\n if (v.kind & v.POSITIONAL_OR_KEYWORD) == v.POSITIONAL_OR_KEYWORD:\n args.append(k)\n else:\n msg = \"%s argument type unsupported in jitclass\" % v.kind\n raise errors.UnsupportedError(msg)\n return args", "def get_types(*args, **kwargs) -> list:\n arg_types = []\n for arg in args:\n arg_types.append(type(arg))\n for values in kwargs.values():\n arg_types.append(type(values))\n return arg_types", "def get_flags(args):\r\n\r\n flags = 0\r\n\r\n if args.regexfilepattern is not None:\r\n flags |= pygrep.FILE_REGEX_MATCH\r\n\r\n if not args.regexp:\r\n flags |= pygrep.LITERAL\r\n elif args.dotall:\r\n flags |= pygrep.DOTALL\r\n\r\n if args.ignore_case:\r\n flags |= pygrep.IGNORECASE\r\n\r\n if args.recursive:\r\n flags |= pygrep.RECURSIVE\r\n\r\n if args.regexdirpattern:\r\n flags |= pygrep.DIR_REGEX_MATCH\r\n\r\n return flags", "def get_init_arguments_and_types(cls) -> List[Tuple[str, Tuple, Any]]:\n return get_init_arguments_and_types(cls)", "def dataargs(self):\n return self.argsbytype(Data)", "def print_types(self):\n print type(self.posXposYposZ)\n print type(self.posXposYnegZ)\n print type(self.posXnegYposZ)\n print type(self.posXnegYnegZ)\n print type(self.negXposYposZ)\n print type(self.negXposYnegZ)\n print type(self.negXnegYposZ)\n print type(self.negXnegYnegZ)", "def _propagate_types(self):\n pass", "def argument(*name_or_flags, **kwargs):\n\n return (list(name_or_flags), kwargs)", "def _default_arguments(self, obj):\n \n if not (inspect.isfunction(obj) or inspect.ismethod(obj)):\n # for classes, check for __init__,__new__\n if inspect.isclass(obj):\n obj = (getattr(obj,'__init__',None) or\n getattr(obj,'__new__',None))\n # for all others, check if they are __call__able\n elif hasattr(obj, '__call__'):\n obj = obj.__call__\n # XXX: is there a way to handle the builtins ?\n try:\n args,_,_1,defaults = inspect.getargspec(obj)\n if defaults:\n return args[-len(defaults):]\n except TypeError: pass\n return []", "def take_action_on_flags(self, *args, **kwargs):\r\n pass", "def extract_argument_types(*args: Sequence[Any]) -> str:\n collapsed_args = []\n\n for arg in args:\n if is_list_like(arg):\n collapsed_nested = []\n for nested in arg:\n if is_list_like(nested):\n collapsed_nested.append(f\"({extract_argument_types(nested)})\")\n 
else:\n collapsed_nested.append(_get_argument_readable_type(nested))\n collapsed_args.append(\",\".join(collapsed_nested))\n else:\n collapsed_args.append(_get_argument_readable_type(arg))\n\n return \",\".join(collapsed_args)", "def get_expected_flags(argv, as_kwargs=False):\n if not argv:\n return []\n command = argv[0]\n expected_kwargs = {}\n expected_kwargs.update(agent_args)\n expected_kwargs.update(non_agent_args)\n expected_kwargs.update(xagents.commands[command][0])\n if len(argv) > 1:\n agent_data = xagents.agents[argv[1]]\n expected_kwargs.update(agent_data['module'].cli_args)\n if issubclass(agent_data['agent'], OffPolicy) or argv[1] == 'acer':\n expected_kwargs.update(off_policy_args)\n if not as_kwargs:\n return expected_kwargs.keys()\n return [flag.replace('-', '_') for flag in expected_kwargs.keys()]", "def __init__(self, *args):\n self.types = tuple([trait_from(arg) for arg in args])\n self.fast_validate = (9, self.types)", "def determine_arg_locations(self, arg_types): # pragma: no cover\n raise NotImplementedError(\"Implement this\")", "def __getnewargs__(self):\n return ()", "def _create_args(self, func_args):\n self.llvm_ret_type = self._from_ctype(self.signature.ret_type)\n self.llvm_arg_types = \\\n [self._from_ctype(a) for a in self.signature.arg_ctypes]", "def show_type(self, arg):\n return (str(arg), str(type(arg)), arg)", "def _get_flags(args: Sequence[str]) -> Dict[str, bool]:\n flags = {}\n for arg in args:\n if arg.startswith(FLAG_MARKER):\n flag_name = arg[len(FLAG_MARKER):]\n if flag_name and flag_name not in OMIT_FLAGS:\n flags[flag_name] = True\n else:\n break # Ignore flags after initial CLI call\n return flags", "def get_type_check(self, arg, option):\n pass", "def _func_serialize(self, args):\n return args", "def affects(*args, type: AnyStr=\"\", **kwargs)->AnyStr:\n pass", "def targetids(obj, reftype):", "def _cast_types(args):\n\targs.x_val = None if args.x_val == 'None' else int(args.x_val)\n\targs.test_size = float(args.test_size)\n\targs.alpha = float(args.alpha)\n\targs.fit_prior = (args.fit_prior in ['True', \"True\", 'true', \"true\"])\n\n\t# class_prior - array like type (problem to convert)\n\tif args.class_prior == \"None\" or args.class_prior == 'None':\n\t\targs.class_prior = None\n\n\t# --------- #\n\treturn args", "def get_filter_kwargs(self, *_, **__) -> Dict[str, Any]:", "def targets(obj, reftype):", "def getargspec(self,obj):\n\n if inspect.isfunction(obj):\n func_obj = obj\n elif inspect.ismethod(obj):\n func_obj = obj.im_func\n else:\n raise TypeError, 'arg is not a Python function'\n args, varargs, varkw = inspect.getargs(func_obj.func_code)\n return args, varargs, varkw, func_obj.func_defaults", "def _harvest(modyool, arg_pattern=None, test=None):\n if not test:\n def test(obj):\n if callable(obj) and inspect.isfunction(obj):\n func_sig = pep362.Signature(obj)\n parameter_dict = func_sig._parameters\n if arg_pattern is not None:\n if all([len(parameter_dict)==1,\n arg_pattern in parameter_dict]):\n return True\n return True\n names = set(dir(modyool)) - set(dir(__builtins__))\n matches = []\n count=0\n for name in names:\n count+=1\n obj = getattr(modyool, name)\n if test(obj):\n matches.append(obj)\n return dict([m.__name__, m] for m in matches)", "def flags(cls):\n\n assert cls.__bases__ == (object,)\n\n d = dict(cls.__dict__)\n new_type = type(cls.__name__, (int,), d)\n new_type.__module__ = cls.__module__\n\n map_ = {}\n for key, value in iteritems(d):\n if key.upper() == key and isinstance(value, integer_types):\n 
value_instance = new_type(value)\n setattr(new_type, key, value_instance)\n map_[value] = key\n\n def str_(self):\n value = int(self)\n matches = []\n for k, v in map_.items():\n if value & k:\n matches.append(\"%s.%s\" % (type(self).__name__, v))\n value &= ~k\n if value != 0 or not matches:\n matches.append(text_type(value))\n\n return \" | \".join(matches)\n\n def repr_(self):\n return \"<%s: %d>\" % (str(self), int(self))\n\n setattr(new_type, \"__repr__\", repr_)\n setattr(new_type, \"__str__\", str_)\n\n return new_type", "def etypes(self): # -> list[None]:\n ...", "def get_flags(cls):\n return cls.get_short_flag(), cls.get_flag()", "def sources(obj, reftype):", "def type_filter(self, items, types=None):", "def attributeInfo(*args, allAttributes: bool=True, bool: bool=True, enumerated: bool=True,\n hidden: bool=True, inherited: bool=True, internal: bool=True, leaf: bool=True,\n logicalAnd: bool=True, multi: bool=True, short: bool=True, userInterface:\n bool=True, writable: bool=True, type: AnyStr=\"\", **kwargs)->List[AnyStr]:\n pass", "def parameterTypes(self, p_int): # real signature unknown; restored from __doc__\n return []", "def melInfo(*args, **kwargs)->List[AnyStr]:\n pass", "def get_all_arguments(self):\n args, varargs, keyword, defaults = inspect.getargspec(self.exec_obj)\n if args.count('self') > 0:\n args.remove('self')\n return args", "def objectTypeUI(*args, isType: AnyStr=\"\", listAll: bool=True, superClasses: bool=True,\n **kwargs)->AnyStr:\n pass", "def _find_labelled_objects_functions():\n\n def _num_args_without_default_value(fn_sig):\n return len(\n [\n param\n for param in fn_sig.parameters.values()\n if param.default is inspect._empty\n ]\n )\n\n def _takes_object_labels_kwarg(fn):\n fn_sig = inspect.signature(fn)\n return (\n \"object_labels\" in fn_sig.parameters\n and _num_args_without_default_value(fn_sig) == 1\n )\n\n fns = [\n (fn_name, fn)\n for (fn_name, fn) in inspect.getmembers(\n sys.modules[__name__], inspect.isfunction\n )\n if not fn_name.startswith(\"_\") and _takes_object_labels_kwarg(fn)\n ]\n\n return dict(fns)", "def ntypes(self): # -> list[None]:\n ...", "def ntypes(self): # -> None:\n ...", "def _signature_types(self):\n if self._parameters.trace_mode in set([\n tensor_tracer_flags.TRACE_MODE_NAN_INF,\n tensor_tracer_flags.TRACE_MODE_NORM,\n tensor_tracer_flags.TRACE_MODE_HISTORY,\n tensor_tracer_flags.TRACE_MODE_MAX_ABS]):\n return {self._parameters.trace_mode: 0}\n if self._parameters.trace_mode == tensor_tracer_flags.TRACE_MODE_SUMMARY:\n return self._parameters.summary_signatures\n return {}", "def lookup(obj):\n return list(dir(obj))", "def lookup(obj):\n return list(dir(obj))", "def getImmediatelyAddableTypes(self, context=None):\n return self.getLocallyAllowedTypes()", "def ignorable(*args, **kwargs):\n return args, kwargs", "def etypes(self): # -> None:\n ...", "def flags(self):\n return list(self._flags_generator())", "def process_flags(self):\n\t\tsflags = []\n\t\tfor attr in dir(self):\n\t\t\tif attr[:3] != \"PF_\":\n\t\t\t\tcontinue\n\t\t\tvalue = getattr(self, attr)\n\t\t\tif value & self.fields[\"flags\"]:\n\t\t\t\tsflags.append(attr)\n\n\t\treturn sflags", "def inspect_many(self, *objects, all: bool=False):\n inspected = []\n for object in objects:\n inspected.append(self.__inspect_arg(object))\n\n return inspected", "def check_object_input_type(func):\n @functools.wraps(func)\n def wrapper_check_input_type(ref, *args):\n new_args = [ref]\n for X in list(args):\n new_args.append(_check_type(X))\n return func(*new_args)\n 
return wrapper_check_input_type", "def test_argument_types(self):\n funcs = [\n CityHash32,\n CityHash64,\n CityHash128,\n CityHash64WithSeed,\n CityHash64WithSeeds,\n CityHash128WithSeed,\n ]\n args = [b\"ab\\x00c\", bytearray(b\"ab\\x00c\"), memoryview(b\"ab\\x00c\")]\n for func in funcs:\n values = set(func(arg) for arg in args)\n self.assertEqual(len(values), 1, values)", "def get_member_variables(\n obj: Any, return_discarded: bool = False\n) -> Union[List[str], Tuple[List[str], Dict[str, List[str]]]]:\n valid_member_variables = []\n discarded_member_variables: Dict[str, List[str]] = {\n \"mangled\": [],\n \"is_type\": [],\n \"invalid_attr\": [],\n \"is_method\": [],\n \"is_boost_enum\": [],\n \"is_boost_class\": [],\n }\n for attr in dir(obj):\n if attr.startswith(\"__\"):\n discarded_member_variables[\"mangled\"].append(attr)\n continue\n\n try:\n value = getattr(obj, attr)\n except RuntimeError:\n discarded_member_variables[\"invalid_attr\"].append(attr)\n continue\n\n if is_type(value):\n discarded_member_variables[\"is_type\"].append(attr)\n elif is_method(value):\n discarded_member_variables[\"is_method\"].append(attr)\n elif is_boost_enum(value):\n discarded_member_variables[\"is_boost_enum\"].append(attr)\n elif is_boost_class(value):\n discarded_member_variables[\"is_boost_class\"].append(attr)\n else:\n valid_member_variables.append(attr)\n\n if return_discarded:\n return valid_member_variables, discarded_member_variables\n\n return valid_member_variables", "def sourceids(obj, reftype):", "def do_all(self, arg):\n l = []\n if len(arg) == 0:\n for obj in storage.all().values():\n l.append(str(obj))\n print(l)\n else:\n coms = tuple(arg.split())\n if coms[0] in self.cls:\n for k, v in storage.all().items():\n if coms[0] in k:\n l.append(str(v))\n print(l)\n else:\n print(\"** class doesn't exist **\")", "def gen_args(self, obj, pa_names = False):\n\n pal, kwal = get_class_total_args(type(obj))\n\n try:\n get_val = type(obj).__get_init_arg_val__\n except AttributeError:\n get_val = getattr\n\n for pa in pal:\n v = get_val(obj, pa)\n self.gen_field((pa + \" = \") if pa_names else \"\")\n self.pprint(v)\n\n for kwa, default in kwal.items():\n try:\n v = get_val(obj, kwa)\n except AttributeError:\n # If value cannot be obtained, skip the argument generation\n continue\n\n # generate only arguments with non-default values\n if (v is default) or (v == default):\n continue\n\n self.gen_field(kwa + \" = \")\n self.pprint(v)", "def test_ignore_objects_coming_from_arguments():\n class SomeClass(object):\n \"\"\" empty docstring. 
\"\"\"\n def __init__(self, opts=None):\n self.opts = opts\n\n def dunc(self, arg):\n \"\"\"Don't try to analyze this.\"\"\"\n return \"A{0}{1}\".format(arg, self.opts)\n\n def func(self):\n \"\"\"Don't try to analyze the following string.\"\"\"\n return 'AAA{0[iface]}BBB{0[port]}'.format(self.opts)\n\n return SomeClass", "def _enumerate_argument_types(self, idl_argument):\n argument_type = idl_argument.idl_type\n # TODO(dglazkov): What should we do with primitive nullable args?\n if (argument_type.is_nullable and\n argument_type.inner_type.is_primitive_type):\n raise ValueError('Primitive nullable types are not supported.')\n\n idl_types = []\n if idl_argument.is_optional:\n idl_types.append(None) # None is used to convey optionality.\n if argument_type.is_union_type:\n idl_types = idl_types + argument_type.member_types\n else:\n idl_types.append(argument_type)\n return idl_types", "def preprocess_arguments(self, *args, **kwargs):\n return (args, kwargs)", "def test_pype_get_arguments_all():\n context = Context({\n 'pype': {\n 'name': 'pipe name',\n 'pipeArg': 'argument here',\n 'useParentContext': 'parent context bool',\n 'skipParse': 'skip parse',\n 'raiseError': 'raise err'\n }\n })\n\n (pipeline_name,\n use_parent_context,\n pipe_arg,\n skip_parse,\n raise_error) = pype.get_arguments(context)\n\n assert pipeline_name == 'pipe name'\n assert use_parent_context == 'parent context bool'\n assert skip_parse == 'skip parse'\n assert raise_error == 'raise err'", "def generate_python_argument_types(argtypes: Union[List, str], outdir: str, prefix: str = 'sc', types=None):\n if type(argtypes) is str:\n argtypes = json.load(open(argtypes, 'r'))\n if not os.path.exists(outdir):\n os.mkdir(outdir)\n type_to_args = collections.defaultdict(set)\n for arg in argtypes:\n argtype = argtypes[arg]\n if types is not None:\n if argtype not in types:\n continue\n type_to_args[argtype].add(arg)\n for argtype in type_to_args:\n real_args = sorted(list(type_to_args[argtype]))\n arguments_to_python(real_args, argtype, outdir, prefix)", "def _types(cls):\n return {}", "def lookup(obj):\n\n return (dir(obj))", "def __sizeof__(*args):", "def __sizeof__(*args):", "def __sizeof__(*args):", "def __sizeof__(*args):", "def __sizeof__(*args):", "def __sizeof__(*args):", "def __sizeof__(*args):", "def __sizeof__(*args):", "def __sizeof__(*args):", "def __sizeof__(*args):", "def __sizeof__(*args):", "def __sizeof__(*args):", "def __sizeof__(*args):", "def __sizeof__(*args):", "def __sizeof__(*args):", "def __sizeof__(*args):", "def __sizeof__(*args):", "def __sizeof__(*args):", "def __sizeof__(*args):", "def __set_utils_types(self):\n self.__arrayt = type(self.c_byte * 1)\n # self.__cfuncptrt = type(type(self.memmove))\n # class _p(self.Structure):\n # pass\n # self.__ptrt = type(self.POINTER(_p))\n self.__basic_types_name = {\n 'c_bool': '?',\n 'c_char': 'c',\n 'c_byte': 'b',\n 'c_ubyte': 'B',\n 'c_short': 'h',\n 'c_ushort': 'H',\n 'c_int': 'i', # c_int is c_long\n 'c_uint': 'I',\n 'int': 'i',\n 'c_longlong': 'q',\n 'c_ulonglong': 'Q',\n 'c_float': 'f',\n 'c_double': 'd',\n 'c_longdouble': 'g',\n 'c_char_p': 's',\n 'c_void_p': 'P',\n # 'c_void': 'P', ## void in array is void_p ##DEBUG\n }\n if self.__longsize == 4:\n # long == int\n self.__basic_types_name.update({'c_long': 'i',\n 'c_ulong': 'I',\n 'long': 'i',\n 'c_void': 'I'})\n elif self.__longsize == 8:\n # long == longlong\n self.__basic_types_name.update({'c_long': 'q',\n 'c_ulong': 'Q',\n 'long': 'q',\n 'c_void': 'Q'})\n # we need to account for the possible 
changes in c_longdouble\n self.__basic_types = set([getattr(self, k) for k in self.__basic_types_name.keys() if hasattr(self, k)])\n return", "def get_filter_args(self):\n return []", "def print_flags():\n for key, value in vars(FLAGS).items():\n print(key + ' : ' + str(value))", "def print_flags():\n for key, value in vars(FLAGS).items():\n print(key + ' : ' + str(value))", "def print_flags():\n for key, value in vars(FLAGS).items():\n print(key + ' : ' + str(value))", "def print_flags():\n for key, value in vars(FLAGS).items():\n print(key + ' : ' + str(value))" ]
[ "0.5989417", "0.5906855", "0.5896243", "0.5705391", "0.5656125", "0.5627852", "0.55817276", "0.5562059", "0.5550938", "0.5473497", "0.5464566", "0.5436748", "0.5415903", "0.54050153", "0.53860885", "0.53617835", "0.53323925", "0.5305332", "0.53010184", "0.5295375", "0.5273713", "0.5251737", "0.52247846", "0.52180123", "0.52164966", "0.5215409", "0.5213704", "0.52116156", "0.5211158", "0.5208039", "0.52077407", "0.52071756", "0.5187475", "0.5178917", "0.5154428", "0.51481664", "0.5143914", "0.51426965", "0.5134738", "0.51253533", "0.5123532", "0.5121484", "0.5114601", "0.5102442", "0.5096192", "0.50931746", "0.50808823", "0.5068581", "0.5056545", "0.50520134", "0.50459135", "0.50426596", "0.5023967", "0.5020168", "0.50149083", "0.5001376", "0.5001376", "0.4998842", "0.49880427", "0.4982868", "0.49780342", "0.49747637", "0.49715632", "0.49663737", "0.49643302", "0.49620834", "0.49592298", "0.4958896", "0.49514744", "0.49496388", "0.494898", "0.4939017", "0.49389634", "0.49363935", "0.49342453", "0.49310368", "0.49293053", "0.49293053", "0.49293053", "0.49293053", "0.49293053", "0.49293053", "0.49293053", "0.49293053", "0.49293053", "0.49293053", "0.49293053", "0.49293053", "0.49293053", "0.49293053", "0.49293053", "0.49293053", "0.49293053", "0.49293053", "0.49293053", "0.49272442", "0.49267685", "0.49231404", "0.49231404", "0.49231404", "0.49231404" ]
0.0
-1
The arguments into which the python args and kwargs are collected
def args(self): return self._parse_args
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def preprocess_arguments(self, *args, **kwargs):\n return (args, kwargs)", "def get_all_arguments(self):\n args, varargs, keyword, defaults = inspect.getargspec(self.exec_obj)\n if args.count('self') > 0:\n args.remove('self')\n return args", "def args(self):\n return self._args.copy()", "def get_args( self, **kwargs ):\n args = []\n for at in self.arg_types:\n args.append( kwargs[at] )\n return args", "def args(self):\n return self._args", "def args(self):\n return self._args", "def args(self):\n return self._args", "def get_args(self):\r\n return self.args", "def get_kwargs():\n\treturn get_kwargs_raw(sys.argv)", "def as_kwargs(self) -> Dict[str, Any]:\n ret = {}\n for arg in self.args.values():\n ret[arg.name] = arg.value\n return ret", "def get_assign_args(self, arguments):\n pass", "def arguments(self):\n return parse_arguments(self['data'])", "def _extract_args(self, func):\n sig = inspect.signature(func)\n\n # Backwards compatibility\n if len(sig.parameters) == 1:\n ((name, parameter),) = sig.parameters.items()\n if (\n parameter.kind is parameter.POSITIONAL_OR_KEYWORD\n and parameter.annotation in (parameter.empty, argparse.Namespace)\n ):\n self._require_namespace = name\n return\n\n for name, parameter in sig.parameters.items():\n if parameter.annotation is argparse.Namespace:\n self._require_namespace = name\n else:\n arg = Argument.from_parameter(name, parameter)\n action = arg.register_with_proxy(self)\n self._args.append((name, action.dest))", "def get_x_args_dict(self):\n return self.__x_args", "def parse_args(self):\n return Args(self.args)", "def args(self) -> tuple[Basic, ...]:\n return self._args", "def add_args(self):\n raise NotImplementedError", "def parse_args(self):\n\n # Parse the arguments themselves.\n args = vars( self.parser.parse_args() )\n\n return args", "def _get_reproducing_arguments(self):\n reproducing_arguments = {\n 'include': self.include,\n 'exclude': self.exclude,\n 'copy': self.copy,\n }\n args_names = {name: getattr(self, name) for name in self.args_names}\n reproducing_arguments.update(args_names)\n return reproducing_arguments", "def get_xx_args_dict(self):\n return self.__xx_args", "def unpack_args(kwargs):\n return [v for p in zip(list(kwargs.keys()), list(kwargs.values())) for v in p]", "def args(self):\n\t\tret = []\n\t\tfor argname in self._arg_names:\n\t\t\tret += [self._args[argname]]\n\t\treturn ret", "def _collect_repr_args(self, poargs, kwargs):", "def _get_args_for_run(obj, args, kwargs):\n new_args = []\n for arg in args:\n new_arg = _handle_arg(obj, arg)\n if new_arg is not None:\n new_args.append(new_arg)\n\n for _, value in kwargs.items():\n new_value = _handle_arg(obj, value)\n if new_value is not None:\n new_args.append(new_value)\n\n return new_args", "def args(self) -> List[str]:\n return self.__args", "def _get_init_args(self):\n signature = inspect.signature(self.__init__)\n parameters = signature.parameters\n args = [arg for arg, p in parameters.items()\n if p.kind is p.POSITIONAL_OR_KEYWORD]\n\n return {arg: getattr(self, arg) for arg in args if arg != 'self'}", "def parse_arguments(args):", "def inspect_args_func(frame):\n args, _, _, values = inspect.getargvalues(frame)\n return {key: values[key] for key in args if key != 'self'}", "def get_args(self):\n rqst = self.request\n args = rqst.arguments()\n resp = {}\n for arg in args:\n resp[arg] = repr(rqst.get_all(arg))\n return resp", "def args(self):\n return self.cmd_args", "def getPositionalArgs():", "def _parse_args(self, prepared_args):\n pass", "def 
_prepare_args(local_vars):\n return {k: v for k, v in local_vars.items() if k != 'self'}", "def args(self):\n return self._args_[: self.nargs()]", "def args(self):\n if not self.__args_updated:\n for inc in self.include_templates:\n self.__args.update(inc.args)\n self.__args_updated = True\n return self.__args", "def dataargs(self):\n return self.argsbytype(Data)", "def extra_args(self):\n return []", "def get_args(inst):\n if is_estimator(inst):\n args = inspect.getargspec(inst.update).args\n args = [arg for arg in args if arg != 'self' and arg != 'X']\n else:\n args = inspect.getargspec(inst).args\n ignore_args = {'self', 'X', 'y', 'pattern', 'normalizer', 'coef'}\n args = [arg for arg in args if arg not in ignore_args]\n\n return args", "def get_exec_args(self):\n return []", "def format_arguments(self, **kwargs):\n return kwargs", "def get(self):\n return self.args, self.kwargs", "def base_arguments(self):\n raise NotImplementedError()", "def get_arguments(self, args=(), kwargs=None, onlykeys=False, onlyused=False,\n func=None):\n if func is None:\n func = self.__init__\n\n # check what parameters to add\n adds, params, kwargs = _helper_parameters(func=func, args=args, kwargs=kwargs,\n onlykeys=onlykeys, onlyused=onlyused)\n\n _map_parameters = getattr(self, \"_map_parameters\", None)\n for add, key in zip(adds, params):\n if add and key not in kwargs:\n try:\n if _map_parameters is not None and key in _map_parameters:\n mapped_key = _map_parameters[key]\n # if mapped_key is None then it means variable is not\n # assigned in the __init__ of the instance so ignore it\n if mapped_key is not None:\n kwargs[key] = getattr(self, mapped_key)\n else:\n kwargs[key] = getattr(self, key)\n except AttributeError:\n e, msg, traceback = sys.exc_info()\n msg.args = (\n msg.args[0] + \". 
Review @copy_support decorator or \"\n \"BaseCopySupporter class for more info.\",)\n raise_(e, msg, traceback)\n\n if onlykeys:\n return kwargs\n return args, kwargs", "def _make_args(self, args, defaults=[], vararg=None, kwonlyargs=[],\n kw_defaults=[], kwarg=None):\n # On Python 2 convert vararg and kwarg to raw name, raise error using\n # lineno stored on the node and lexer from self.\n # On Python 3.3 extract name and annotation\n # After should be straight forward\n raise NotImplementedError()", "def __getnewargs__(self):\n return ({'pairs': self.__pairs,\n 'app': self.__app,\n 'namespace': self.__namespace},)", "def _parse_kwargs(self):\n re_kwargs = r'^[\\w_][\\w\\d_]*=.+$'\n kwargs = [a.split('=') for a in self.args if re.findall(re_kwargs, a)]\n self.kwargs = {k: self._load_json(v) for k, v in kwargs}\n self.args = [a for a in self.args if not re.findall(re_kwargs, a)]", "def args(cls):\n try:\n args = getfullargspec(cls.__init__)\n except TypeError:\n return []\n return args[0]", "def parse_arguments(cls):\r\n parser = argparse.ArgumentParser(description='Easy Infer for model benchmark')\r\n cls.base_arg_parse(parser)\r\n cls.model_arg_parse(parser)\r\n cls.task_arg_parse(parser)\r\n args = parser.parse_args()\r\n return args", "def kwargs(self):\n return self._kwargs", "def kwargs(self):\n return self._kwargs", "def _set_arguments(self):\n self._arguments = []", "def arguments(args_to_pop=None) :\n posname, kwname, args = inspect.getargvalues(inspect.stack()[1][0])[-3:]\n posargs = args.pop(posname, [])\n args.update(args.pop(kwname, []))\n if args_to_pop is not None :\n for arg in args_to_pop :\n args.pop(arg)\n return args, posargs", "def parse_args(self):\n return self.__process_args__(self.parser.parse_args())", "def _get_run_script_args(self):\n raise NotImplementedError", "def _get_arguments(self, rargs):\r\n\r\n args = []\r\n i = 0\r\n count = len(rargs)\r\n while i < count and not self._is_opt(rargs[i]):\r\n args.append(rargs[i])\r\n i += 1\r\n\r\n return args", "def _update_args_and_kargs(self):\n if self.kwargs:\n self.value.append(self.args)\n self.value.append(self.kwargs)\n else:\n if self.args:\n self.value.append(self.args)", "def args_extract(self, args, kwargs):\n # make popable (can't pop tuple of args)\n args = list(args)\n\n def getarg(name, num):\n if args and len(args) > num:\n return args.pop(num)\n elif kwargs.get('files'):\n return kwargs.pop('files')\n return None\n\n # First to not affect data = args.pop(0)\n files = getarg('files', 1)\n data = getarg('data', 0)\n\n # make mutable if something\n if files:\n files = MultiValueDict(files)\n if data:\n data = MultiValueDict(data)\n\n return data, files, args, kwargs", "def get_arguments(self, **include):\n d = dict(self._kwargs)\n\n for k in include:\n if not include[k] and k in d:\n d.pop(k)\n return d", "def getargvalues(frame):\r\n args, varargs, varkw = getargs(frame.f_code)\r\n return ArgInfo(args, varargs, varkw, frame.f_locals)", "def full_args():\n return setup_args()", "def get_partial_arguments(self):\n return (), {}", "def get_used_kwargs(self):\n return self._used_kwargs", "def get_args(self):\n req_argv = self._ptr.contents.argv\n args = []\n if bool(req_argv):\n i = 0\n while 1:\n s = bytestostr(req_argv[i])\n i += 1\n if s == None:\n break\n args.append(s)\n return args", "def _get_init_args(self):\n\n return dict(enum=self.enum, dflt=self._defname,\n base=self.base, shape=self.shape)", "def determine_arg_locations(self, arg_types): # pragma: no cover\n raise 
NotImplementedError(\"Implement this\")", "def extract_arguments(args, method):\n intersection = lambda list1, list2: [x for x in list1 if x in list2]\n filterByKey = lambda keys, data: {x: data[x] for x in keys if x in data }\n keys = intersection(signature(method).parameters.keys(), args.keys())\n params = filterByKey(keys, args)\n return params", "def get_mandatory_arguments(self):\n args, varargs, keyword, defaults = inspect.getargspec(self.exec_obj)\n\n if defaults is not None:\n args = args[:-len(defaults)]\n\n if args.count('self') > 0:\n args.remove('self')\n return args", "def __init__(self, args, kwargs):\n self._args_dec = list(args)\n self._kwargs_dec = dict(kwargs)", "def _get_args(self):\n parser = ArgumentParser(\n description=\"Dynamically generates Snakefiles for data \"\n \"integration and machine learning pipelines.\"\n )\n\n parser.add_argument(\n \"-c\",\n \"--config\",\n help=(\n \"Configuration filepath. (Will look for file named config.yml \"\n \"in current working directory, if none specified.)\"\n ),\n )\n\n parser.add_argument(\n \"-r\",\n \"--run\",\n default=False,\n help=(\n \"Runs pipeline, in addition to generating Snakefile.\"\n ),\n )\n\n # convert command-line args to a dict and return\n args = parser.parse_args()\n\n args = dict(\n (k, v) for k, v in list(vars(args).items()) if v is not None\n )\n\n return args", "def _sorted_args(self):\n return self.args", "def register_arguments(self):\n pass", "def register_arguments(self):\n pass", "def register_arguments(self):\n pass", "def register_arguments(self):\n pass", "def register_arguments(self):\n pass", "def register_arguments(self):\n pass", "def register_arguments(self):\n pass", "def register_arguments(self):\n pass", "def extra_target_arguments(self):\n return {}", "def add_extra_args(self):\n pass", "def parse_known_args(self):\n args, other_args = self.parser.parse_known_args()\n args = self.__process_args__(args)\n return args, other_args", "def arguments(**kw):\n return export_arguments('cc', _all_arguments, _groups, **kw)", "def create_arg_list(self):\n\n sim = self.sim\n\n py_kernel_args = sim.kernel_args # Python variables that are passed into the kernel\n gen_kernel_args = sim.ctx_info['kernel_arguments'] # A list of needed kernel arguments from kernel autogen (Mako)\n\n list_for_kernel = gen_kernel_args[self.short_name]\n\n python_args_needed = [z[0] for z in list_for_kernel]\n\n self.arg_list = [py_kernel_args[z] for z in python_args_needed]\n\n # Loop over the arg_list...if the argument is a function, call it!\n for i in range(len(self.arg_list)):\n value = self.arg_list[i]\n if inspect.isfunction(value):\n self.arg_list[i] = value()\n\n additional_cl_args = [sim.queue, self.kernel_global_size, self.kernel_local_size]\n\n self.arg_list = additional_cl_args + self.arg_list", "def _generate_run_args(self, args_list, kwargs):\n return _get_args_for_run(self, args_list, kwargs)", "def _parse_args(self):\n parser = argparse.ArgumentParser()\n _, args = parser.parse_known_args()\n self.args = [a for a in args if a != '']", "def get_arguments(self):\n if self.arguments is not None:\n return self.arguments\n elif self.parent is not None:\n return self.parent.get_arguments()\n else:\n return []", "def punkte(self):\n return self.args", "def get_kwargs(self):\n return {}", "def __get_arguments(args=None, logger=None, stats=None):\n\n if not args:\n parser = get_parser()\n add_boto_cli_arguments(parser)\n # Parse only the known arguments added by add_boto_cli_arguments().\n # We only need those 
arguments to create Boto object, nothing else.\n # parse_known_args() return (Namespace, list of unknown arguments),\n # we only care about the Namespace object here.\n args = parser.parse_known_args()[0]\n\n if not logger:\n logger = get_logger(name=NAME)\n\n if not stats:\n stats = get_stats(prefix=NAME)\n\n return {\n 'log_level': getattr(args, 'boto_log_level', DEFAULT['log_level']()),\n 'access_key': getattr(args, 'boto_access_key', DEFAULT['access_key']()),\n 'secret_key': getattr(args, 'boto_secret_key', DEFAULT['secret_key']()),\n 'region': getattr(args, 'boto_region', DEFAULT['region']()),\n 'logger': logger,\n 'stats': stats,\n }", "def get_arguments(args_in):\n parser = argparse.ArgumentParser(description=\"Model Evaluation\")\n\n add_global_arguments(parser)\n args = parser.parse_args(args_in)\n check_global_arguments(args)\n\n return args", "def normalize_args(self, args):\n return args", "def transformer_arguments(self) -> Dict[str, Any]:\n return {**self.transformer_required_arguments(), **self.transformer_optional_arguments()}", "def params(self, **kwargs):\n return kwargs", "def args():\n # pytest's Config class stores a dictionary of argparse argument name => dest. Go through this\n # dictionary and return back an args object whose attributes map dest to its option value.\n pytest_args = {arg: pytest.config.getoption(arg)\n for arg in pytest.config._opt2dest.values()}\n return type('args', (object,), pytest_args)", "def getCloneArgs(self):\n\n values = {\n \"locals_arg\": self.subnode_locals_arg.makeClone(),\n }\n\n values.update(self.getDetails())\n\n return values", "def _transform_args(self) -> None:\n self.args = None if self.args == [] else self.args", "def setup_args(cls) -> ParlaiParser:\n # we want to later deprecate this for add_cmdline_args", "def __init__(self, *args, **kwargs):\n self._args = args\n self._kwargs = kwargs", "def prepare_arguments(self, parser):\n pass", "def get_args():\n parser = argparse.ArgumentParser()\n arg = parser.add_argument\n\n arg('--raw_source_dir',\n default=os.path.expanduser(\n '~/Personal/Columbia/Applied_DL/Camelyon_Project/data/source_data'))\n arg('--meta_data_dir',\n default=os.path.expanduser(\n '~/Personal/Columbia/Applied_DL/Camelyon_Project/data/test_dir'))\n arg('--img_data_dir',\n default=os.path.expanduser(\n '~/Personal/Columbia/Applied_DL/Camelyon_Project/data/test_dir/input_data'))\n arg('--output_data_dir',\n default=os.path.expanduser(\n '~/Personal/Columbia/Applied_DL/Camelyon_Project/data/test_dir/output_data'))\n\n arg('--img_partition_option', default='zoom_1_256_256')\n\n input_args = parser.parse_known_args()[0]\n\n return input_args" ]
[ "0.7451631", "0.72647053", "0.72190857", "0.7194664", "0.71847206", "0.71847206", "0.71847206", "0.7184045", "0.71487767", "0.697183", "0.6968741", "0.69624984", "0.69224155", "0.6873307", "0.68517804", "0.68445677", "0.6818598", "0.68127906", "0.6803711", "0.6803707", "0.6779153", "0.6729213", "0.6716305", "0.6687784", "0.6668649", "0.66589254", "0.66575664", "0.66329455", "0.66239136", "0.6622079", "0.6621637", "0.6598846", "0.65779847", "0.6553622", "0.6537273", "0.65222377", "0.6521335", "0.65201026", "0.65138125", "0.650661", "0.6491509", "0.64884263", "0.64793175", "0.64731157", "0.64538723", "0.64213896", "0.64197576", "0.6408747", "0.6404957", "0.6404957", "0.64011425", "0.6385383", "0.63825685", "0.6369928", "0.63695884", "0.63651466", "0.63609433", "0.63595945", "0.6357616", "0.63429695", "0.6336105", "0.6333405", "0.6329889", "0.6329223", "0.6327137", "0.6326338", "0.63162243", "0.62998784", "0.6290917", "0.6284848", "0.6279753", "0.6279753", "0.6279753", "0.6279753", "0.6279753", "0.6279753", "0.6279753", "0.6279753", "0.6274014", "0.6270547", "0.62705314", "0.6250357", "0.62400585", "0.62395847", "0.6234987", "0.62327325", "0.6230077", "0.61958826", "0.61945236", "0.6193052", "0.61894506", "0.61803055", "0.617934", "0.6177853", "0.6175513", "0.6172964", "0.61719126", "0.617083", "0.6166028", "0.61567104" ]
0.7118921
9
The PyArgKeywords object which contains all the names of the function's arguments
def arg_names(self): return self._arg_names
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_keyword_args(function):\n argspec = inspect.getargspec(function)\n kwargs = argspec.args[len(argspec.args) - len(argspec.defaults):]\n kwargs = {arg: value for arg, value in zip(kwargs, argspec.defaults)}\n return kwargs", "def names(self):\n result = []\n result.extend(self.positional_arguments)\n if self.arbitary_positional_arguments is not None:\n result.append(self.arbitary_positional_arguments)\n if self.arbitary_keyword_arguments is not None:\n result.append(self.arbitary_keyword_arguments)\n result.extend(self.keyword_arguments)\n return result", "def argnames(self):\n if self.get_key is None:\n return set()\n return set(self.get_key.names)", "def extract_keywords(func):\n if hasattr(func, 'im_func'):\n func = func.im_func\n\n try:\n return func.func_code.co_varnames[-len(func.func_defaults):]\n except (TypeError, ValueError, IndexError):\n return tuple()", "def _getargs(fn_sig):\n params = fn_sig.parameters\n args = []\n for k, v in params.items():\n if (v.kind & v.POSITIONAL_OR_KEYWORD) == v.POSITIONAL_OR_KEYWORD:\n args.append(k)\n else:\n msg = \"%s argument type unsupported in jitclass\" % v.kind\n raise errors.UnsupportedError(msg)\n return args", "def parameter_names(self) -> List[str]:", "def get_argument_as_keywords(self):\n status = True\n arg_kv = self.get_values_for_mandatory_args()\n if len(arg_kv) != len(self.req_args_list):\n msg = 'could not execute %s without mandatory arguments' % (object)\n self.data_repository = skip_and_report_status(self.data_repository, msg)\n status = False\n arg_kv = self.get_values_for_optional_args(arg_kv)\n return arg_kv, status", "def argument_list(self):\n answer = self._call('argument_list')\n return answer.names", "def build_arg_list(fn, env):\r\n kw = {}\r\n argspec = inspect.getargspec(fn)\r\n\r\n # if there is a **kw argument in the fn definition,\r\n # just pass along the environment\r\n if argspec[2]:\r\n kw = env\r\n #else for each entry in the arglist set the value from the environment\r\n else:\r\n #skip self\r\n argnames = argspec[0][1:]\r\n for name in argnames:\r\n if name in env:\r\n kw[name] = env[name]\r\n return kw", "def inspect_args_func(frame):\n args, _, _, values = inspect.getargvalues(frame)\n return {key: values[key] for key in args if key != 'self'}", "def pykwarg(self):\n return self._pykwarg", "def list_kwargs(func):\n \n details = inspect.getargspec(func)\n nopt = len(details.defaults)\n \n return details.args[-nopt:]", "def getPositionalArgs():", "def argnames(method):\n return [arg for arg in method.__code__.co_varnames if arg != \"self\"]", "def args(self):\n\t\tret = []\n\t\tfor argname in self._arg_names:\n\t\t\tret += [self._args[argname]]\n\t\treturn ret", "def test_kw_args_with_keywords():\n assert arguments.fun_opt_kw_params(visited_color='blue',\n link_color='red',\n back_color='yellow',\n fore_color='orange') == ('orange',\n 'yellow',\n 'red', 'blue')", "def args(self) -> List[str]:\n return self.__args", "def _get_args(function, varargs=False):\n\n try:\n params = signature(function).parameters\n except ValueError:\n # Error on builtin C function\n return []\n args = [\n key\n for key, param in params.items()\n if param.kind not in (param.VAR_POSITIONAL, param.VAR_KEYWORD)\n ]\n if varargs:\n varargs = [\n param.name\n for param in params.values()\n if param.kind == param.VAR_POSITIONAL\n ]\n if len(varargs) == 0:\n varargs = None\n return args, varargs\n else:\n return args", "def get_xx_args_dict(self):\n return self.__xx_args", "def parameterNames(self, p_int): # real signature 
unknown; restored from __doc__\n return []", "def get_kwd_args(func):\n try:\n sig = inspect.signature(func)\n except AttributeError:\n args, _, _, defaults = inspect.getargspec(func)\n if defaults:\n kwonlyargs = args[-len(defaults):]\n else:\n kwonlyargs = []\n else:\n kwonlyargs = {p.name:p.default for p in sig.parameters.values()\n if p.default is not p.empty}\n\n return kwonlyargs", "def keywords(self):\n return list(self._kw)", "def get_kwargs():\n\treturn get_kwargs_raw(sys.argv)", "def argdict(self):\n return dict((arg.name, val) for arg, val in zip(self.sig, self))", "def getargvalues(frame):\r\n args, varargs, varkw = getargs(frame.f_code)\r\n return ArgInfo(args, varargs, varkw, frame.f_locals)", "def getargspec(self,obj):\n\n if inspect.isfunction(obj):\n func_obj = obj\n elif inspect.ismethod(obj):\n func_obj = obj.im_func\n else:\n raise TypeError, 'arg is not a Python function'\n args, varargs, varkw = inspect.getargs(func_obj.func_code)\n return args, varargs, varkw, func_obj.func_defaults", "def generate_arg_and_kwags():\n def gen_func(\n #df: DataSource,\n option: List[list],\n style: List[dict]\n )->List[Tuple[list, dict]]:\n\n if len(option) != len(style):\n raise SystemError(\"option and style must be same size list.\")\n\n arg_and_kwarg = []\n for o, s in zip(option, style):\n arg = [*o]\n kwargs = s\n arg_and_kwarg.append((arg, kwargs))\n return arg_and_kwarg\n return gen_func", "def get_json_argument_list():\n list_of_arguments_to_get = [\"finish_time\", \"segmentation_training_samples\", \"patch_count_per_image\", \"learning_rate\", \"batch_k\",\n \"batch_p\", \"flip_augment\", \"standardize\", \"margin\", \"metric\"]\n\n return list_of_arguments_to_get", "def GetKeywords(self):\n return [FS_COMMANDS, FS_STDLIB, FS_FUNC, FS_CLASS]", "def _get_param_names(self):\n temp_params = {'function': self.function, 'target': self.target}\n\n temp_params.update(self.kwargs)\n\n return temp_params", "def derive_args(func):\n args = inspect.getfullargspec(func).args\n if args and is_selfish_name(args[0]):\n del args[0]\n return args", "def _get_arg_help(docstring):\r\n arg_help = {}\r\n\r\n if docstring is None:\r\n return arg_help\r\n\r\n last = None\r\n import re\r\n for line in docstring.split('\\n'):\r\n if line == '':\r\n continue\r\n match = re.search('^\\s*:param[\\w ]* (\\w+):\\s(.*)$', line)\r\n if match:\r\n last = match.group(1)\r\n arg_help[last] = match.group(2)\r\n else:\r\n arg_help[last] += ' %s' % line.strip()\r\n return arg_help", "def format_args(self, **kwargs: Any) -> str:\n decl = self.declaration\n\n # The logic allows this to be used for both function like and non\n # function like macros.\n # 'SOME_DEFINE'.partition('(')\n # >>> 'SOME_DEFINE', '', ''\n #\n # 'FUNCTION_LIKE(_a, _b)'.partition('(')\n # >>> 'FUNCTION_LIKE', '(', '_a, _b)'\n _, part, args = decl.partition(\"(\")\n return part + args", "def as_kwargs(self) -> Dict[str, Any]:\n ret = {}\n for arg in self.args.values():\n ret[arg.name] = arg.value\n return ret", "def _get_param_names(self):\r\n return sorted([p\r\n for p in self.__dict__\r\n if p != 'additional_args'])", "def _get_init_args(self):\n signature = inspect.signature(self.__init__)\n parameters = signature.parameters\n args = [arg for arg, p in parameters.items()\n if p.kind is p.POSITIONAL_OR_KEYWORD]\n\n return {arg: getattr(self, arg) for arg in args if arg != 'self'}", "def _generate_keywords(self):\n _keywords = [*self._lookup_opcodes_dir.keys(), *self._registers_list.keys()]\n for key in _keywords:\n 
self._keywords.extend(key.split(\" \"))\n return", "def get_id_args(func, arg):\n\n return \"{} {}\".format(func.__name__, arg)", "def python_func_kw_matches(self,text):\n\n if \".\" in text: # a parameter cannot be dotted\n return []\n try: regexp = self.__funcParamsRegex\n except AttributeError:\n regexp = self.__funcParamsRegex = re.compile(r'''\n '.*?' | # single quoted strings or\n \".*?\" | # double quoted strings or\n \\w+ | # identifier\n \\S # other characters\n ''', re.VERBOSE | re.DOTALL)\n # 1. find the nearest identifier that comes before an unclosed\n # parenthesis e.g. for \"foo (1+bar(x), pa\", the candidate is \"foo\"\n tokens = regexp.findall(self.get_line_buffer())\n tokens.reverse()\n iterTokens = iter(tokens); openPar = 0\n for token in iterTokens:\n if token == ')':\n openPar -= 1\n elif token == '(':\n openPar += 1\n if openPar > 0:\n # found the last unclosed parenthesis\n break\n else:\n return []\n # 2. Concatenate any dotted names (e.g. \"foo.bar\" for \"foo.bar(x, pa\" )\n ids = []\n isId = re.compile(r'\\w+$').match\n while True:\n try:\n ids.append(iterTokens.next())\n if not isId(ids[-1]):\n ids.pop(); break\n if not iterTokens.next() == '.':\n break\n except StopIteration:\n break\n # lookup the candidate callable matches either using global_matches\n # or attr_matches for dotted names\n if len(ids) == 1:\n callableMatches = self.global_matches(ids[0])\n else:\n callableMatches = self.attr_matches('.'.join(ids[::-1]))\n argMatches = []\n for callableMatch in callableMatches:\n try: namedArgs = self._default_arguments(eval(callableMatch,\n self.namespace))\n except: continue\n for namedArg in namedArgs:\n if namedArg.startswith(text):\n argMatches.append(\"%s=\" %namedArg)\n return argMatches", "def test_star_args_with_keywords():\n assert arguments.fun_star_params(visited_color='orange',\n link_color='yellow',\n back_color='red',\n fore_color='blue') == ('orange',\n 'yellow',\n 'red', 'blue')", "def get_arg_name(args):\n names = []\n for arg in args:\n if type(arg).__name__ == 'ID':\n names.append(arg.name)\n elif type(arg).__name__ == 'UnaryOp':\n names.append(arg.expr.name)\n elif type(arg).__name__ == 'StructRef':\n #############################################\n # So far, we don't care about this situation:\n # fun(a->b)\n # POSSIBLE CODE HERE\n #############################################\n names.append(None)\n return names", "def fetch_arguments(op_def, arg, ws):\n return [fetch_argument(op_def, desc, ws) for desc in arg.strings]", "def keywords(self):\n defined_keywords = [\n ('allowempty_map', 'allowempty_map'),\n ('assertion', 'assertion'),\n ('default', 'default'),\n ('class', 'class'),\n ('desc', 'desc'),\n ('enum', 'enum'),\n ('example', 'example'),\n ('extensions', 'extensions'),\n ('format', 'format'),\n ('func', 'func'),\n ('ident', 'ident'),\n ('include_name', 'include'),\n ('length', 'length'),\n ('map_regex_rule', 'map_regex_rule'),\n ('mapping', 'mapping'),\n ('matching', 'matching'),\n ('matching_rule', 'matching_rule'),\n ('name', 'name'),\n ('nullable', 'nullable'),\n ('parent', 'parent'),\n ('pattern', 'pattern'),\n ('pattern_regexp', 'pattern_regexp'),\n ('range', 'range'),\n ('regex_mappings', 'regex_mappings'),\n ('required', 'required'),\n ('schema', 'schema'),\n ('schema_str', 'schema_str'),\n ('sequence', 'sequence'),\n ('type', 'type'),\n ('type_class', 'type_class'),\n ('unique', 'unique'),\n ('version', 'version'),\n ]\n found_keywords = []\n\n for var_name, keyword_name in defined_keywords:\n if getattr(self, var_name, 
None):\n found_keywords.append(keyword_name)\n\n return found_keywords", "def get_args( self, **kwargs ):\n args = []\n for at in self.arg_types:\n args.append( kwargs[at] )\n return args", "def _formal_params(self, doclet):\n name, paren, params = self.arguments[0].partition('(')\n return ('(%s' % params) if params else '(%s)' % ', '.join(doclet['meta']['code']['paramnames'])", "def _extract_args(self, func):\n sig = inspect.signature(func)\n\n # Backwards compatibility\n if len(sig.parameters) == 1:\n ((name, parameter),) = sig.parameters.items()\n if (\n parameter.kind is parameter.POSITIONAL_OR_KEYWORD\n and parameter.annotation in (parameter.empty, argparse.Namespace)\n ):\n self._require_namespace = name\n return\n\n for name, parameter in sig.parameters.items():\n if parameter.annotation is argparse.Namespace:\n self._require_namespace = name\n else:\n arg = Argument.from_parameter(name, parameter)\n action = arg.register_with_proxy(self)\n self._args.append((name, action.dest))", "def get_contexts(self):\n return tuple(getattr(self, name) for name in self.__argnames__)", "def arguments(**kw):\n return export_arguments('cc', _all_arguments, _groups, **kw)", "def allkeywords(f):\n @_fntools.wraps(f)\n def wrapper(*a, **k):\n a = list(a)\n for idx, arg in enumerate(_inspect.getargspec(f).args, -_inspect.ismethod(f)): # or [0] in 2.5\n if arg in k:\n if idx < len(a):\n a.insert(idx, k.pop(arg))\n else:\n break\n return f(*a, **k)\n return wrapper", "def print_args():\n for key, value in vars(ARGS).items():\n print(key + ' : ' + str(value))", "def GetFunctionParametersAndValues():\n frame = inspect.currentframe().f_back\n args, _, _, values = inspect.getargvalues(frame)\n return ([(i, values[i]) for i in args])", "def get_all_arguments(self):\n args, varargs, keyword, defaults = inspect.getargspec(self.exec_obj)\n if args.count('self') > 0:\n args.remove('self')\n return args", "def func_args(self) -> str:\n\n return self.call_data[10:]", "def pyarg(self):\n return self._pyarg", "def generateKwargsAsString(self):\n args = \"\"\n axisList = self.tabWidget.currentWidget()\n\n for axisWidget in axisList.getAxisWidgets():\n args += \"%s = %s, \" % (axisWidget.axis.id,\n axisWidget.getCurrentValuesAsStr())\n\n # Generate additional args\n args += 'squeeze = 0'\n args += \", order = '%s' \" % axisList.getAxesOrderString()\n return args", "def _validate_arglist_and_kwlist(self, p, items, keywords):\n kwnames = set()\n args = []\n kws = []\n self._validate_arglist_list(items, p.lexer.lexer)\n for arg in items:\n if isinstance(arg, ast.keyword):\n kws.append(arg)\n kwnames.add(arg.arg)\n else:\n args.append(arg)\n for kw in keywords:\n if not isinstance(kw, ast.keyword):\n msg = 'only named arguments may follow *expression'\n tok = FakeToken(p.lexer.lexer, p.lineno(2))\n syntax_error(msg, tok)\n if kw.arg in kwnames:\n msg = 'keyword argument repeated'\n tok = FakeToken(p.lexer.lexer, kw.lineno)\n syntax_error(msg, tok)\n kwnames.add(kw.arg)\n kws.extend(keywords)\n\n return args, kws", "def get_x_args_dict(self):\n return self.__x_args", "def args(self):\n return self._args", "def args(self):\n return self._args", "def args(self):\n return self._args", "def args(cls):\n try:\n args = getfullargspec(cls.__init__)\n except TypeError:\n return []\n return args[0]", "def get_input_arguments(kwargs, function, warn=True):\n np.set_printoptions(threshold=20)\n print('\\narguments to {}:'.format(function.__qualname__))\n params = inspect.signature(function)\n input_kwargs = {}\n not_arguments = {}\n 
for k, v in kwargs.items():\n if k in params.parameters:\n input_kwargs[k] = v\n print_item(k, v)\n else:\n not_arguments[k] = v\n if warn:\n print('\\nother arguments:')\n for k, v in not_arguments.items():\n #print('{}: {}'.format(k, v))\n print_item(k, v)\n print('\\n')\n return input_kwargs", "def get_input_arguments(kwargs, function, warn=True):\n np.set_printoptions(threshold=20)\n print('\\narguments to {}:'.format(function.__qualname__))\n params = inspect.signature(function)\n input_kwargs = {}\n not_arguments = {}\n for k, v in kwargs.items():\n if k in params.parameters:\n input_kwargs[k] = v\n print_item(k, v)\n else:\n not_arguments[k] = v\n if warn:\n print('\\nother arguments:')\n for k, v in not_arguments.items():\n #print('{}: {}'.format(k, v))\n print_item(k, v)\n print('\\n')\n return input_kwargs", "def get_required_kwargs(fun, skip_positional=0):\n sig = inspect.signature(fun)\n # the params from signature with up to skip_positional filtered out\n # (less only if there is not enough of positional args)\n params = [(name, param) for i, (name, param) in enumerate(sig.parameters.items())\n if i >= skip_positional or param.kind not in\n [inspect.Parameter.POSITIONAL_OR_KEYWORD, inspect.Parameter.POSITIONAL_ONLY]]\n return [\n name for name, param in params if param.default is inspect.Parameter.empty\n and param.kind in [inspect.Parameter.POSITIONAL_OR_KEYWORD, inspect.Parameter.KEYWORD_ONLY]\n ]", "def argument_types(self):\r\n class ArgumentsIterator(collections.Sequence):\r\n def __init__(self, parent):\r\n self.parent = parent\r\n self.length = None\r\n\r\n def __len__(self):\r\n if self.length is None:\r\n self.length = conf.lib.clang_getNumArgTypes(self.parent)\r\n\r\n return self.length\r\n\r\n def __getitem__(self, key):\r\n # FIXME Support slice objects.\r\n if not isinstance(key, int):\r\n raise TypeError(\"Must supply a non-negative int.\")\r\n\r\n if key < 0:\r\n raise IndexError(\"Only non-negative indexes are accepted.\")\r\n\r\n if key >= len(self):\r\n raise IndexError(\"Index greater than container length: \"\r\n \"%d > %d\" % ( key, len(self) ))\r\n\r\n result = conf.lib.clang_getArgType(self.parent, key)\r\n if result.kind == TypeKind.INVALID:\r\n raise IndexError(\"Argument could not be retrieved.\")\r\n\r\n return result\r\n\r\n assert self.kind == TypeKind.FUNCTIONPROTO\r\n return ArgumentsIterator(self)", "def get_param_names(self):\n return list(self.params.keys())", "def GetKeywords(self):\n if wx.VERSION >= (2, 9, 0, 0, ''):\n return [(0, R_KEYWORDS), (1, R_KEYWORDS2), (2, R_KEYWORDS3)]\n else:\n return [(1, KEYWORDS)]", "def invalid_args(func, argdict):\r\n args, _, keywords, _ = inspect.getargspec(func)\r\n if keywords:\r\n return set() # All accepted\r\n return set(argdict) - set(args)", "def __arg_list(self):\n args = []\n try:\n arg = self.__arg()\n args.append(arg)\n if arg.token.endswith(\"...\"):\n return args\n\n while True:\n self.match_value(Punctuator, \",\")\n\n arg = self.__arg()\n if arg.token.endswith(\"...\"):\n return args\n\n args.append(arg)\n except ParseError:\n return args", "def _make_args(self, args, defaults=[], vararg=None, kwonlyargs=[],\n kw_defaults=[], kwarg=None):\n # On Python 2 convert vararg and kwarg to raw name, raise error using\n # lineno stored on the node and lexer from self.\n # On Python 3.3 extract name and annotation\n # After should be straight forward\n raise NotImplementedError()", "def get_keywords(self):\r\n\t\treturn list(self.keyword_headlines().keys())", "def get_num_positional_args(fun):\n sig = 
inspect.signature(fun)\n return len([\n name for name, param in sig.parameters.items() if param.kind in\n [inspect.Parameter.POSITIONAL_OR_KEYWORD, inspect.Parameter.POSITIONAL_ONLY]\n ])", "def args(self):\n if not self.arg_names:\n return []\n modes = self.arg_modes or [\"i\"] * len(self.arg_names)\n args = [\n (name, typ)\n for name, typ, mode in zip(self.arg_names, self.arg_types, modes)\n if mode in (\"i\", \"b\", \"v\") # IN, INOUT, VARIADIC\n ]\n\n def arg(name, typ, num):\n num_args = len(args)\n num_defaults = len(self.arg_defaults)\n has_default = num + num_defaults >= num_args\n default = (\n self.arg_defaults[num - num_args + num_defaults]\n if has_default\n else None\n )\n return ColumnMetadata(name, typ, [], default, has_default)\n\n return [arg(name, typ, num) for num, (name, typ) in enumerate(args)]", "def test_kw_args_with_positional():\n assert arguments.fun_opt_kw_params('blue', 'red', 'yellow',\n 'orange') == ('blue', 'red', 'yellow',\n 'orange')", "def _helper_parameters(func, args=(), kwargs=None, onlykeys=False, onlyused=False):\n if kwargs is None:\n kwargs = {}\n # params = list(inspect.signature(self.__init__).parameters.keys())\n params = inspect.getargspec(func).args[1:] # TODO replace deprecated getargspec to work with py2 and py3, perhaps by getfullargspec\n\n if onlykeys and not onlyused: # only add to keywords\n covered = 0 # simulate no args\n else:\n covered = len(args)\n\n if onlyused and onlykeys: # only add modified by user\n adds = [(True if i < covered or key in kwargs else False) for i, key in\n enumerate(params)]\n # add keys from args\n for i, val in enumerate(args):\n kwargs[params[i]] = val\n elif onlyused:\n adds = [(True if i >= covered and key in kwargs else False) for i, key\n in\n enumerate(params)]\n else:\n adds = [(True if i >= covered else False) for i, key in\n enumerate(params)]\n return adds, params, kwargs", "def get_fn_arg_contexts(cls, ctx: AntlrTelParser.FnContext) -> List[Any]:\n if len(ctx.children) <= 3:\n # [fn_name,(,)] => 3 children means no args, return empty array\n return []\n else:\n # Skip fnname and '(', step 2 to skip ','\n return ctx.children[2::2]", "def getListOfParameters(self, *args):\n return _libsbml.KineticLaw_getListOfParameters(self, *args)", "def help_args():\n pass", "def parse_kw_args(tagname, bits, args_spec=None, restrict=False):\n\n args = []\n\n if restrict:\n if args_spec is None:\n raise ValueError(\"you must pass an args_spec dict if you want to restrict allowed args\")\n allowed = list(args_spec.keys())\n do_validate = True\n else:\n do_validate = args_spec is not None\n\n for bit in bits:\n try:\n name, val = bit.split('=')\n except ValueError:\n raise template.TemplateSyntaxError(\n \"keyword arguments to '%s' tag must have 'key=value' form (got : '%s')\" \\\n % (tagname, bit)\n )\n\n name = str(name)\n if do_validate:\n if restrict:\n if name in allowed:\n # we only want each name once\n del allowed[allowed.index(name)]\n else:\n raise template.TemplateSyntaxError(\n \"keyword arguments to '%s' tag must be one of % (got : '%s')\" \\\n % (tagname, \",\".join(allowed), name)\n )\n\n validate = args_spec[name]\n else:\n validate = args_spec.get(name, None)\n\n if validate is not None:\n if callable(validate):\n try:\n val = validate(val)\n except Exception, e:\n raise template.TemplateSyntaxError(\n \"invalid optional argument '%s' for '%s' tag: '%s' (%s)\" \\\n % (tagname, name, val, e)\n )\n else:\n # assume re\n if re.match(validate, val) is None:\n raise template.TemplateSyntaxError(\n 
\"invalid optional argument '%s' for '%s' tag: '%s' (doesn't match '%s')\" \\\n % (tagname, name, val, validate)\n )\n\n # should be ok if we managed to get here \n args.append((name, val))\n\n return args", "def args(self) -> tuple[Basic, ...]:\n return self._args", "def get_used_kwargs(self):\n return self._used_kwargs", "def getArgs(func):\n # exclude the defaults at the end (hence the [:-1])\n args = list(utils.flatten(inspect.getargspec(func)[:-1]))\n return set(args).difference(set([None]))", "def get_python_function_arguments(f):\n # Note that we only return non-optional arguments (we assume that any optional args are not specified).\n # This allows to, e.g., accept max(a, b, *more, name='') as a binary function\n param_specs = inspect.getfullargspec(f)\n annotations = param_specs.annotations\n arg_names = param_specs.args\n defaults = param_specs.defaults # \"if this tuple has n elements, they correspond to the last n elements listed\n # in args\"\n if defaults:\n arg_names = arg_names[:-len(defaults)]\n return (arg_names, annotations)", "def get_args(self):\n rqst = self.request\n args = rqst.arguments()\n resp = {}\n for arg in args:\n resp[arg] = repr(rqst.get_all(arg))\n return resp", "def parameter_names(self):\n return [x for x in self.transformations.values() if isinstance(x, str)]", "def test_single_keyword_arg_provided(self):\n _func = required_parameters('arg1')(undecorated_func)\n self.assertEqual(_func(arg1='hello'), 'foo')", "def find_abbreviations(self, kwargs):\n new_kwargs = []\n try:\n sig = self.signature()\n except (ValueError, TypeError):\n # can't inspect, no info from function; only use kwargs\n return [ (key, value, value) for key, value in kwargs.items() ]\n\n for param in sig.parameters.values():\n for name, value, default in _yield_abbreviations_for_parameter(param, kwargs):\n if value is empty:\n raise ValueError('cannot find widget or abbreviation for argument: {!r}'.format(name))\n new_kwargs.append((name, value, default))\n return new_kwargs", "def _get_required_args(func):\n module_logger.debug(f\"_get_required_args: func={func}\")\n fas = inspect.getfullargspec(func)\n module_logger.debug(f\"_get_required_args: fas={fas}\")\n len_args = len(fas.args)\n len_args += len(fas.kwonlyargs)\n if fas.kwonlydefaults is not None:\n len_args -= len(fas.kwonlydefaults)\n if fas.defaults is not None:\n len_args -= len(fas.defaults)\n return len_args", "def compile_parameter_list(self):\r\n if self.__tokenizer.token_type() != TYPES_DIC[\"SYMBOL\"]:\r\n type = self.__get_type()\r\n self.__advance()\r\n name = self.__tokenizer.identifier()\r\n self.__subroutine_symbols.define(name, type, \"argument\")\r\n self.__advance()\r\n while self.__tokenizer.symbol() != ')':\r\n self.__advance()\r\n type = self.__get_type()\r\n self.__advance()\r\n name = self.__tokenizer.identifier()\r\n self.__subroutine_symbols.define(name, type, \"argument\")\r\n self.__advance()", "def test_onearg_and_keyword(self):\n varargs = (12,)\n kwargs = {'default' : 13}\n method = getattr(self.foo,'f_onearg_and_default')\n var_dict = reassign_function_arguments(method, varargs, kwargs)\n self.assert_(var_dict['arg1'] == 12)\n self.assert_(var_dict['default'] == 13)\n self.assert_(len(var_dict) == 2)", "def _get_arguments(self) -> str:\n func = self.node\n\n # Early logic used to iterate over, `func.get_arguments()`, however when there\n # is an unknown type clang will sometimes fail to provide tokens for that\n # argument. 
For example in \"unknown_type foo[]\" the brackets will cause clang\n # to return back no tokens for the argument.\n start = func.location\n end = func.extent.end\n if func.is_definition():\n # When a function is a definition the last child is the compound statement\n # so we need to move prior to the compound statement\n children = list(func.get_children())\n body_start = children[-1].extent.start.offset\n end = cindex.SourceLocation.from_offset(func.tu, start.file, body_start - 1)\n\n extent = cindex.SourceRange.from_locations(start, end)\n non_comment_tokens = (\n t\n for t in cindex.TokenGroup.get_tokens(func.tu, extent=extent)\n if t.kind != cindex.TokenKind.COMMENT\n )\n\n # Even though this will place spaces around all the tokens, the sphinx C domain\n # will provide some formatting to make it look nicer in the final output.\n full_signature = \" \".join(t.spelling for t in non_comment_tokens)\n\n _, _, arguments = full_signature.partition(\"(\")\n arguments = arguments.rstrip(\")\")\n arguments = arguments.strip()\n\n return arguments", "def _represent_args(*args, **kwargs):\n argument_strings = [repr(a) for a in args]\n keyword_strings = [\"=\".join((k, repr(v))) for k, v in kwargs.items()]\n return \", \".join(argument_strings + keyword_strings)", "def get_argdict(cls, toolchain, args):\n return {} # Empty must be overloaded (if required)", "def test_020_kwargs(self):\n caller = self.get_caller([KwargsTaskOverride])\n self.assertEqual([\"A\", \"B\"], caller(\"A\", \"B\"))", "def __getnewargs__(self):\n return ({'pairs': self.__pairs,\n 'app': self.__app,\n 'namespace': self.__namespace},)", "def _get_init_args(self):\n\n return dict(enum=self.enum, dflt=self._defname,\n base=self.base, shape=self.shape)", "def arguments_from_docstring(doc):\n\n if doc is None:\n raise RuntimeError('__doc__ is None')\n\n doc = doc.lstrip()\n\n # care only the firstline\n # docstring can be long\n line = doc.split('\\n', 1)[0] # get the firstline\n if line.startswith(\"('...',)\"):\n line = doc.split('\\n', 2)[1] # get the second line\n p = re.compile(r'^[\\w|\\s.]+\\(([^)]*)\\).*')\n # 'min(iterable[, key=func])\\n' -> 'iterable[, key=func]'\n sig = p.search(line)\n if sig is None:\n return []\n # iterable[, key=func]' -> ['iterable[' ,' key=func]']\n sig = sig.groups()[0].split(',')\n ret = []\n for s in sig:\n # get the last one after all space after =\n # ex: int x= True\n tmp = s.split('=')[0].split()[-1]\n # clean up non _+alphanum character\n tmp = ''.join([x for x in tmp if x.isalnum() or x == '_'])\n ret.append(tmp)\n # re.compile(r'[\\s|\\[]*(\\w+)(?:\\s*=\\s*.*)')\n # ret += self.docstring_kwd_re.findall(s)\n ret = list(filter(lambda x: x != '', ret))\n\n if len(ret) == 0:\n raise RuntimeError('Your doc is unparsable\\n' + doc)\n\n return ret", "def get_init_arguments_and_types(cls) -> List[Tuple[str, Tuple, Any]]:\n trainer_default_params = inspect.signature(cls).parameters\n name_type_default = []\n for arg in trainer_default_params:\n arg_type = trainer_default_params[arg].annotation\n arg_default = trainer_default_params[arg].default\n try:\n arg_types = tuple(arg_type.__args__)\n except AttributeError:\n arg_types = (arg_type,)\n\n name_type_default.append((arg, arg_types, arg_default))\n\n return name_type_default", "def Dict(**args):\n return args" ]
[ "0.6879302", "0.6823232", "0.6740888", "0.6619218", "0.6581217", "0.6491079", "0.64394325", "0.6434128", "0.63784575", "0.6353484", "0.6328109", "0.6305639", "0.6271534", "0.62503344", "0.61853814", "0.61465836", "0.61248285", "0.60984993", "0.60940886", "0.6034932", "0.6010237", "0.5984834", "0.59805983", "0.5965739", "0.59585035", "0.5955378", "0.5951753", "0.59460723", "0.5945526", "0.5922179", "0.590881", "0.59012026", "0.589215", "0.58882207", "0.5878184", "0.58638614", "0.58539194", "0.584791", "0.58155006", "0.5810042", "0.58016217", "0.57895434", "0.5788769", "0.57840365", "0.57814956", "0.57776946", "0.5758584", "0.5757881", "0.57560164", "0.574679", "0.5728085", "0.5722768", "0.5719081", "0.571425", "0.5693369", "0.567901", "0.5666716", "0.5655951", "0.5655951", "0.5655951", "0.56547797", "0.56521076", "0.56521076", "0.5650189", "0.56343067", "0.5621169", "0.56103337", "0.5610239", "0.5607466", "0.5607306", "0.56014156", "0.559886", "0.5591271", "0.5583961", "0.55775857", "0.5575953", "0.557396", "0.55611545", "0.55519974", "0.5551628", "0.55474937", "0.55472314", "0.5532461", "0.5529691", "0.55242467", "0.5523207", "0.5499", "0.5493939", "0.54911256", "0.54830503", "0.5472165", "0.5466985", "0.5466185", "0.54644835", "0.54593605", "0.54583234", "0.5455486", "0.5451966", "0.5451327" ]
0.660815
4
Create FunctionDef responsible for casting python argument to C
def Python_to_C(c_object): try : cast_function = py_to_c_registry[(c_object.dtype, c_object.precision)] except KeyError: errors.report(PYCCEL_RESTRICTION_TODO, symbol=c_object.dtype,severity='fatal') cast_func = FunctionDef(name = cast_function, body = [], arguments = [Variable(dtype=PyccelPyObject(), name = 'o', is_pointer=True)], results = [Variable(dtype=c_object.dtype, name = 'v', precision = c_object.precision)]) return cast_func
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def C_to_Python(c_object):\n try :\n cast_function = c_to_py_registry[(c_object.dtype, c_object.precision)]\n except KeyError:\n errors.report(PYCCEL_RESTRICTION_TODO, symbol=c_object.dtype,severity='fatal')\n\n cast_func = FunctionDef(name = cast_function,\n body = [],\n arguments = [Variable(dtype=c_object.dtype, name = 'v', precision = c_object.precision)],\n results = [Variable(dtype=PyccelPyObject(), name = 'o', is_pointer=True)])\n\n return cast_func", "def cython_c2py_conv_function_pointer(t_, ts):\n t = t_[1]\n argnames = []\n argdecls = []\n argbodys = []\n argrtns = []\n for n, argt in t[1][2]:\n argnames.append(n)\n decl, body, rtn = ts.cython_py2c(n, argt, proxy_name=\"c_\" + n)\n argdecls += decl.split('\\n') if isinstance(decl,basestring) else [decl]\n argbodys += body.split('\\n') if isinstance(body,basestring) else [body]\n argrtns += rtn.split('\\n') if isinstance(rtn,basestring) else [rtn]\n rtnname = 'rtn'\n rtnprox = 'c_' + rtnname\n rtncall = 'c_call_' + rtnname\n while rtnname in argnames or rtnprox in argnames:\n rtnname += '_'\n rtnprox += '_'\n argdecls = indent(argdecls)\n argbodys = indent(argbodys)\n rtndecl, rtnbody, rtnrtn, _ = ts.cython_c2py(rtncall, t[2][2],\n cached=False, proxy_name=rtnprox, existing_name=rtncall)\n if rtndecl is None and rtnbody is None:\n rtnprox = rtnname\n rtndecls = [rtndecl]\n returns_void = (t[2][2] == 'void')\n if not returns_void:\n rtndecls.append(\"cdef {0} {1}\".format(ts.cython_ctype(t[2][2]),\n rtncall))\n rtndecl = indent(rtndecls)\n rtnbody = indent(rtnbody)\n s = ('def {{proxy_name}}({arglist}):\\n'\n '{argdecls}\\n'\n '{rtndecl}\\n'\n ' if {{var}} == NULL:\\n'\n ' raise RuntimeError(\"{{var}} is NULL and may not be '\n 'safely called!\")\\n'\n '{argbodys}\\n')\n s += ' {{var}}({carglist})\\n' if returns_void else \\\n ' {rtncall} = {{var}}({carglist})\\n'\n s += '{rtnbody}\\n'\n s = s.format(arglist=\", \".join(argnames), argdecls=argdecls,\n cvartypeptr=ts.cython_ctype(t_).format(type_name='cvartype'),\n argbodys=argbodys, rtndecl=rtndecl, rtnprox=rtnprox,\n rtncall=rtncall, carglist=\", \".join(argrtns), rtnbody=rtnbody)\n caches = 'if {cache_name} is None:\\n' + indent(s)\n if not returns_void:\n caches += \"\\n return {rtnrtn}\".format(rtnrtn=rtnrtn)\n caches += '\\n {cache_name} = {proxy_name}\\n'\n return s, s, caches", "def arg_to_CFI(self, node, ordered_functions):\n options = node.options\n fmt_func = node.fmtdict\n\n if options.wrap_fortran is False:\n # The buffer function is intended to be called by Fortran.\n # No Fortran, no need for buffer function.\n return\n\n ast = node.ast\n declarator = ast.declarator\n result_typemap = ast.typemap\n # shadow classes have not been added yet.\n # Only care about string, vector here.\n result_is_ptr = declarator.is_indirect()\n if (\n result_typemap\n and result_typemap.base in [\"string\", \"vector\"]\n and result_typemap.name != \"char\"\n and not result_is_ptr\n ):\n node.wrap.c = False\n # node.wrap.fortran = False\n self.config.log.write(\n \"Skipping {}, unable to create C wrapper \"\n \"for function returning {} instance\"\n \" (must return a pointer or reference).\"\n \" Bufferify version will still be created.\\n\".format(\n result_typemap.cxx_type, declarator.user_name\n )\n )\n \n cfi_args = {}\n for arg in ast.declarator.params:\n declarator = arg.declarator\n name = declarator.user_name\n attrs = declarator.attrs\n meta = declarator.metaattrs\n cfi_args[name] = False\n arg_typemap = arg.typemap\n if meta[\"api\"]:\n # API explicitly set by user.\n 
continue\n elif meta[\"assumed-rank\"]:\n cfi_args[name] = True\n elif attrs[\"rank\"]:\n cfi_args[name] = True\n elif arg_typemap.sgroup == \"string\":\n cfi_args[name] = True\n elif arg_typemap.sgroup == \"char\":\n if declarator.is_indirect():\n cfi_args[name] = True\n elif meta[\"deref\"] in [\"allocatable\", \"pointer\"]:\n cfi_args[name] = True\n has_cfi_arg = any(cfi_args.values())\n\n # Function result.\n need_buf_result = None\n\n result_as_arg = \"\" # Only applies to string functions\n # when the result is added as an argument to the Fortran api.\n\n # Check if result needs to be an argument.\n declarator = ast.declarator\n attrs = declarator.attrs\n meta = declarator.metaattrs\n if meta[\"deref\"] == \"raw\":\n # No bufferify required for raw pointer result.\n pass\n elif result_typemap.sgroup == \"string\":\n need_buf_result = \"cfi\"\n result_as_arg = fmt_func.F_string_result_as_arg\n result_name = result_as_arg or fmt_func.C_string_result_as_arg\n elif result_typemap.sgroup == \"char\" and result_is_ptr:\n need_buf_result = \"cfi\"\n result_as_arg = fmt_func.F_string_result_as_arg\n result_name = result_as_arg or fmt_func.C_string_result_as_arg\n elif meta[\"deref\"] in [\"allocatable\", \"pointer\"]:\n need_buf_result = \"cfi\"\n\n if not (need_buf_result or\n has_cfi_arg):\n return False\n\n options.wrap_fortran = False\n\n # Create a new C function and change arguments\n # and add attributes.\n C_new = node.clone()\n ordered_functions.append(C_new)\n self.append_function_index(C_new)\n\n generated_suffix = \"cfi\"\n C_new._generated = \"arg_to_cfi\"\n C_new.splicer_group = \"cfi\"\n if need_buf_result:\n C_new.ast.declarator.metaattrs[\"api\"] = need_buf_result\n fmt_func = C_new.fmtdict\n fmt_func.function_suffix = fmt_func.function_suffix + fmt_func.C_cfi_suffix\n\n C_new.wrap.assign(c=True)#, fortran=True)\n C_new._PTR_C_CXX_index = node._function_index\n\n for arg in C_new.ast.declarator.params:\n name = arg.declarator.user_name\n if cfi_args[name]:\n arg.declarator.metaattrs[\"api\"] = generated_suffix\n\n ast = C_new.ast\n if True: # preserve to avoid changing indention for now.\n f_attrs = node.ast.declarator.attrs # Fortran function attributes\n f_meta = node.ast.declarator.metaattrs # Fortran function attributes\n if result_as_arg:\n # decl: const char * getCharPtr2() +len(30)\n # +len implies copying into users buffer.\n result_as_string = ast.result_as_arg(result_name)\n result_as_string.const = False # must be writeable\n attrs = result_as_string.declarator.attrs\n # Special case for wrapf.py to override \"allocatable\"\n f_meta[\"deref\"] = None\n result_as_string.declarator.metaattrs[\"api\"] = \"cfi\"\n result_as_string.declarator.metaattrs[\"deref\"] = \"result\"\n result_as_string.declarator.metaattrs[\"is_result\"] = True\n C_new.ast.declarator.metaattrs[\"api\"] = None\n C_new.ast.declarator.metaattrs[\"intent\"] = \"subroutine\"\n C_new.ast.declarator.metaattrs[\"deref\"] = None\n\n if result_as_arg:\n F_new = self.result_as_arg(node, C_new)\n ordered_functions.append(F_new)\n self.append_function_index(F_new)\n else:\n if node._generated in [\"result_to_arg\", \"fortran_generic\", \"getter/setter\"]:\n node.wrap.c = False\n # Fortran function may call C subroutine if string/vector result\n # Fortran function calls bufferify function.\n node._PTR_F_C_index = C_new._function_index\n return True", "def make_c_function_stubs(self):\n fn =\\\n\"\"\"{rettype} {fnname}({args}){{\n {rettype} ret;\n\n ret = 
{cast_and_deref}___madz_LANG_python_OUTPUT.{nodename}({argnames});\n\n return ret;\n}}\n\n\"\"\"\n fn_no_return =\\\n\"\"\"{rettype} {fnname}({args}){{\n ___madz_LANG_python_OUTPUT.{nodename}({argnames});\n return;\n}}\n\n\"\"\"\n res = \"\"\n c_gen = c_wrapgen.CGenerator([],\"\", self.description)\n for node in self.description.definitions():\n if isinstance(node.type.get_type(), pdl.TypeFunction):\n fragments = {\n \"maybe_parentheses\": \")\" if isinstance(node.type.return_type.get_type(),pdl.TypeStruct) else \"\",\n \"cast_and_deref\": self.make_c_cast_deref_string(c_gen, node.type.return_type),\n \"rettype\": c_gen.gen_type_string(\"\", node.type.return_type),\n \"fnname\": \"___madz_LANG_python_FN_\" + node.name,\n \"nodename\": node.name,\n \"args\": \",\".join(map(\n lambda a: c_gen.gen_type_string(a.name, a.type),\n node.type.args)),\n \"argnames\":\",\".join(map(\n lambda a: a.name,\n node.type.args))\n }\n res += (fn if not isinstance(node.type.return_type, pdl.TypeTypeNone) else fn_no_return).format(**fragments)\n return res", "def convert_result_as_arg(self, node, ordered_functions):\n return ordered_functions # XXX - do nothing for now\n options = node.options\n fmt_func = node.fmtdict\n# if options.F_string_len_trim is False: # XXX what about vector?\n# return\n\n ast = node.ast\n result_typemap = ast.typemap\n result_name = None\n\n # Check if result needs to be an argument.\n attrs = ast.attrs\n meta = ast.metaattrs\n if meta[\"deref\"] == \"raw\":\n # No bufferify required for raw pointer result.\n pass\n elif result_typemap.sgroup in [\"char\", \"string\"]:\n result_name = fmt_func.F_string_result_as_arg\n# result_as_arg = fmt_func.F_string_result_as_arg\n# result_name = result_as_arg or fmt_func.C_string_result_as_arg\n# elif result_typemap.base == \"vector\":\n# has_vector_result = True\n# elif result_is_ptr:\n# if meta[\"deref\"] in [\"allocatable\", \"pointer\"]:\n# need_cdesc_result = True\n# elif attrs[\"dimension\"]:\n# need_cdesc_result = True\n\n if not result_name:\n return\n\n##########\n # Create a new C function and change arguments\n # and add attributes.\n C_new = node.clone()\n ordered_functions.append(C_new)\n self.append_function_index(C_new)\n\n# generated_suffix = \"buf\"\n C_new._generated = \"result_to_arg\"\n fmt_func = C_new.fmtdict\n# fmt_func.function_suffix = fmt_func.function_suffix + fmt_func.C_bufferify_suffix + \"XXX\"\n# fmt_func.function_suffix = fmt_func.function_suffix\n\n options = C_new.options\n C_new.wrap.assign(c=True, fortran=True)\n C_new._PTR_C_CXX_index = node._function_index\n##########\n\n # decl: const char * getCharPtr2()\n new_arg = C_new.ast.result_as_arg(result_name)\n new_arg.const = False # must be writeable\n# attrs = new_arg.attrs\n# new_arg.metaattrs[\"deref\"] = None\n # Special case for wrapf.py to override \"allocatable\"\n\n # Special case for wrapf.py to override \"allocatable\"\n node.ast.metaattrs[\"deref\"] = None\n new_arg.metaattrs[\"deref\"] = \"result\"\n new_arg.metaattrs[\"is_result\"] = True\n C_new.ast.metaattrs[\"intent\"] = \"subroutine\"\n C_new.ast.metaattrs[\"deref\"] = None\n\n node.wrap.fortran = False\n# node.wrap.c = False\n\n return\n F_new = self.result_as_arg(node, C_new)\n ordered_functions.append(F_new)\n self.append_function_index(F_new)", "def cpp_function(self):", "def arg_to_buffer(self, node, ordered_functions):\n options = node.options\n fmt_func = node.fmtdict\n\n if node.wrap.c is False:\n# if options.wrap_c is False: # XXX cdesc.yaml GetScalar2\n # The user does not require a C 
wrapper.\n # This can be the case if the Fortran wrapper is doing all\n # the work via splicer or fstatements.\n return\n\n # If a C++ function returns a std::string instance,\n # the default wrapper will not compile since the wrapper\n # will be declared as char. It will also want to return the\n # c_str of a stack variable. Warn and turn off the wrapper.\n ast = node.ast\n declarator = ast.declarator\n result_typemap = ast.typemap\n # shadow classes have not been added yet.\n # Only care about string, vector here.\n result_is_ptr = ast.declarator.is_indirect()\n if (\n result_typemap\n and result_typemap.base in [\"string\", \"vector\"]\n and result_typemap.name != \"char\"\n and not result_is_ptr\n ):\n node.wrap.c = False\n # node.wrap.fortran = False\n self.config.log.write(\n \"Skipping {}, unable to create C wrapper \"\n \"for function returning {} instance\"\n \" (must return a pointer or reference).\"\n \" Bufferify version will still be created.\\n\".format(\n result_typemap.cxx_type, declarator.user_name\n )\n )\n\n if node.wrap.fortran is False:\n # The buffer function is intended to be called by Fortran.\n # No Fortran, no need for buffer function.\n return\n if options.F_string_len_trim is False: # XXX what about vector?\n return\n\n # Arguments.\n # Is result or any argument a string or vector?\n # If so, additional arguments will be passed down so\n # create buffer version of function.\n buf_args = {}\n for arg in declarator.params:\n has_buf_arg = None\n arg_typemap = arg.typemap\n declarator = arg.declarator\n attrs = declarator.attrs\n meta = declarator.metaattrs\n if meta[\"api\"]:\n # API explicitly set by user.\n continue\n elif attrs[\"cdesc\"]:\n # User requested cdesc.\n has_buf_arg = \"cdesc\"\n elif arg_typemap.sgroup == \"string\":\n if meta[\"deref\"] in [\"allocatable\", \"pointer\", \"copy\"]:\n has_buf_arg = \"cdesc\"\n # XXX - this is not tested\n # XXX - tested with string **arg+intent(out)+dimension(ndim)\n else:\n has_buf_arg = \"buf\"\n elif arg_typemap.sgroup == \"char\":\n if arg.ftrim_char_in:\n pass\n elif declarator.is_indirect():\n if meta[\"deref\"] in [\"allocatable\", \"pointer\"]:\n has_buf_arg = \"cdesc\"\n else:\n has_buf_arg = \"buf\"\n elif arg_typemap.sgroup == \"vector\":\n if meta[\"intent\"] == \"in\":\n # Pass SIZE.\n has_buf_arg = \"buf\"\n else:\n has_buf_arg = \"cdesc\"\n elif (arg_typemap.sgroup == \"native\" and\n meta[\"intent\"] == \"out\" and\n meta[\"deref\"] != \"raw\" and\n declarator.get_indirect_stmt() in [\"**\", \"*&\"]):\n # double **values +intent(out) +deref(pointer)\n has_buf_arg = \"cdesc\"\n #has_buf_arg = \"buf\" # XXX - for scalar?\n buf_args[declarator.user_name] = has_buf_arg\n # --- End loop over function parameters\n has_buf_arg = any(buf_args.values())\n\n # Function result.\n need_buf_result = None\n\n result_as_arg = \"\" # Only applies to string functions\n # when the result is added as an argument to the Fortran api.\n\n # Check if result needs to be an argument.\n attrs = ast.declarator.attrs\n meta = ast.declarator.metaattrs\n if meta[\"deref\"] == \"raw\":\n # No bufferify required for raw pointer result.\n pass\n elif result_typemap.sgroup == \"string\":\n if meta[\"deref\"] in [\"allocatable\", \"pointer\"]:\n need_buf_result = \"cdesc\"\n else:\n need_buf_result = \"buf\"\n result_as_arg = fmt_func.F_string_result_as_arg\n result_name = result_as_arg or fmt_func.C_string_result_as_arg\n elif result_typemap.sgroup == \"char\" and result_is_ptr:\n if meta[\"deref\"] in [\"allocatable\", 
\"pointer\"]:\n # Result default to \"allocatable\".\n need_buf_result = \"cdesc\"\n else:\n need_buf_result = \"buf\"\n result_as_arg = fmt_func.F_string_result_as_arg\n result_name = result_as_arg or fmt_func.C_string_result_as_arg\n elif result_typemap.base == \"vector\":\n need_buf_result = \"cdesc\"\n elif result_is_ptr:\n if meta[\"deref\"] in [\"allocatable\", \"pointer\"]:\n if meta[\"dimension\"]:\n # int *get_array() +deref(pointer)+dimension(10)\n need_buf_result = \"cdesc\"\n\n # Functions with these results need wrappers.\n if not (need_buf_result or\n has_buf_arg):\n return\n\n # XXX node.wrap.fortran = False\n # Preserve wrap.c.\n # This keep a version which accepts char * arguments.\n\n # Create a new C function and change arguments\n # and add attributes.\n C_new = node.clone()\n ordered_functions.append(C_new)\n self.append_function_index(C_new)\n\n generated_suffix = \"buf\"\n C_new._generated = \"arg_to_buffer\"\n C_new.splicer_group = \"buf\"\n if need_buf_result:\n C_new.ast.declarator.metaattrs[\"api\"] = need_buf_result\n \n fmt_func = C_new.fmtdict\n fmt_func.function_suffix = fmt_func.function_suffix + fmt_func.C_bufferify_suffix\n\n options = C_new.options\n C_new.wrap.assign(c=node.options.wrap_c)\n C_new._PTR_C_CXX_index = node._function_index\n\n for arg in C_new.ast.declarator.params:\n declarator = arg.declarator\n attrs = declarator.attrs\n meta = declarator.metaattrs\n if buf_args[declarator.user_name]:\n meta[\"api\"] = buf_args[declarator.user_name]\n if arg.ftrim_char_in:\n continue\n arg_typemap = arg.typemap\n if arg_typemap.base == \"vector\":\n # Do not wrap the orignal C function with vector argument.\n # Meaningless to call without the size argument.\n # TODO: add an option where char** length is determined by looking\n # for trailing NULL pointer. 
{ \"foo\", \"bar\", NULL };\n node.wrap.c = False\n node.wrap.lua = False # NotImplemented\n\n ast = C_new.ast\n if True: # preserve to avoid changing indention for now.\n # Add additional argument to hold result.\n # This will allocate a new character variable to hold the\n # results of the C++ function.\n f_attrs = node.ast.declarator.attrs # Fortran function attributes\n f_meta = node.ast.declarator.metaattrs # Fortran function attributes\n\n if result_as_arg:\n # decl: const char * getCharPtr2() +len(30)\n # +len implies copying into users buffer.\n result_as_string = ast.result_as_arg(result_name)\n result_as_string.const = False # must be writeable\n attrs = result_as_string.declarator.attrs\n # Special case for wrapf.py to override \"allocatable\"\n f_meta[\"deref\"] = None\n # We've added an argument to fill, use api=buf.\n result_as_string.declarator.metaattrs[\"api\"] = \"buf\"\n result_as_string.declarator.metaattrs[\"deref\"] = \"result\"\n result_as_string.declarator.metaattrs[\"is_result\"] = True\n C_new.ast.declarator.metaattrs[\"api\"] = None\n C_new.ast.declarator.metaattrs[\"intent\"] = \"subroutine\"\n C_new.ast.declarator.metaattrs[\"deref\"] = None\n\n if result_as_arg:\n F_new = self.result_as_arg(node, C_new)\n ordered_functions.append(F_new)\n self.append_function_index(F_new)\n else:\n if node._generated in [\"result_to_arg\", \"fortran_generic\", \"getter/setter\"]:\n node.wrap.c = False\n \n # Fortran function may call C subroutine if string/vector result\n node._PTR_F_C_index = C_new._function_index", "def compile_function(self, function, arguments):", "def get_pytype(self, c_arg, parse_arg):\n if isinstance(c_arg, FunctionAddress):\n return 'O'\n else:\n try:\n return pytype_parse_registry[(parse_arg.dtype, parse_arg.precision)]\n except KeyError as e:\n raise NotImplementedError(\"Type not implemented for argument collection : \"+str(type(parse_arg))) from e", "def test_callback_from_c(self):\n source = io.StringIO(\"\"\"\n int add(int x, int y);\n int x(int a) {\n return add(a + 1, 13);\n }\n \"\"\")\n arch = get_current_arch()\n obj = cc(source, arch, debug=True)\n def my_add(x: int, y: int) -> int:\n return x + y + 2\n imports = {\n 'add': my_add\n }\n m = load_obj(obj, imports=imports)\n y = m.x(101)\n self.assertEqual(117, y)", "def fortran_c_wrapper(self) -> str:\n if self.fc_override is not None:\n return self.fc_override.replace('$CLASSNAME$', self.class_name).replace(\n \"$C_PREFIX$\", self.c_prefix).replace(\"$F_PREFIX$\", self.f_prefix)\n\n result = ''\n\n # declaration\n in_parameters = self._fc_in_parameters()\n return_type, out_parameters = self._fc_out_parameters()\n if self.may_throw:\n out_parameters.append('int * err_code')\n out_parameters.append('char ** err_msg')\n out_parameters.append('std::size_t * err_msg_len')\n\n func_name = '{}_{}_{}_'.format(\n self.c_prefix, self.class_name, self.name)\n\n par_str = ', '.join(in_parameters + out_parameters)\n result += '{} {}({}) {{\\n'.format(return_type, func_name, par_str)\n\n # convert input\n for par in self.params:\n result += '{}'.format(par.fc_convert_input())\n\n # call C++ function and return result\n if self.may_throw:\n result += ' try {\\n'\n result += ' *err_code = 0;\\n'\n result += indent(self._fc_cpp_call(), 4*' ')\n result += indent(self._fc_return(), 4*' ')\n result += ' }\\n'\n for exception, code in error_codes.items():\n if code != 0:\n catch = ''\n catch += 'catch (std::{} const & e) {{\\n'.format(exception)\n catch += ' *err_code = {};\\n'.format(code)\n catch += ' static 
std::string msg;\\n'\n catch += ' msg = e.what();\\n'\n catch += ' *err_msg = const_cast<char*>(msg.data());\\n'\n catch += ' *err_msg_len = msg.size();\\n'\n catch += '}\\n'\n result += indent(catch, 4*' ')\n result += self._fc_return_default()\n else:\n result += self._fc_cpp_call()\n result += self._fc_return()\n result += '}\\n\\n'\n return result", "def make_c_header(self):\n res = \\\n\"\"\"PyThreadState* ___madz_LANG_python_thread_state; //Holds Thread State for this interpreter\nPyObject *___madz_LANG_python_wrapper_module; //Hold Pointer to the _madz.py file representing this plugin\ntypedef struct{{\n{function_pointers}\n}}___madz_LANG_python_TYPE_;\n___madz_LANG_python_TYPE_ ___madz_LANG_python_OUTPUT;\nvoid ___madz_init_imports();\n{fn_dec}\n\n\"\"\"\n c_gen = c_wrapgen.CGenerator([],\"\", self.description)\n #TODO function_pointers, all same except\n fragments ={\"fn_dec\" : \"\", \"function_pointers\" : \"\"}\n fn = \"\"\"{rettype}{fnname}({args});\\n\"\"\"\n pointer = \"\"\" {prettype} (*{nodename})({args});\\n\"\"\"\n for node in self.description.definitions():\n if isinstance(node.type.get_type(), pdl.TypeFunction):\n frg = {\n \"prettype\": c_gen.gen_type_string(\"\", node.type.return_type),\n \"rettype\": c_gen.gen_type_string(\"\", node.type.return_type),\n \"fnname\": \"___madz_LANG_python_FN_\" + node.name,\n \"nodename\": node.name,\n \"args\": \",\".join(map(\n lambda a: c_gen.gen_type_string(a.name, a.type),\n node.type.args)),\n\n }\n fragments[\"fn_dec\"] += fn.format(**frg)\n fragments[\"function_pointers\"] += pointer.format(**frg)\n if fragments[\"function_pointers\"] == \"\":\n fragments[\"function_pointers\"] = \"uint8_t _madz_empty;\"\n return res.format(**fragments)", "def cast(*args):\n return _ITKCostFunctionsPython.itkSingleValuedCostFunction_cast(*args)", "def cast(*args):\n return _ITKCostFunctionsPython.itkCostFunction_cast(*args)", "def itkSingleValuedCostFunction_cast(*args):\n return _ITKCostFunctionsPython.itkSingleValuedCostFunction_cast(*args)", "def dispatchMacroEnvFunction(self, tree, tree_parent):\n cpp_func_name = \"getMacroProperty\"\n py_func = tree.attr\n # extract type from function name\n py_type = py_func[len(cpp_func_name):]\n if py_type not in self._fgpu_types:\n self.RaiseError(tree, f\"'{py_type}' is not a valid FLAME GPU type\")\n # get cpp type\n t = self._fgpu_types[py_type]\n cpp_func_name += f\"<{t}\"\n # mess with the parent to extract (and remove arguments so they dont end up in the argument list)\n if not tree_parent.args :\n self.RaiseError(tree, f\" Macro environment function '{py_func}' is expected to have some arguments.\")\n # if more than one arg then the rest are bounds to translate\n if len(tree_parent.args) > 1:\n bounds = tree_parent.args[1:]\n # process bounds by appending to cpp function template arguments\n for i in bounds:\n if isinstance(i, ast.Num): # num required for python 3.7\n if not isinstance(i.n, int):\n self.RaiseError(tree, f\" Macro environment function argument '{i}' should be an integer value.\")\n cpp_func_name += f\", {i.n}\"\n else: # all Python > 3.7 \n if not isinstance(i, ast.Constant):\n self.RaiseError(tree, f\" Macro environment function argument '{i}' should be an constant value (or Num in Python <3.8).\")\n if not isinstance(i.value, int):\n self.RaiseError(tree, f\" Macro environment function argument '{i}' should be an integer value.\")\n cpp_func_name += f\", {i.value}\"\n # remove bounds from argument list (in place)\n del tree_parent.args[1:]\n cpp_func_name += \">\"\n 
self.write(cpp_func_name)", "def itkCostFunction_cast(*args):\n return _ITKCostFunctionsPython.itkCostFunction_cast(*args)", "def _make_array(self, c):\n return (c * ctypes.py_object)()", "def make_get_python_out_struct(self):\n res = \\\n\"\"\"DLLEXPORT ___madz_LANG_python_TYPE_* {}_get_out_struct(){{\n return &___madz_LANG_python_OUTPUT;\n}}\n\n\"\"\"\n return res.format(self.python_mangle)", "def create_checked_function():\n\n ffi = cffi.FFI()\n ffi.cdef(\"\"\"\nint overhead(int32_t* list, size_t num, char* utf8, int* error);\n\"\"\")\n c = ffi.dlopen(\"./liboverhead/liboverhead.so\")\n overhead = c.overhead\n\n error_type = ffi.typeof(\"int*\")\n\n def func(list_, text):\n # typecheck/convert text\n if isinstance(text, unicode):\n text = text.encode(\"utf-8\")\n elif text is None:\n text = ffi.NULL\n elif not isinstance(text, str):\n raise TypeError\n\n len_ = len(list_)\n error = ffi.new(error_type)\n result = overhead(list_, len_, text, error)\n\n if not result:\n raise Exception(\"Error occured: %d\" % error[0])\n\n return result\n\n return func", "def adaptPythonToCpp(self, *args):\n return _SALOMERuntime.RuntimeSALOME_adaptPythonToCpp(self, *args)", "def create_function():\n\n ffi = cffi.FFI()\n ffi.cdef(\"\"\"\nint overhead(int32_t* list, size_t num, char* utf8, int* error);\n\"\"\")\n c = ffi.dlopen(\"./liboverhead/liboverhead.so\")\n overhead = c.overhead\n\n def func(list_, length, text, error):\n return overhead(list_, length, text, error)\n\n return overhead", "def build(self, cres):\n _launch_threads()\n # Build wrapper for ufunc entry point\n ctx = cres.target_context\n library = cres.library\n signature = cres.signature\n llvm_func = library.get_function(cres.fndesc.llvm_func_name)\n wrapper, env = build_gufunc_wrapper(library, ctx, llvm_func,\n signature, self.sin, self.sout,\n fndesc=cres.fndesc,\n env=cres.environment)\n\n ptr = library.get_pointer_to_function(wrapper.name)\n\n # Get dtypes\n dtypenums = []\n for a in signature.args:\n if isinstance(a, types.Array):\n ty = a.dtype\n else:\n ty = a\n dtypenums.append(as_dtype(ty).num)\n\n return dtypenums, ptr, env", "def _PythonToCtype(data, c_type):\n if c_type is actuator_util.Vec3:\n # Handle Vec3.\n assert len(data) == 3\n c_data = c_type()\n c_data.x = data[0]\n c_data.y = data[1]\n c_data.z = data[2]\n return c_data\n elif hasattr(c_type, '_length_'):\n # Handle arrays.\n length = getattr(c_type, '_length_')\n assert len(data) == length\n\n c_data = c_type()\n for i in range(length):\n c_data[i] = _PythonToCtype(data[i], getattr(c_type, '_type_'))\n\n elif hasattr(c_type, '_fields_'):\n # Handle structures.\n fields = autogen_util.GetCFields(c_type)\n assert set(data.keys()) == {field for field, _ in fields}\n\n c_data = c_type()\n for field, field_type in fields:\n setattr(c_data, field, _PythonToCtype(data[field], field_type))\n\n else:\n c_data = c_type(data)\n\n return c_data", "def get_C_code(self, C_function_name):\n from cascada.bitvector.printing import BvCCodePrinter\n\n width2type = BvCCodePrinter._width2C_type\n\n # in C, * binds to the declarator, not the type specifier\n input_vars_c = ', '.join([\"{} {}\".format(width2type(v.width), v.name) for v in self.input_vars])\n output_vars_c = ', '.join([\"{} *{}\".format(width2type(v.width), v.name) for v in self.output_vars])\n if self.external_vars:\n external_vars_c = ', '.join([\"{} {}\".format(width2type(v.width), v.name) for v in self.external_vars])\n external_vars_c = external_vars_c + \", \"\n else:\n external_vars_c = \"\"\n\n aux = f\"void 
{C_function_name}({input_vars_c}, {external_vars_c}{output_vars_c})\"\n header = f\"{aux};\"\n body = f\"#include <stdint.h>\\n{aux}{{\" # stdint for uint_*\n\n outvar2outvar_c = {v: core.Variable(\"*\" + v.name, v.width, allowed_symbols=\"*\") for v in self.output_vars}\n\n def primary_assignment2C_code(my_var, my_expr):\n assert isinstance(my_expr, (core.Constant, core.Variable, operation.PrimaryOperation))\n if my_var in self.output_vars:\n return f\"*{my_var} = {my_expr.crepr()};\"\n else:\n return f\"{width2type(my_var.width)} {my_var} = {my_expr.crepr()};\"\n\n for var, expr in self.assignments.items():\n expr = expr.xreplace(outvar2outvar_c)\n if isinstance(expr, operation.SecondaryOperation):\n expr = expr.doit(eval_sec_ops=True)\n body += f\"\\n\\t{primary_assignment2C_code(var, expr)}\"\n body += \"\\n};\"\n\n return header, body", "def make_func_code(params):\n class FuncCode(object):\n __slots__ = ('co_varnames', 'co_argcount')\n fc = FuncCode()\n fc.co_varnames = params\n fc.co_argcount = len(params)\n return fc", "def cast(*args):\n return _ITKCostFunctionsPython.itkMultipleValuedCostFunction_cast(*args)", "def call_ccall(x):\n ret = c_call(x)\n return ret, cython.typeof(ret)", "def call_cdef_inline(x):\n ret = cdef_inline(x)\n return ret, cython.typeof(ret)", "def _build_comute_argtype(num_nd, num_nd_write):\n ret = [_xc_func_p, ctypes.c_size_t]\n ret += [_ndptr] * num_nd\n ret += [_ndptr_w] * num_nd_write\n return tuple(ret)", "def adaptCorbaToCpp(self, *args):\n return _SALOMERuntime.RuntimeSALOME_adaptCorbaToCpp(self, *args)", "def cython_funcname(self, name, argkinds=None):\n if isinstance(name, basestring):\n return name\n if argkinds is None:\n argkinds = [(Arg.NONE, None)] * (len(name) - 1)\n fname = name[0]\n cfs = []\n for x, (argkind, argvalue) in zip(name[1:], argkinds):\n if argkind is Arg.TYPE:\n cf = self.cython_functionname(x)[1]\n elif argkind is Arg.LIT:\n cf = self.cython_literal(x)\n elif argkind is Arg.VAR:\n cf = x\n elif isinstance(x, Number):\n cf = self.cython_literal(x)\n else:\n try:\n cf = self.cython_functionname(x)[1] # guess type\n except TypeError:\n cf = x # guess variable\n cfs.append(cf)\n fname += '' if 0 == len(cfs) else \"_\" + \"_\".join(cfs)\n return fname", "def result_as_arg(self, node, C_new):\n F_new = C_new.clone()\n\n # Fortran function should wrap the new C function\n F_new._PTR_F_C_index = C_new._function_index\n F_new.wrap.assign(fortran=True)\n # Do not add '_bufferify'\n F_new.fmtdict.function_suffix = node.fmtdict.function_suffix\n\n # Do not wrap original function (does not have result argument)\n node.wrap.fortran = False\n return F_new", "def build_func_body(func_name, arg_dict, return_type):\n body = \"\"\n arg_list = \"\"\n\n # the following are pointers to scalar outputs\n # Note: pBufferSize was renamed pBufferSizeInBytes in v6.5\n scalar_ptr_outputs = ['nnzTotalDevHostPtr',\n 'pBufferSize',\n 'pBufferSizeInBytes',\n 'resultDevHostPtr']\n\n is_creator = 'cusparseCreate' in func_name\n is_getter = 'cusparseGet' in func_name\n\n if return_type == 'cusparseStatus_t' and not (is_creator or is_getter):\n is_return = False\n else:\n is_return = True\n\n # else:\n return_str = ''\n for k, v in arg_dict.items():\n\n \"\"\"\n set some flags based on the name/type of the argument\n will use these flags to determine whether and how to call ffi.new or\n ffi.cast on each variable\n \"\"\"\n is_ptr = '*' in v\n is_cusparse_type = '_t' in v\n is_cusparse_ptr = is_ptr and is_cusparse_type\n is_output_scalar = k in 
scalar_ptr_outputs\n if k in ['alpha', 'beta']:\n is_scalar = True\n else:\n is_scalar = False\n if is_getter:\n is_gpu_array = False\n else:\n is_gpu_array = is_ptr and (not is_cusparse_ptr) and (not is_scalar)\n if 'Complex' in v:\n is_complex = True\n else:\n is_complex = False\n\n # convert variable to appropriate type for the FFI\n if is_output_scalar:\n # for scalar outputs make a new pointer\n body += \"%s = ffi.cast('%s', %s)\\n\" % (k, v, k)\n elif is_getter and is_ptr and (return_type == 'cusparseStatus_t'):\n # any pointers in cusparseGet* are new outputs to be created\n body += \"%s = ffi.new('%s')\\n\" % (k, v)\n elif is_gpu_array:\n # pass pointer to GPU array data (use either .ptr or .gpudata)\n body += \"%s = ffi.cast('%s', %s.ptr)\\n\" % (k, v, k)\n elif is_cusparse_ptr:\n if is_creator:\n # generate custom cusparse type\n body += \"%s = ffi.new('%s')\\n\" % (k, v)\n else:\n # cast to the custom cusparse type\n body += \"%s = ffi.cast('%s', %s)\\n\" % (k, v, k)\n elif is_ptr and is_scalar:\n # create new pointer, with value initialized to scalar\n if is_complex:\n # complex case is a bit tricky. requires ffi.buffer\n body += \"%sffi = ffi.new('%s')\\n\" % (k, v)\n if 'cusparseC' in func_name:\n body += \"ffi.buffer(%sffi)[:] = \\\n np.complex64(%s).tostring()\\n\" % (k, k)\n elif 'cusparseZ' in func_name:\n body += \"ffi.buffer(%sffi)[:] = \\\n np.complex128(%s).tostring()\\n\" % (k, k)\n else:\n body += \"%s = ffi.new('%s', %s)\\n\" % (k, v, k)\n elif is_ptr or v == 'cudaStream_t':\n # case non-scalar pointer to appropriate type\n body += \"%s = ffi.cast('%s', %s)\\n\" % (k, v, k)\n else:\n # don't need explicit cast for plain int, float, etc\n pass\n\n # build the list of arguments to pass to the API\n if is_ptr and is_scalar and is_complex:\n # take into account modified argument name for complex scalars\n arg_list += \"%sffi, \" % k\n else:\n arg_list += \"%s, \" % k\n\n # add the function call and optionally return the result\n last_key = k\n arg_list = arg_list[:-2] # remove trailing \", \"\n if is_getter and return_type != 'cusparseStatus_t':\n body += \"return ffi_lib.%s(%s)\\n\" % (func_name, arg_list)\n else:\n # check cusparseStatus_t state before returning\n call_str = \"status = ffi_lib.%s(%s)\\n\" % (func_name, arg_list)\n body += split_line(call_str, break_pattern=', ', nmax=76)\n body += \"cusparseCheckStatus(status)\\n\"\n if is_return:\n # len(arg_dict) == 2) is to avoid return for cusparseGetLevelInfo\n if is_creator or (is_getter and (len(arg_dict) == 2)):\n body += \"return %s[0]\\n\" % last_key\n else:\n body += \"#TODO: return the appropriate result\"\n body += '\\n\\n'\n return reindent(body, numSpaces=4, lstrip=False)", "def cpp_type_to_python(self, ot: str):\n t = ot\n t = remove_cvref(t)\n t = self._remove_variable_type_prefix(t)\n try:\n return cpp_base_type_to_python(t)\n except KeyError:\n pass\n if is_function_pointer_type(t):\n func = function_pointer_type_info(t)\n args = \",\".join([self.cpp_type_to_python(arg.type) for arg in func.args])\n return f'Callable[[{args}], {self.cpp_type_to_python(func.ret_type)}]'\n\n if is_function_type(t):\n func = function_type_info(t)\n args = \",\".join([self.cpp_type_to_python(arg.type) for arg in func.args])\n return f'Callable[[{args}], {self.cpp_type_to_python(func.ret_type)}]'\n\n if is_pointer_type(t):\n cpp_base = self.resolve_to_basic_type_remove_const(pointer_base(t))\n if is_pointer_type(cpp_base) or is_array_type(cpp_base):\n return f'\"level 2 pointer:{t}\"' # un-convertible: level 2 pointer\n 
if cpp_base in ARRAY_BASES:\n return ARRAY_BASES[cpp_base]\n return self.cpp_type_to_python(cpp_base)\n if is_array_type(t):\n b = array_base(t)\n if b in ARRAY_BASES: # special case: string array\n return ARRAY_BASES[b]\n base = self.cpp_type_to_python(b)\n return f'List[{base}]'\n if is_tuple_type(t):\n es = tuple_elements(t)\n bases = [self.cpp_type_to_python(i) for i in es]\n bases_str = \",\".join(bases)\n return f'Tuple[{bases_str}]'\n\n # check classes\n objects = self.objects\n if t in objects:\n o = objects[t]\n if isinstance(o, GeneratorClass) or isinstance(o, GeneratorEnum):\n return t.replace(\"::\", \".\").strip(\" .\") # todo fix this\n if isinstance(o, GeneratorTypedef):\n return self.cpp_type_to_python(o.target)\n\n if t.startswith(\"(anonymous\"):\n return f'\"{t}\"'\n\n # this means this is\n logger.warning(\"%s might be an internal symbol, failed to resolve to basic type\", t)\n return t", "def compile(self, args):\n if args not in self._compileinfos:\n cres = compile_with_dppl(self.py_func, None, args, debug=self.debug)\n func = cres.library.get_function(cres.fndesc.llvm_func_name)\n cres.target_context.mark_ocl_device(func)\n first_definition = not self._compileinfos\n self._compileinfos[args] = cres\n libs = [cres.library]\n\n if first_definition:\n # First definition\n cres.target_context.insert_user_function(self, cres.fndesc,\n libs)\n else:\n cres.target_context.add_user_function(self, cres.fndesc, libs)\n\n else:\n cres = self._compileinfos[args]\n\n return cres.signature", "def _create_args(self, func_args):\n self.llvm_ret_type = self._from_ctype(self.signature.ret_type)\n self.llvm_arg_types = \\\n [self._from_ctype(a) for a in self.signature.arg_ctypes]", "def get_func_type(self, *args):\n return _ida_hexrays.cfunc_t_get_func_type(self, *args)", "def cpp_funcname(self, name, argkinds=None):\n if isinstance(name, basestring):\n return name\n if argkinds is None:\n argkinds = [(Arg.NONE, None)] * (len(name) - 1)\n fname = name[0]\n cts = []\n for x, (argkind, argvalue) in zip(name[1:], argkinds):\n if argkind is Arg.TYPE:\n ct = self.cpp_type(x)\n elif argkind is Arg.LIT:\n ct = self.cpp_literal(x)\n elif isinstance(x, Number):\n ct = self.cpp_literal(x)\n else:\n try:\n ct = self.cpp_type(x) # guess it is a type\n except TypeError:\n ct = x # guess it is a variable\n cts.append(ct)\n fname += '' if 0 == len(cts) else \"< \" + \", \".join(cts) + \" >\"\n return fname", "def itkMultipleValuedCostFunction_cast(*args):\n return _ITKCostFunctionsPython.itkMultipleValuedCostFunction_cast(*args)", "def get_func_type(self, *args):\n return _ida_hexrays.cfuncptr_t_get_func_type(self, *args)", "def cast(*args):\n return _ITKCostFunctionsPython.itkCumulativeGaussianCostFunction_cast(*args)", "def cfunc_type(self):\n tif = ida_typeinf.tinfo_t()\n result = self.get_func_type(tif)\n if not result:\n return\n return tif", "def _llvm_jit_code(args, expr, signature, callback_type):\n if callback_type is None:\n jit = LLVMJitCode(signature)\n else:\n jit = LLVMJitCodeCallback(signature)\n\n jit._create_args(args)\n jit._create_function_base()\n jit._create_param_dict(args)\n strmod = jit._create_function(expr)\n if False:\n print(\"LLVM IR\")\n print(strmod)\n fptr = jit._compile_function(strmod)\n return fptr", "def _fc_function_definitions(self) -> str:\n result = 'extern \"C\" {\\n\\n'\n for namespace in self.namespaces:\n for member in namespace.members:\n result += member.fortran_c_wrapper()\n\n result += '}\\n\\n'\n return result", "def _generate_type_caster(\n py_name: 
str, cpp_name: str, generate_load: bool,\n generate_cast: bool) -> Generator[str, None, None]:\n yield 'namespace pybind11 {'\n yield 'namespace detail {'\n yield f'template <> struct type_caster<{cpp_name}> {{'\n yield ' public:'\n yield I + f'PYBIND11_TYPE_CASTER({cpp_name}, _(\"{py_name}\"));'\n yield ''\n if generate_load:\n yield I + 'bool load(handle src, bool) {'\n yield I + I + 'using ::clif::Clif_PyObjAs;'\n yield I + I + 'return Clif_PyObjAs(src.ptr(), &value);'\n yield I + '}'\n yield ''\n if generate_cast:\n yield I + (f'static handle cast({cpp_name} src, return_value_policy, '\n 'handle) {')\n yield I + I + 'using ::clif::Clif_PyObjFrom;'\n yield I + I + 'return Clif_PyObjFrom(src, {});'\n yield I + '}'\n yield '};'\n yield '} // namespace detail'\n yield '} // namespace pybind11'\n yield ''", "def fptrunc(self, typ):", "def compile_cutils():\r\n\r\n types = ['npy_' + t for t in ['int8', 'int16', 'int32', 'int64', 'int128',\r\n 'int256', 'uint8', 'uint16', 'uint32', 'uint64', 'uint128', 'uint256',\r\n 'float16', 'float32', 'float64', 'float80', 'float96', 'float128',\r\n 'float256']]\r\n\r\n complex_types = ['npy_' + t for t in ['complex32', 'complex64',\r\n 'complex128', 'complex160', 'complex192', 'complex512']]\r\n\r\n inplace_map_template = \"\"\"\r\n #if defined(%(typen)s)\r\n static void %(type)s_inplace_add(PyArrayMapIterObject *mit, PyArrayIterObject *it)\r\n {\r\n int index = mit->size;\r\n while (index--) {\r\n %(op)s\r\n\r\n PyArray_MapIterNext(mit);\r\n PyArray_ITER_NEXT(it);\r\n }\r\n }\r\n #endif\r\n \"\"\"\r\n\r\n floatadd = \"((%(type)s*)mit->dataptr)[0] = ((%(type)s*)mit->dataptr)[0] + ((%(type)s*)it->dataptr)[0];\"\r\n complexadd = \"\"\"\r\n ((%(type)s*)mit->dataptr)[0].real = ((%(type)s*)mit->dataptr)[0].real + ((%(type)s*)it->dataptr)[0].real;\r\n ((%(type)s*)mit->dataptr)[0].imag = ((%(type)s*)mit->dataptr)[0].imag + ((%(type)s*)it->dataptr)[0].imag;\r\n \"\"\"\r\n\r\n fns = ''.join([inplace_map_template % {'type': t, 'typen': t.upper(),\r\n 'op': floatadd % {'type': t}}\r\n for t in types] +\r\n [inplace_map_template % {'type': t, 'typen': t.upper(),\r\n 'op': complexadd % {'type': t}}\r\n for t in complex_types])\r\n\r\n fn_array = (\"static inplace_map_binop addition_funcs[] = {\" +\r\n ''.join([\"\"\"\r\n #if defined(%(typen)s)\r\n %(type)s_inplace_add,\r\n #endif\r\n \"\"\" % {'type': t, 'typen': t.upper()}\r\n for t in types + complex_types]) +\r\n \"\"\"NULL};\r\n \"\"\")\r\n\r\n type_number_array = (\"static int type_numbers[] = {\" +\r\n ''.join([\"\"\"\r\n #if defined(%(typen)s)\r\n %(typen)s,\r\n #endif\r\n \"\"\" % {'type': t, 'typen': t.upper()}\r\n for t in types + complex_types]) +\r\n \"-1000};\")\r\n\r\n code = (\"\"\"\r\n #include <Python.h>\r\n #include \"numpy/arrayobject.h\"\r\n\r\n extern \"C\"{\r\n static PyObject *\r\n run_cthunk(PyObject *self, PyObject *args)\r\n {\r\n PyObject *py_cthunk = NULL;\r\n if(!PyArg_ParseTuple(args,\"O\",&py_cthunk))\r\n return NULL;\r\n\r\n if (!PyCObject_Check(py_cthunk)) {\r\n PyErr_SetString(PyExc_ValueError,\r\n \"Argument to run_cthunk must be a PyCObject.\");\r\n return NULL;\r\n }\r\n void * ptr_addr = PyCObject_AsVoidPtr(py_cthunk);\r\n int (*fn)(void*) = (int (*)(void*))(ptr_addr);\r\n void* it = PyCObject_GetDesc(py_cthunk);\r\n int failure = fn(it);\r\n\r\n return Py_BuildValue(\"i\", failure);\r\n }\r\n\r\n #if NPY_API_VERSION >= 0x00000008\r\n typedef void (*inplace_map_binop)(PyArrayMapIterObject *, PyArrayIterObject *);\r\n \"\"\" + fns + fn_array + type_number_array 
+\r\n\r\n\"\"\"\r\nstatic int\r\nmap_increment(PyArrayMapIterObject *mit, PyObject *op, inplace_map_binop add_inplace)\r\n{\r\n PyArrayObject *arr = NULL;\r\n PyArrayIterObject *it;\r\n PyArray_Descr *descr;\r\n if (mit->ait == NULL) {\r\n return -1;\r\n }\r\n descr = PyArray_DESCR(mit->ait->ao);\r\n Py_INCREF(descr);\r\n arr = (PyArrayObject *)PyArray_FromAny(op, descr,\r\n 0, 0, NPY_ARRAY_FORCECAST, NULL);\r\n if (arr == NULL) {\r\n return -1;\r\n }\r\n if ((mit->subspace != NULL) && (mit->consec)) {\r\n PyArray_MapIterSwapAxes(mit, (PyArrayObject **)&arr, 0);\r\n if (arr == NULL) {\r\n return -1;\r\n }\r\n }\r\n it = (PyArrayIterObject*)\r\n PyArray_BroadcastToShape((PyObject*)arr, mit->dimensions, mit->nd);\r\n if (it == NULL) {\r\n Py_DECREF(arr);\r\n return -1;\r\n }\r\n\r\n (*add_inplace)(mit, it);\r\n\r\n Py_DECREF(arr);\r\n Py_DECREF(it);\r\n return 0;\r\n}\r\n\r\n\r\nstatic PyObject *\r\ninplace_increment(PyObject *dummy, PyObject *args)\r\n{\r\n PyObject *arg_a = NULL, *index=NULL, *inc=NULL;\r\n PyArrayObject *a;\r\n inplace_map_binop add_inplace = NULL;\r\n int type_number = -1;\r\n int i =0;\r\n PyArrayMapIterObject * mit;\r\n\r\n if (!PyArg_ParseTuple(args, \"OOO\", &arg_a, &index,\r\n &inc)) {\r\n return NULL;\r\n }\r\n if (!PyArray_Check(arg_a)) {\r\n PyErr_SetString(PyExc_ValueError, \"needs an ndarray as first argument\");\r\n return NULL;\r\n }\r\n\r\n a = (PyArrayObject *) arg_a;\r\n\r\n if (PyArray_FailUnlessWriteable(a, \"input/output array\") < 0) {\r\n return NULL;\r\n }\r\n\r\n if (PyArray_NDIM(a) == 0) {\r\n PyErr_SetString(PyExc_IndexError, \"0-d arrays can't be indexed.\");\r\n return NULL;\r\n }\r\n type_number = PyArray_TYPE(a);\r\n\r\n\r\n\r\n while (type_numbers[i] >= 0 && addition_funcs[i] != NULL){\r\n if (type_number == type_numbers[i]) {\r\n add_inplace = addition_funcs[i];\r\n break;\r\n }\r\n i++ ;\r\n }\r\n\r\n if (add_inplace == NULL) {\r\n PyErr_SetString(PyExc_TypeError, \"unsupported type for a\");\r\n return NULL;\r\n }\r\n mit = (PyArrayMapIterObject *) PyArray_MapIterArray(a, index);\r\n if (mit == NULL) {\r\n goto fail;\r\n }\r\n if (map_increment(mit, inc, add_inplace) != 0) {\r\n goto fail;\r\n }\r\n\r\n Py_DECREF(mit);\r\n\r\n Py_INCREF(Py_None);\r\n return Py_None;\r\n\r\nfail:\r\n Py_XDECREF(mit);\r\n\r\n return NULL;\r\n}\r\n #endif\r\n\r\n\r\n static PyMethodDef CutilsExtMethods[] = {\r\n {\"run_cthunk\", run_cthunk, METH_VARARGS|METH_KEYWORDS,\r\n \"Run a theano cthunk.\"},\r\n #if NPY_API_VERSION >= 0x00000008\r\n {\"inplace_increment\", inplace_increment,\r\n METH_VARARGS,\r\n \"increments a numpy array inplace at the passed indexes.\"},\r\n #endif\r\n {NULL, NULL, 0, NULL} /* Sentinel */\r\n };\"\"\")\r\n\r\n if PY3:\r\n # This is not the most efficient code, but it is written this way to\r\n # highlight the changes needed to make 2.x code compile under python 3.\r\n code = code.replace(\"<Python.h>\", '\"numpy/npy_3kcompat.h\"', 1)\r\n code = code.replace(\"PyCObject\", \"NpyCapsule\")\r\n code += \"\"\"\r\n static struct PyModuleDef moduledef = {\r\n PyModuleDef_HEAD_INIT,\r\n \"cutils_ext\",\r\n NULL,\r\n -1,\r\n CutilsExtMethods,\r\n };\r\n\r\n PyMODINIT_FUNC\r\n PyInit_cutils_ext(void) {\r\n import_array();\r\n return PyModule_Create(&moduledef);\r\n }\r\n }\r\n \"\"\"\r\n else:\r\n code += \"\"\"\r\n PyMODINIT_FUNC\r\n initcutils_ext(void)\r\n {\r\n import_array();\r\n (void) Py_InitModule(\"cutils_ext\", CutilsExtMethods);\r\n }\r\n } //extern C\r\n \"\"\"\r\n\r\n loc = os.path.join(config.compiledir, 'cutils_ext')\r\n if 
not os.path.exists(loc):\r\n os.mkdir(loc)\r\n\r\n args = cmodule.GCC_compiler.compile_args()\r\n cmodule.GCC_compiler.compile_str('cutils_ext', code, location=loc,\r\n preargs=args)", "def boost_initialization():\n global Lib_c \n Lib_c = ctypes.CDLL('./integral_function.so')\n Lib_c.set.restype = None\n Lib_c.set.argtypes = (ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p)\n Lib_c.set_target.restype = None\n Lib_c.set_target.argtypes = (ctypes.c_int,)\n Lib_c.function.restype = ctypes.c_double\n Lib_c.function.argtypes = (ctypes.c_int,ctypes.c_double)", "def _compile_C_code(header, body, return_unloaded=False, verbose=False):\n import importlib\n import tempfile\n import uuid\n\n import cffi\n\n module_name = \"module_\" + uuid.uuid4().hex\n\n if \"__uint128\" in header:\n raise ValueError(\"_compile_C_code does not support bit-vector widths \"\n \"larger than 64 bits (cffi does not support __uint128)\")\n\n ffibuilder = cffi.FFI()\n ffibuilder.cdef(header)\n ffibuilder.set_source(module_name, body)\n\n tmpdir = tempfile.TemporaryDirectory()\n lib_path = ffibuilder.compile(tmpdir=tmpdir.name, verbose=verbose)\n\n if return_unloaded:\n return lib_path, module_name, tmpdir\n\n # dynamic import\n # https://docs.python.org/3/library/importlib.html#importing-a-source-file-directly\n spec = importlib.util.spec_from_file_location(module_name, lib_path)\n pymod_parent = importlib.util.module_from_spec(spec)\n # sys.modules[module_name] = module\n spec.loader.exec_module(pymod_parent)\n\n pymod = pymod_parent\n\n return pymod, tmpdir", "def translate_to_c(Newast):\n ast = parse_file('exampleMin.c', use_cpp=True)\n\n ast.show()\n #print(\"newast: \", Newast.ext[0].decl.type.args.params[0].type.type==ast.ext[0].decl.type.args.params[0].type.type)\n #print(\"newast2: \", Newast.ext[0].decl.type.args.params[0].type.type.coord)\n #print(\"ast2: \", ast.ext[0].decl.type.args.params[0].type.type.coord)\n\n #Newast.show()\n \n # print(ast.ext[0].decl.bitsize)\n # print(Newast.ext[0].decl.bitsize)\n # print(\"----------------------------------\")\n # print(ast.ext[0].decl.type.args.coord)\n # print(Newast.ext[0].decl.type.args.coord)\n # print(\"----------------------------------\")\n # print(ast.ext[0].decl.type.args.params)\n # print(Newast.ext[0].decl.type.args.params)\n # print(\"----------------------------------\")\n # print(ast.ext[0].decl.type.args.params[0])\n # print(Newast.ext[0].decl.type.args.params[0])\n # print(\"----------------------------------\")\n # print(ast.ext[0].decl.type.args.params[0].type)\n # print(Newast.ext[0].decl.type.args.params[0].type)\n # print(\"----------------------------------\")\n # print(ast.ext[0].decl.type.args.params[0].type.type)\n # print(Newast.ext[0].decl.type.args.params[0].type.type)\n # print(\"----------------------------------\")\n # print(ast.ext[0].decl.type.args.params[0].type.type.names)\n # print(Newast.ext[0].decl.type.args.params[0].type.type.names)\n # print(\"----------------------------------\")\n\n generator = c_generator.CGenerator()\n #ast.show()\n\n # tracing the generator for debugging\n # import trace\n # tr = trace.Trace(countcallers=1)\n # tr.runfunc(generator.visit, Newast)\n # tr.results().write_results()\n\n print(generator.visit(Newast))", "def make_wrapper(fname, atypes, rtype, cres):\n fndesc = cres.fndesc\n module = cres.library.create_ir_module(fndesc.unique_name)\n context = cres.target_context\n ll_argtypes = [context.get_value_type(ty) for ty in atypes]\n ll_return_type = context.get_value_type(rtype)\n\n # TODO: design a 
API for custom wrapping\n if type(rtype).__name__ == 'ArrayPointer':\n wrapty = ir.FunctionType(ir.VoidType(),\n [ll_return_type] + ll_argtypes)\n wrapfn = module.add_function(wrapty, fname)\n builder = ir.IRBuilder(wrapfn.append_basic_block('entry'))\n fnty = context.call_conv.get_function_type(rtype, atypes)\n fn = builder.module.add_function(fnty, cres.fndesc.llvm_func_name)\n status, out = context.call_conv.call_function(\n builder, fn, rtype, atypes, wrapfn.args[1:])\n with cgutils.if_unlikely(builder, status.is_error):\n cgutils.printf(builder,\n f\"rbc: {fname} failed with status code %i\\n\",\n status.code)\n builder.ret_void()\n builder.store(builder.load(out), wrapfn.args[0])\n builder.ret_void()\n else:\n wrapty = ir.FunctionType(ll_return_type, ll_argtypes)\n wrapfn = module.add_function(wrapty, fname)\n builder = ir.IRBuilder(wrapfn.append_basic_block('entry'))\n fnty = context.call_conv.get_function_type(rtype, atypes)\n fn = builder.module.add_function(fnty, cres.fndesc.llvm_func_name)\n status, out = context.call_conv.call_function(\n builder, fn, rtype, atypes, wrapfn.args)\n with cgutils.if_unlikely(builder, status.is_error):\n cgutils.printf(builder,\n f\"rbc: {fname} failed with status code %i\\n\",\n status.code)\n builder.ret(out)\n\n cres.library.add_ir_module(module)", "def adaptPythonToCorba(self, *args):\n return _SALOMERuntime.RuntimeSALOME_adaptPythonToCorba(self, *args)", "def _deviceVariableFunctionName(self, tree, permitted_prefixes, allow_lengths = True):\n cpp_func_name = \"\"\n py_func = tree.attr\n # extract function name start\n for prefix in permitted_prefixes:\n if py_func.startswith(prefix):\n cpp_func_name = prefix\n py_func = py_func[len(prefix):]\n break # dont allow the else\n else:\n return None\n # check type and lengths\n if allow_lengths:\n #split to get type and Array Length (This could **potentially** be looked up from the model description but current syntax is consistent with swig bindings) \n type_and_length = py_func.split(\"Array\")\n if type_and_length[0] not in self._fgpu_types:\n self.RaiseError(tree, f\"'{type_and_length[0]}' is not a valid FLAME GPU type\")\n t = self._fgpu_types[type_and_length[0]]\n # generate template args\n if (len(type_and_length) == 1):\n cpp_func_name += f\"<{t}>\"\n elif (len(type_and_length) == 2):\n cpp_func_name += f\"<{t}, {type_and_length[1]}>\"\n else:\n return None\n else:\n if py_func not in self._fgpu_types:\n self.RaiseError(tree, f\"'{py_func}' is not a valid FLAME GPU type\")\n t = self._fgpu_types[py_func]\n cpp_func_name += f\"<{t}>\"\n # return \n return cpp_func_name", "def parse_capi(lines):\n pattern = r'(\\w+)\\s+(\\**)\\s*(\\w+)\\((.*)\\)' # Float32 *sin(...)\n pexcept = r'except (\\??)(.*)'\n\n functions = []\n for line in lines:\n if line.strip():\n m = re.match(pattern, line)\n restype, stars, fname, argtypes = m.groups()\n rest = line[len(m.group(0)):].strip()\n if rest:\n maybe, badval = re.match(pexcept, rest).groups()\n else:\n maybe, badval = None, None\n\n restype = parse_type(\"%s %s\" % (restype, \" \".join(stars)))\n argtypes = map(parse_type, argtypes.split(','))\n signature = Function(restype, argtypes)\n functions.append(Py_Function(fname, signature, maybe, badval))\n\n return functions", "def cython_py2c(self, name, t, inst_name=None, proxy_name=None):\n t = self.canon(t)\n if isinstance(t, basestring) or 0 == t[-1] or self.isrefinement(t[-1]):\n last = ''\n elif isinstance(t[-1], int):\n last = ' [{0}]'.format(t[-1])\n else:\n last = ' ' + t[-1]\n tkey = t\n tinst = 
None\n while tkey not in self.cython_py2c_conv and not isinstance(tkey, basestring):\n tinst = tkey\n tkey = tkey[1] if (0 < len(tkey) and self.isrefinement(tkey[1])) else tkey[0]\n if tkey not in self.cython_py2c_conv:\n tkey = t\n while tkey not in self.cython_py2c_conv and \\\n not isinstance(tkey, basestring):\n tkey = tkey[0]\n py2ct = self.cython_py2c_conv[tkey]\n if callable(py2ct):\n self.cython_py2c_conv[t] = py2ct(t, self)\n py2ct = self.cython_py2c_conv[t]\n if py2ct is NotImplemented or py2ct is None:\n raise NotImplementedError('conversion from Python to C/C++ for ' + \\\n str(t) + ' has not been implemented.')\n body_template, rtn_template = py2ct\n var = name if inst_name is None else \"{0}.{1}\".format(inst_name, name)\n proxy_name = \"{0}_proxy\".format(name) if proxy_name is None else proxy_name\n tstr = self.typestr(t, self)\n template_kw = dict(var=var, proxy_name=proxy_name, last=last, t=tstr)\n nested = False\n if self.isdependent(tkey):\n tsig = [ts for ts in self.refined_types if ts[0] == tkey][0]\n for ts, ti in zip(tsig[1:], tinst[1:]):\n if isinstance(ts, basestring):\n template_kw[ts] = self.cython_ctype(ti)\n else:\n template_kw[ti[0]] = ti[2]\n vartype = self.refined_types[tsig]\n if vartype in tsig[1:]:\n vartype = tinst[tsig.index(vartype)][1]\n if self.isrefinement(vartype):\n nested = True\n vdecl, vbody, vrtn = self.cython_py2c(var, vartype)\n template_kw['var'] = vrtn\n body_filled = body_template.format(**template_kw)\n if rtn_template:\n if '{t.cython_ctype}'in body_template:\n deft = tstr.cython_ctype\n elif '{t.cython_ctype_nopred}'in body_template:\n deft = tstr.cython_ctype_nopred\n elif '{t.cython_cytype_nopred}'in body_template:\n deft = tstr.cython_cytype_nopred\n else:\n deft = tstr.cython_cytype\n decl = \"cdef {0} {1}\".format(deft, proxy_name)\n body = body_filled\n rtn = rtn_template.format(**template_kw)\n decl += '\\n'+\"\\n\".join([l for l in body.splitlines() \\\n if l.startswith('cdef')])\n body = \"\\n\".join([l for l in body.splitlines() \\\n if not l.startswith('cdef')])\n else:\n decl = body = None\n rtn = body_filled\n if nested:\n decl = '' if decl is None else decl\n vdecl = '' if vdecl is None else vdecl\n decl = (vdecl + '\\n' + decl).strip()\n decl = None if 0 == len(decl) else decl\n body = '' if body is None else body\n vbody = '' if vbody is None else vbody\n body = (vbody + '\\n' + body).strip()\n body = None if 0 == len(body) else body\n return decl, body, rtn", "def from_function(cls, py_func, py_file):\n raise NotImplementedError", "def from_function(cls, py_func, py_file):\n raise NotImplementedError", "def c_code(self, node, name, (x, y), (z, ), sub):\r\n # raise NotImplementedError(\"Unlike Python, C's modulo returns negative\r\n # modulo on negative dividend (to implement)\")\r\n t = node.inputs[0].type.upcast(*[i.type for i in node.inputs[1:]])\r\n if (str(t) in imap(str, discrete_types) or\r\n t in ['uint8', 'int8', 'uint16', 'int16'] or\r\n t in ['uint32', 'int32', 'uint64', 'int64'] or\r\n t in discrete_types):\r\n # The above or's should not be needed anymore. 
However, for now we\r\n # keep them out of safety, and verify they are useless with an\r\n # assert.\r\n assert str(t) in imap(str, discrete_types)\r\n x_mod_y = \"THEANO_MACRO_MOD(%(x)s, %(y)s)\" % locals()\r\n x_mod_ymm = \"THEANO_MACRO_MOD(-%(x)s, -%(y)s)\" % locals()\r\n x_mod_ypm = \"THEANO_MACRO_MOD(%(x)s, -%(y)s)\" % locals()\r\n x_mod_ymp = \"THEANO_MACRO_MOD(-%(x)s, %(y)s)\" % locals()\r\n elif (str(t) in imap(str, float_types) or\r\n t in ['float32', 'float64'] or\r\n t in float_types):\r\n # The above or's should not be needed anymore. However, for now we\r\n # keep them out of safety, and verify they are useless with an\r\n # assert.\r\n assert str(t) in imap(str, float_types)\r\n x_mod_y = \"fmod(%(x)s,%(y)s)\" % locals()\r\n x_mod_ymm = \"fmod(-%(x)s,-%(y)s)\" % locals()\r\n x_mod_ypm = \"fmod(%(x)s,-%(y)s)\" % locals()\r\n x_mod_ymp = \"fmod(-%(x)s,%(y)s)\" % locals()\r\n elif str(t) in imap(str, complex_types):\r\n raise self.complex_error\r\n else:\r\n raise NotImplementedError('type not supported', t)\r\n\r\n return dedent(\"\"\"\r\n if (%(x)s < 0){\r\n if (%(y)s < 0){\r\n %(z)s = -(%(x_mod_ymm)s);\r\n }else{\r\n %(z)s = - %(x_mod_ymp)s + (%(x_mod_ymp)s != 0 ? %(y)s : 0);\r\n }\r\n }else if (%(y)s < 0){\r\n %(z)s = (%(x_mod_ypm)s) + (%(x_mod_ypm)s != 0 ? %(y)s : 0);\r\n }else{\r\n %(z)s = %(x_mod_y)s;\r\n }\r\n \"\"\") % locals()", "def generate_code(spn_id, spn, meta_types, floating_data_type):\r\n\r\n # make sure we have ids\r\n assign_ids(spn)\r\n\r\n # fill method body according to SPN structure\r\n method_body = generate_method_body(spn, spn, floating_data_type, 0)\r\n\r\n # build parameters used in generated c++ function\r\n method_params = []\r\n passed_params = []\r\n for i, type in enumerate(meta_types):\r\n if type == MetaType.DISCRETE:\r\n method_params += [f'vector <int> possibleValues{i}', f'int nullValueIdx{i}']\r\n passed_params += [f'py::arg(\"possibleValues{i}\")', f'py::arg(\"nullValueIdx{i}\")']\r\n elif type == MetaType.REAL:\r\n method_params += [f'bool inverse{i}', f'bool leftMinusInf{i}', f'float leftCondition{i}',\r\n f'bool rightMinusInf{i}', f'float rightCondition{i}', f'bool leftIncluded{i}',\r\n f'bool rightIncluded{i}', f'float nullValue{i}']\r\n passed_params += [f'py::arg(\"inverse{i}\")', f'py::arg(\"leftMinusInf{i}\")', f'py::arg(\"leftCondition{i}\")',\r\n f'py::arg(\"rightMinusInf{i}\")', f'py::arg(\"rightCondition{i}\")',\r\n f'py::arg(\"leftIncluded{i}\")', f'py::arg(\"rightIncluded{i}\")', f'py::arg(\"nullValue{i}\")']\r\n\r\n value_dictionary = {\r\n 'spn_id': spn_id,\r\n 'method_body': method_body,\r\n 'method_params': ', '.join(method_params),\r\n 'node_count': get_number_of_nodes(spn),\r\n 'passed_params': ', '.join(passed_params),\r\n 'floating_data_type': floating_data_type\r\n }\r\n generated_method = replace_template(TemplatePath.METHOD_MASTER, value_dictionary, 0)\r\n registrate_method = replace_template(TemplatePath.REGISTRATION_MASTER, value_dictionary, 0)\r\n\r\n return generated_method, registrate_method", "def t_CCONST(t):\n return t", "def adaptCorbaToPython(self, *args):\n return _SALOMERuntime.RuntimeSALOME_adaptCorbaToPython(self, *args)", "def c_code(self, node, name, (a_val, a_ind, a_ptr, b), (z,), sub):\r\n # retrieve dtype number\r\n typenum_z = tensor.TensorType(self.dtype_out, []).dtype_specs()[2]\r\n if node.inputs[0].type.dtype in ('complex64', 'complex128'):\r\n raise NotImplementedError('Complex types are not supported for a_val')\r\n if node.inputs[3].type.dtype in ('complex64', 'complex128'):\r\n raise 
NotImplementedError('Complex types are not supported for b')\r\n\r\n return \"\"\"\r\n if (PyArray_NDIM(%(a_val)s) != 1) {PyErr_SetString(PyExc_NotImplementedError, \"rank(a_val) != 1\"); %(fail)s;}\r\n if (PyArray_NDIM(%(a_ind)s) != 1) {PyErr_SetString(PyExc_NotImplementedError, \"rank(a_ind) != 1\"); %(fail)s;}\r\n if (PyArray_NDIM(%(a_ptr)s) != 1) {PyErr_SetString(PyExc_NotImplementedError, \"rank(a_ptr) != 1\"); %(fail)s;}\r\n if (PyArray_NDIM(%(b)s) != 2) {PyErr_SetString(PyExc_NotImplementedError, \"rank(b) != 2\"); %(fail)s;}\r\n\r\n if (PyArray_TYPE(%(a_ind)s) != NPY_INT32) {\r\n PyErr_SetString(PyExc_NotImplementedError, \"a_ind dtype not INT32\"); %(fail)s;}\r\n\r\n if (PyArray_TYPE(%(a_ptr)s) != NPY_INT32)\r\n {PyErr_SetString(PyExc_NotImplementedError, \"a_ptr dtype not INT32\"); %(fail)s;}\r\n\r\n if (PyArray_DIMS(%(a_val)s)[0] != PyArray_DIMS(%(a_ind)s)[0])\r\n {PyErr_SetString(PyExc_NotImplementedError, \"a_val and a_ind have different lengths\"); %(fail)s;}\r\n\r\n if ((!%(z)s)\r\n || (PyArray_DIMS(%(z)s)[0] != PyArray_DIMS(%(a_ptr)s)[0]-1) //a's rows\r\n || (PyArray_DIMS(%(z)s)[1] != PyArray_DIMS(%(b)s)[1]) //b's columns\r\n )\r\n {\r\n {Py_XDECREF(%(z)s);}\r\n npy_intp dims[] = {0, 0};\r\n dims[0] = PyArray_DIMS(%(a_ptr)s)[0]-1;\r\n dims[1] = PyArray_DIMS(%(b)s)[1];\r\n %(z)s = (PyArrayObject*) PyArray_SimpleNew(2, dims, %(typenum_z)s);\r\n }\r\n\r\n {\r\n // sparse array has size MxK, dense KxN, output MxN\r\n npy_intp M = PyArray_DIMS(%(z)s)[0];\r\n npy_intp N = PyArray_DIMS(%(z)s)[1];\r\n npy_intp K = PyArray_DIMS(%(b)s)[0];\r\n\r\n // strides tell you how many bytes to skip to go to next column/row entry\r\n npy_intp Szm = PyArray_STRIDES(%(z)s)[0] / PyArray_DESCR(%(z)s)->elsize;\r\n npy_intp Szn = PyArray_STRIDES(%(z)s)[1] / PyArray_DESCR(%(z)s)->elsize;\r\n npy_intp Sbm = PyArray_STRIDES(%(b)s)[0] / PyArray_DESCR(%(b)s)->elsize;\r\n npy_intp Sbn = PyArray_STRIDES(%(b)s)[1] / PyArray_DESCR(%(b)s)->elsize;\r\n npy_intp Sval = PyArray_STRIDES(%(a_val)s)[0] / PyArray_DESCR(%(a_val)s)->elsize;\r\n npy_intp Sind = PyArray_STRIDES(%(a_ind)s)[0] / PyArray_DESCR(%(a_ind)s)->elsize;\r\n npy_intp Sptr = PyArray_STRIDES(%(a_ptr)s)[0] / PyArray_DESCR(%(a_ptr)s)->elsize;\r\n\r\n // pointers to access actual data in the arrays passed as params.\r\n dtype_%(z)s* __restrict__ Dz = (dtype_%(z)s*)PyArray_DATA(%(z)s);\r\n const dtype_%(a_val)s* __restrict__ Dval = (dtype_%(a_val)s*)PyArray_DATA(%(a_val)s);\r\n const npy_int32 * __restrict__ Dind = (npy_int32*)PyArray_DATA(%(a_ind)s);\r\n const npy_int32 * __restrict__ Dptr = (npy_int32*)PyArray_DATA(%(a_ptr)s);\r\n\r\n //npy_intp nnz = PyArray_DIMS(%(a_ind)s)[0];\r\n\r\n //clear the output array\r\n memset(Dz, 0, M*N*sizeof(dtype_%(z)s));\r\n\r\n //iterate over the sparse array, making the most of an entry wherever we find it.\r\n // Normal matrix matrix multiply:\r\n // for m\r\n // for n\r\n // for k\r\n // z[m, n] += a[m, k] * b[k, n]\r\n // Here instead:\r\n // for m\r\n // for k (sparse)\r\n // for n\r\n // z[m, n] += a[m, k] * b[k, n]\r\n\r\n // loop over inner dimension\r\n for (npy_int64 m = 0; m < M; ++m)\r\n {\r\n // pointer to m-th row of the output matrix Z\r\n dtype_%(z)s* __restrict__ zm = (dtype_%(z)s*)(PyArray_BYTES(%(z)s) + PyArray_STRIDES(%(z)s)[0] * m);\r\n\r\n // loop over sparse rows indices through index pointer array\r\n // (amounts to looping over cols k of sparse matrix)\r\n for (npy_int32 k_idx = Dptr[m * Sptr]; k_idx < Dptr[(m+1) * Sptr]; ++k_idx)\r\n {\r\n npy_int32 k = Dind[k_idx * Sind]; // col index of 
non-null value for row m\r\n const dtype_%(a_val)s Amk = Dval[k_idx * Sval]; // actual value at that location\r\n\r\n // get pointer to k-th row of dense matrix\r\n const dtype_%(b)s* __restrict__ bk = (dtype_%(b)s*)(PyArray_BYTES(%(b)s) + PyArray_STRIDES(%(b)s)[0] * k);\r\n\r\n // loop over final dimension (cols of dense matrix) and perform dot product\r\n for(npy_int32 n = 0; n < N; ++n)\r\n {\r\n zm[n*Szn] += Amk * bk[n*Sbn];\r\n }\r\n }\r\n }\r\n }\r\n\r\n \"\"\" % dict(locals(), **sub)", "def load_c_functions(self):\n\n # Load shared object\n lib = ctypes.cdll.LoadLibrary(os.path.join(self.working_directory,\"models/doubly_constrained/flow_forward_models.so\"))\n lib2 = ctypes.cdll.LoadLibrary(os.path.join(self.working_directory,\"models/doubly_constrained/potential_function.so\"))\n\n # Load DSF procedure flow inference\n self.infer_flows_dsf_procedure = lib.infer_flows_dsf_procedure\n self.infer_flows_dsf_procedure.restype = ctypes.c_double\n self.infer_flows_dsf_procedure.argtypes = [ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ctypes.c_size_t,\n ctypes.c_size_t,\n ctypes.c_double,\n ctypes.c_size_t,\n ctypes.c_bool,\n ctypes.c_bool]\n\n\n # Load Newton Raphson procedure flow inference\n self.infer_flows_newton_raphson = lib.infer_flows_newton_raphson\n self.infer_flows_newton_raphson.restype = None #ctypes.c_double\n self.infer_flows_newton_raphson.argtypes = [ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ctypes.c_double,\n ctypes.c_size_t,\n ctypes.c_size_t,\n ctypes.c_size_t,\n ctypes.c_size_t]\n\n # Load Iterative proportional filtering procedure flow inference\n self.infer_flows_ipf_procedure = lib.infer_flows_ipf_procedure\n self.infer_flows_ipf_procedure.restype = ctypes.c_double\n self.infer_flows_ipf_procedure.argtypes = [ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ctypes.c_size_t,\n ctypes.c_size_t,\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ctypes.c_size_t,\n ctypes.c_double,\n ctypes.c_bool]\n\n # Load Iterative proportional filtering procedure flow inference\n self.infer_flows_ipf_procedure_singly = lib.infer_flows_ipf_procedure_singly\n self.infer_flows_ipf_procedure_singly.restype = ctypes.c_double\n self.infer_flows_ipf_procedure_singly.argtypes = [ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ctypes.c_size_t,\n ctypes.c_size_t,\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ctypes.c_size_t,\n ctypes.c_double,\n ctypes.c_bool]\n\n # Load potential function\n self.potential_stochastic = 
lib2.potential_stochastic\n self.potential_stochastic.restype = ctypes.c_double\n self.potential_stochastic.argtypes = [ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ctypes.c_size_t,\n ctypes.c_size_t]", "def __call__(fun_name):", "def type_cast(func,data_entry,*args):\n assert isinstance(data_entry,str)\n assert callable(func)\n try:\n out=func(data_entry,*args)\n except:\n out=None\n return out", "def WrapFunction(lib, funcname, restype, argtypes):\n func = lib.__getattr__(funcname)\n func.restype = restype\n func.argtypes = argtypes\n return func", "def function(fnc, *args, **kwargs):\n return Function(fnc, args=args, kwargs=kwargs).tunable()", "def _init_signature(func_name, restype, argtypes):\n global cfi\n f = getattr(cfi, func_name)\n f.restype = restype\n f.argtypes = argtypes", "def test_vulkan_func_pointer_with_const_member() -> None:\n\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n <type category=\"funcpointer\">typedef VkBool32 (VKAPI_PTR *\n <name>PFN_vkDebugReportCallbackEXT</name>)(\n <type>VkDebugReportFlagsEXT</type> flags,\n <type>VkDebugReportObjectTypeEXT</type> objectType,\n <type>uint64_t</type> object,\n <type>size_t</type> location,\n <type>int32_t</type> messageCode,\n const <type>char</type>* pLayerPrefix,\n const <type>char</type>* pMessage,\n <type>void</type>* pUserData);</type>\n \"\"\"\n\n funcptr = funcptr_parser.parse(ET.fromstring(xml))\n\n argument_names = list(funcptr.arguments.keys())\n assert argument_names[4] == \"messageCode\"\n assert funcptr.arguments[\"pLayerPrefix\"].argument_type == \"const char*\"", "def __init__(self, name, c_arg):\n super().__init__(name)\n self._c_arg = c_arg", "def _cmplx_factory_ ( cmplxt , re , im ) :\n return cmplxt ( re , im )", "def __init__(self, compiler_module, function_name, type_signature):\n py_typecheck.check_type(compiler_module,\n iree_compiler.binding.CompilerModule)\n py_typecheck.check_type(function_name, str)\n py_typecheck.check_type(type_signature, computation_types.FunctionType)\n self._compiler_module = compiler_module\n self._function_name = function_name\n self._type_signature = type_signature", "def _wrap_FunctionDef(self, expr):\n if expr.is_private:\n return EmptyNode()\n\n name = self.scope.get_new_name(f'bind_c_{expr.name.lower()}')\n self._wrapper_names_dict[expr.name] = name\n\n # Create the scope\n func_scope = self.scope.new_child_scope(name)\n self.scope = func_scope\n\n self._additional_exprs = []\n\n if any(isinstance(a.var, FunctionAddress) for a in expr.arguments):\n warnings.warn(\"Functions with functions as arguments cannot be wrapped by pyccel\")\n return EmptyNode()\n\n # Wrap the arguments and collect the expressions passed as the call argument.\n func_arguments = [self._wrap(a) for a in expr.arguments]\n call_arguments = [self._get_call_argument(fa) for fa in func_arguments]\n func_to_call = {fa : ca for ca, fa in zip(call_arguments, func_arguments)}\n\n func_results = [self._wrap_FunctionDefResult(r) for r in expr.results]\n\n func_call_results = [r.var.clone(self.scope.get_expected_name(r.var.name)) for r in expr.results]\n\n body = self._get_function_def_body(expr, func_arguments, func_to_call, func_call_results)\n\n body.extend(self._additional_exprs)\n self._additional_exprs.clear()\n\n self.exit_scope()\n\n func = BindCFunctionDef(name, 
func_arguments, func_results, body, scope=func_scope, original_function = expr,\n doc_string = expr.doc_string)\n\n self.scope.functions[name] = func\n\n return func", "def to_PyMethodDef_entry(items):\r\n\r\n entry_type = items[0]\r\n items = items[1:]\r\n if entry_type == 'method':\r\n return 'FORWARDER(%s, %s, \"%s\", %s)' % items\r\n elif entry_type == 'function':\r\n return 'FREE_FORWARDER(%s, %s, \"%s\", %s)' % items\r\n elif entry_type == 'method_template':\r\n return 'FORWARDER(%s<common_type>, %s, \"%s\", %s)' % items\r\n else:\r\n assert False", "def cast(*args):\n return _itkEdgePotentialImageFilterPython.itkEdgePotentialImageFilterICVF33ID3_cast(*args)", "def transform_npu_function(self, _, func: relay.Function) -> relay.Function:\n\n tir_mod, const_dict = _lower_to_tir(func, self.scheduler)\n\n for param in const_dict.keys():\n const_dict[param] = tvm.nd.array(const_dict[param])\n\n compiler_name = \"ethos-u\"\n primfunc = tir_mod[\"main\"]\n primfunc = primfunc.with_attr(\"global_symbol\", func.attrs[\"global_symbol\"])\n primfunc = primfunc.with_attr(\"ethos-u.constants\", const_dict)\n primfunc = primfunc.with_attr(\"target\", tvm.target.Target(compiler_name))\n return primfunc", "def cppdef(src):\n with _stderr_capture() as err:\n errcode = gbl.gInterpreter.Declare(src)\n if not errcode:\n raise SyntaxError('Failed to parse the given C++ code%s' % err.err)\n return True", "def scalar_object_check(py_object, c_object):\n\n try :\n check_type = check_type_registry[c_object.dtype, c_object.precision]\n except KeyError:\n errors.report(PYCCEL_RESTRICTION_TODO, symbol=c_object.dtype,severity='fatal')\n\n check_func = FunctionDef(name = check_type,\n body = [],\n arguments = [Variable(dtype=PyccelPyObject(), name = 'o', is_pointer=True)],\n results = [Variable(dtype=NativeBool(), name = 'r')])\n\n return FunctionCall(check_func, [py_object])", "def _gen_code(self):\r\n #TODO: maybe generate one C function only to save compile time? Also easier to take that as a basis and hand craft other covariances??\r\n\r\n #generate c functions from sympy objects \r\n argument_sequence = self._sp_x+self._sp_z+self._sp_theta\r\n code_list = [('k',self._sp_k)]\r\n # gradients with respect to covariance input\r\n code_list += [('dk_d%s'%x.name,dx) for x,dx in zip(self._sp_x,self._sp_dk_dx)]\r\n # gradient with respect to parameters\r\n code_list += [('dk_d%s'%theta.name,dtheta) for theta,dtheta in zip(self._sp_theta,self._sp_dk_dtheta)]\r\n # gradient with respect to multiple output parameters\r\n if self.output_dim > 1:\r\n argument_sequence += self._sp_theta_i + self._sp_theta_j\r\n code_list += [('dk_d%s'%theta.name,dtheta) for theta,dtheta in zip(self._sp_theta_i,self._sp_dk_dtheta_i)]\r\n (foo_c,self._function_code), (foo_h,self._function_header) = \\\r\n codegen(code_list, \"C\",'foobar',argument_sequence=argument_sequence)\r\n #put the header file where we can find it\r\n f = file(os.path.join(tempfile.gettempdir(),'foobar.h'),'w')\r\n f.write(self._function_header)\r\n f.close()\r\n\r\n # Substitute any known derivatives which sympy doesn't compute\r\n self._function_code = re.sub('DiracDelta\\(.+?,.+?\\)','0.0',self._function_code)\r\n\r\n\r\n ############################################################\r\n # This is the basic argument construction for the C code. 
#\r\n ############################################################\r\n \r\n arg_list = ([\"X2(i, %s)\"%x.name[2:] for x in self._sp_x]\r\n + [\"Z2(j, %s)\"%z.name[2:] for z in self._sp_z])\r\n\r\n # for multiple outputs need to also provide these arguments reversed.\r\n if self.output_dim>1:\r\n reverse_arg_list = list(arg_list)\r\n reverse_arg_list.reverse()\r\n\r\n # Add in any 'shared' parameters to the list.\r\n param_arg_list = [shared_params.name for shared_params in self._sp_theta]\r\n arg_list += param_arg_list\r\n\r\n precompute_list=[]\r\n if self.output_dim > 1:\r\n reverse_arg_list+=list(param_arg_list)\r\n split_param_arg_list = [\"%s1(%s)\"%(theta.name[:-2].upper(),index) for index in ['ii', 'jj'] for theta in self._sp_theta_i]\r\n split_param_reverse_arg_list = [\"%s1(%s)\"%(theta.name[:-2].upper(),index) for index in ['jj', 'ii'] for theta in self._sp_theta_i]\r\n arg_list += split_param_arg_list\r\n reverse_arg_list += split_param_reverse_arg_list\r\n # Extract the right output indices from the inputs.\r\n c_define_output_indices = [' '*16 + \"int %s=(int)%s(%s, %i);\"%(index, var, index2, self.input_dim-1) for index, var, index2 in zip(['ii', 'jj'], ['X2', 'Z2'], ['i', 'j'])]\r\n precompute_list += c_define_output_indices\r\n reverse_arg_string = \", \".join(reverse_arg_list)\r\n arg_string = \", \".join(arg_list)\r\n precompute_string = \"\\n\".join(precompute_list)\r\n\r\n # Code to compute argments string needed when only X is provided.\r\n X_arg_string = re.sub('Z','X',arg_string)\r\n # Code to compute argument string when only diagonal is required.\r\n diag_arg_string = re.sub('int jj','//int jj',X_arg_string)\r\n diag_arg_string = re.sub('j','i',diag_arg_string)\r\n if precompute_string == '':\r\n # if it's not multioutput, the precompute strings are set to zero\r\n diag_precompute_string = ''\r\n diag_precompute_replace = ''\r\n else:\r\n # for multioutput we need to extract the index of the output form the input.\r\n diag_precompute_string = precompute_list[0]\r\n diag_precompute_replace = precompute_list[1]\r\n \r\n\r\n # Here's the code to do the looping for K\r\n self._K_code =\\\r\n \"\"\"\r\n // _K_code\r\n // Code for computing the covariance function.\r\n int i;\r\n int j;\r\n int N = target_array->dimensions[0];\r\n int num_inducing = target_array->dimensions[1];\r\n int input_dim = X_array->dimensions[1];\r\n //#pragma omp parallel for private(j)\r\n for (i=0;i<N;i++){\r\n for (j=0;j<num_inducing;j++){\r\n%s\r\n //target[i*num_inducing+j] = \r\n TARGET2(i, j) += k(%s);\r\n }\r\n }\r\n %s\r\n \"\"\"%(precompute_string,arg_string,\"/*\"+str(self._sp_k)+\"*/\") #adding a string representation forces recompile when needed\r\n\r\n self._K_code_X = \"\"\"\r\n // _K_code_X\r\n // Code for computing the covariance function.\r\n int i;\r\n int j;\r\n int N = target_array->dimensions[0];\r\n int num_inducing = target_array->dimensions[1];\r\n int input_dim = X_array->dimensions[1];\r\n //#pragma omp parallel for private(j)\r\n for (i=0;i<N;i++){\r\n %s // int ii=(int)X2(i, 1);\r\n TARGET2(i, i) += k(%s);\r\n for (j=0;j<i;j++){\r\n %s //int jj=(int)X2(j, 1);\r\n double kval = k(%s); //double kval = k(X2(i, 0), shared_lengthscale, LENGTHSCALE1(ii), SCALE1(ii));\r\n TARGET2(i, j) += kval;\r\n TARGET2(j, i) += kval;\r\n }\r\n }\r\n /*%s*/\r\n \"\"\"%(diag_precompute_string, diag_arg_string, re.sub('Z2', 'X2', diag_precompute_replace), X_arg_string,str(self._sp_k)) #adding a string representation forces recompile when needed\r\n\r\n # Code to do the looping for 
Kdiag\r\n self._Kdiag_code =\\\r\n \"\"\"\r\n // _Kdiag_code\r\n // Code for computing diagonal of covariance function.\r\n int i;\r\n int N = target_array->dimensions[0];\r\n int input_dim = X_array->dimensions[1];\r\n //#pragma omp parallel for\r\n for (i=0;i<N;i++){\r\n %s\r\n //target[i] =\r\n TARGET1(i)=k(%s);\r\n }\r\n %s\r\n \"\"\"%(diag_precompute_string,diag_arg_string,\"/*\"+str(self._sp_k)+\"*/\") #adding a string representation forces recompile when needed\r\n\r\n # Code to compute gradients\r\n grad_func_list = []\r\n if self.output_dim>1:\r\n grad_func_list += c_define_output_indices\r\n grad_func_list += [' '*16 + 'TARGET1(%i+ii) += PARTIAL2(i, j)*dk_d%s(%s);'%(self.num_shared_params+i*self.output_dim, theta.name, arg_string) for i, theta in enumerate(self._sp_theta_i)]\r\n grad_func_list += [' '*16 + 'TARGET1(%i+jj) += PARTIAL2(i, j)*dk_d%s(%s);'%(self.num_shared_params+i*self.output_dim, theta.name, reverse_arg_string) for i, theta in enumerate(self._sp_theta_i)]\r\n grad_func_list += ([' '*16 + 'TARGET1(%i) += PARTIAL2(i, j)*dk_d%s(%s);'%(i,theta.name,arg_string) for i,theta in enumerate(self._sp_theta)])\r\n grad_func_string = '\\n'.join(grad_func_list) \r\n\r\n self._dK_dtheta_code =\\\r\n \"\"\"\r\n // _dK_dtheta_code\r\n // Code for computing gradient of covariance with respect to parameters.\r\n int i;\r\n int j;\r\n int N = partial_array->dimensions[0];\r\n int num_inducing = partial_array->dimensions[1];\r\n int input_dim = X_array->dimensions[1];\r\n //#pragma omp parallel for private(j)\r\n for (i=0;i<N;i++){\r\n for (j=0;j<num_inducing;j++){\r\n%s\r\n }\r\n }\r\n %s\r\n \"\"\"%(grad_func_string,\"/*\"+str(self._sp_k)+\"*/\") # adding a string representation forces recompile when needed\r\n\r\n\r\n # Code to compute gradients for Kdiag TODO: needs clean up\r\n diag_grad_func_string = re.sub('Z','X',grad_func_string,count=0)\r\n diag_grad_func_string = re.sub('int jj','//int jj',diag_grad_func_string)\r\n diag_grad_func_string = re.sub('j','i',diag_grad_func_string)\r\n diag_grad_func_string = re.sub('PARTIAL2\\(i, i\\)','PARTIAL1(i)',diag_grad_func_string)\r\n self._dKdiag_dtheta_code =\\\r\n \"\"\"\r\n // _dKdiag_dtheta_code\r\n // Code for computing gradient of diagonal with respect to parameters.\r\n int i;\r\n int N = partial_array->dimensions[0];\r\n int input_dim = X_array->dimensions[1];\r\n for (i=0;i<N;i++){\r\n %s\r\n }\r\n %s\r\n \"\"\"%(diag_grad_func_string,\"/*\"+str(self._sp_k)+\"*/\") #adding a string representation forces recompile when needed\r\n\r\n # Code for gradients wrt X, TODO: may need to deal with special case where one input is actually an output.\r\n gradX_func_list = []\r\n if self.output_dim>1:\r\n gradX_func_list += c_define_output_indices\r\n gradX_func_list += [\"TARGET2(i, %i) += PARTIAL2(i, j)*dk_dx_%i(%s);\"%(q,q,arg_string) for q in range(self._real_input_dim)]\r\n gradX_func_string = \"\\n\".join(gradX_func_list)\r\n\r\n self._dK_dX_code = \\\r\n \"\"\"\r\n // _dK_dX_code\r\n // Code for computing gradient of covariance with respect to inputs.\r\n int i;\r\n int j;\r\n int N = partial_array->dimensions[0];\r\n int num_inducing = partial_array->dimensions[1];\r\n int input_dim = X_array->dimensions[1];\r\n //#pragma omp parallel for private(j)\r\n for (i=0;i<N; i++){\r\n for (j=0; j<num_inducing; j++){\r\n %s\r\n }\r\n }\r\n %s\r\n \"\"\"%(gradX_func_string,\"/*\"+str(self._sp_k)+\"*/\") #adding a string representation forces recompile when needed\r\n \r\n\r\n diag_gradX_func_string = 
re.sub('Z','X',gradX_func_string,count=0)\r\n diag_gradX_func_string = re.sub('int jj','//int jj',diag_gradX_func_string)\r\n diag_gradX_func_string = re.sub('j','i',diag_gradX_func_string)\r\n diag_gradX_func_string = re.sub('PARTIAL2\\(i, i\\)','2*PARTIAL1(i)',diag_gradX_func_string)\r\n\r\n # Code for gradients of Kdiag wrt X\r\n self._dKdiag_dX_code= \\\r\n \"\"\"\r\n // _dKdiag_dX_code\r\n // Code for computing gradient of diagonal with respect to inputs.\r\n int N = partial_array->dimensions[0];\r\n int input_dim = X_array->dimensions[1];\r\n for (int i=0;i<N; i++){\r\n %s\r\n }\r\n %s\r\n \"\"\"%(diag_gradX_func_string,\"/*\"+str(self._sp_k)+\"*/\") #adding a\r\n # string representation forces recompile when needed Get rid\r\n # of Zs in argument for diagonal. TODO: Why wasn't\r\n # diag_func_string called here? Need to check that.\r\n #self._dKdiag_dX_code = self._dKdiag_dX_code.replace('Z[j', 'X[i')\r\n\r\n # Code to use when only X is provided. \r\n self._dK_dtheta_code_X = self._dK_dtheta_code.replace('Z[', 'X[')\r\n self._dK_dX_code_X = self._dK_dX_code.replace('Z[', 'X[').replace('+= PARTIAL2(', '+= 2*PARTIAL2(') \r\n self._dK_dtheta_code_X = self._dK_dtheta_code_X.replace('Z2(', 'X2(')\r\n self._dK_dX_code_X = self._dK_dX_code_X.replace('Z2(', 'X2(')\r\n\r\n\r\n #TODO: insert multiple functions here via string manipulation\r\n #TODO: similar functions for psi_stats\r", "def _from_c_repr(c_repr):\n # We create a dummy module with a global variable of the requested type,\n # parse that module, and return the type of the global variable.\n # Include stdint.h to recognize the intX_t typedefs.\n module = parse(\"\"\"\n #include <stdint.h>\n\n {} a;\n \"\"\".format(c_repr))\n return module.global_vars['a'].type", "def fsig(\n arg_types: ArgTypes, name: Text, span: Span, ctx: DeduceCtx,\n parametric_bindings: Optional[ParametricBindings]\n) -> Tuple[ConcreteType, SymbolicBindings]:\n logging.vlog(5, 'Instantiating for builtin %r @ %s', name, span)\n _Checker(arg_types, name, span).len(2).is_array(0).is_fn(1, argc=1)\n t = arg_types[0].get_element_type() # pytype: disable=attribute-error\n u, symbolic_bindings = parametric_instantiator.instantiate_function(\n span, arg_types[1], (t,), ctx, parametric_bindings, {})\n return_type = ArrayType(u, arg_types[0].size) # pytype: disable=attribute-error\n return FunctionType(arg_types, return_type), symbolic_bindings", "def cast(*args):\n return _itkEdgePotentialImageFilterPython.itkEdgePotentialImageFilterICVF33IF3_cast(*args)", "def create_typedef(*args):\n return _ida_hexrays.create_typedef(*args)", "def _cast_strlist_to_C(py_strlist):\n c_strarr = (str_t * len(py_strlist))()\n c_strarr[:] = py_strlist\n return c_strarr", "def convert_function(self, access_modifier, return_type, func_name, params):\n\n # Run super func_name\n access_modifier, return_type, func_name, params = \\\n super().convert_function(access_modifier, return_type,\n func_name, params)\n\n # Make and return processed function definition\n return [self.make_function_definition(return_type, func_name, params)], []", "def libSetup(path):\n lib = CDLL(path)\n lib.visitPoints.argtypes = [c_int, c_int, c_char_p]\n lib.visitPoints.restype = c_int\n return lib", "def cython_ctype(self, t):\n t = self.canon(t)\n if t in self.cython_ctypes:\n return self.cython_ctypes[t]\n if isinstance(t, basestring):\n if t in self.base_types:\n return self.cython_ctypes[t]\n # must be tuple below this line\n tlen = len(t)\n if 2 == tlen:\n if 0 == t[1]:\n return self.cython_ctype(t[0])\n elif 
self.isrefinement(t[1]):\n if t[1][0] in self.cython_ctypes:\n subtype = self.cython_ctypes[t[1][0]]\n if callable(subtype):\n subtype = subtype(t[1], self)\n return subtype\n else:\n return self.cython_ctype(t[0])\n else:\n last = '[{0}]'.format(t[-1]) if isinstance(t[-1], int) else t[-1]\n return self._cython_ctype_add_predicate(self.cython_ctype(t[0]), last)\n elif 3 <= tlen:\n assert t[0] in self.template_types\n assert len(t) == len(self.template_types[t[0]]) + 2\n template_name = self.cython_ctypes[t[0]]\n assert template_name is not NotImplemented\n template_filling = []\n for x in t[1:-1]:\n #if isinstance(x, bool):\n # x = _cython_ctypes[x]\n #elif isinstance(x, Number):\n if isinstance(x, Number):\n x = str(x)\n else:\n x = self.cython_ctype(x)\n template_filling.append(x)\n cyct = '{0}[{1}]'.format(template_name, ', '.join(template_filling))\n if 0 != t[-1]:\n last = '[{0}]'.format(t[-1]) if isinstance(t[-1], int) else t[-1]\n cyct = self._cython_ctype_add_predicate(cyct, last)\n return cyct", "def cast(*args):\n return _itkCosImageFilterPython.itkCosImageFilterID3ID3_cast(*args)", "def map_string2func(funcname, clss, compute_capability):\n if \"_get_\" + funcname not in globals():\n raise AttributeError(\"kernel type '\" + funcname + \"' not understood\")\n return globals()[\"_get_\" + funcname](clss, compute_capability)", "def boilerplate(attr: st.EncodingAttr):\n return f\"\"\"\nfunc.func @main(%a: tensor<8x8xf64>,\n %b: tensor<8x8xf64>,\n %c: tensor<8x8xf64>) -> tensor<8x8xf64> attributes {{ llvm.emit_c_interface }} {{\n %t = arith.constant sparse<[[0,0], [0,2], [4,1]], [1.0, 2.0, 3.0]> : tensor<8x8xf64>\n %s = sparse_tensor.convert %t : tensor<8x8xf64> to tensor<8x8xf64, {attr}>\n %0 = call @sddmm(%a, %b, %s, %c) : (tensor<8x8xf64>,\n tensor<8x8xf64>,\n tensor<8x8xf64, {attr}>,\n tensor<8x8xf64>) -> tensor<8x8xf64>\n return %0 : tensor<8x8xf64>\n}}\n\"\"\"", "def cast(*args):\n return _ITKCostFunctionsPython.itkShapePriorMAPCostFunctionIF2F_cast(*args)", "def cast(*args):\n return _ITKCostFunctionsPython.itkShapePriorMAPCostFunctionIF3F_cast(*args)", "def generic_function(self, node, ordered_functions):\n for generic in node.fortran_generic:\n new = node.clone()\n ordered_functions.append(new)\n self.append_function_index(new)\n new._generated = \"fortran_generic\"\n fmt = new.fmtdict\n # XXX append to existing suffix\n if generic.fmtdict:\n fmt.update(generic.fmtdict)\n fmt.function_suffix = fmt.function_suffix + generic.function_suffix\n new.fortran_generic = {}\n new.wrap.assign(fortran=True)\n new.ast.declarator.params = generic.decls\n\n # Try to call original C function if possible.\n # All arguments are native scalar.\n need_wrapper = False\n if new.ast.declarator.is_indirect():\n need_wrapper = True\n \n for arg in new.ast.declarator.params:\n if arg.declarator.is_indirect():\n need_wrapper = True\n break\n elif arg.typemap.sgroup == \"native\":\n pass\n else:\n need_wrapper = True\n break\n\n if need_wrapper:\n # The C wrapper is required to cast constants.\n # generic.yaml: GenericReal\n new.C_force_wrapper = True\n new.wrap.c = True\n new._PTR_C_CXX_index = node._function_index\n else:\n new._PTR_F_C_index = node._function_index\n \n # Do not process templated node, instead process\n # generated functions above.\n # node.wrap.c = False\n node.wrap.fortran = False", "def itkCosImageFilterID3ID3_cast(*args):\n return _itkCosImageFilterPython.itkCosImageFilterID3ID3_cast(*args)", "def _make_cpp_event(type, target):\n return EventCpp(type, target)", "def 
itkCosImageFilterID2ID2_cast(*args):\n return _itkCosImageFilterPython.itkCosImageFilterID2ID2_cast(*args)", "def register_shape_c_code(type, code, version=()):\r\n Shape.c_code_and_version[type] = (code, version)", "def SBMLFunctionDefinitionConverter_init():\n return _libsbml.SBMLFunctionDefinitionConverter_init()", "def _create_function(self, expr):\n bb_entry = self.fn.append_basic_block('entry')\n builder = ll.IRBuilder(bb_entry)\n\n lj = LLVMJitCallbackPrinter(self.module, builder, self.fn,\n func_arg_map=self.param_dict)\n\n ret = self._convert_expr(lj, expr)\n\n if self.signature.ret_arg:\n output_fp_ptr = builder.bitcast(self.fn.args[self.signature.ret_arg],\n ll.PointerType(self.fp_type))\n for i, val in enumerate(ret):\n index = ll.Constant(ll.IntType(32), i)\n output_array_ptr = builder.gep(output_fp_ptr, [index])\n builder.store(val, output_array_ptr)\n builder.ret(ll.Constant(ll.IntType(32), 0)) # return success\n else:\n lj.builder.ret(self._wrap_return(lj, ret))\n\n strmod = str(self.module)\n return strmod" ]
[ "0.70022583", "0.67676306", "0.6716482", "0.65700346", "0.6408847", "0.6320961", "0.6177105", "0.61178625", "0.609686", "0.60915", "0.6037971", "0.60285324", "0.5978813", "0.5958597", "0.59014344", "0.58797467", "0.58504516", "0.58494455", "0.5848168", "0.5846862", "0.5815121", "0.5813147", "0.5790319", "0.57641983", "0.57453537", "0.5734473", "0.57235515", "0.5712704", "0.5711848", "0.56919324", "0.5654568", "0.565398", "0.5653504", "0.56321156", "0.5581493", "0.5571793", "0.5563232", "0.55631405", "0.55475354", "0.55452687", "0.55345213", "0.5520532", "0.5494658", "0.54893047", "0.54817057", "0.5475919", "0.5472627", "0.54689586", "0.54645896", "0.54609966", "0.54604405", "0.53960496", "0.53850996", "0.5356683", "0.5344266", "0.53294694", "0.5308613", "0.5308613", "0.5307777", "0.52947044", "0.52907014", "0.5281514", "0.5280184", "0.5278927", "0.5276421", "0.52643424", "0.52550054", "0.5248458", "0.5215333", "0.52049977", "0.5195425", "0.51893514", "0.5182841", "0.51797175", "0.517909", "0.5176905", "0.516932", "0.5161225", "0.51587266", "0.5154722", "0.51528007", "0.51491725", "0.5145379", "0.5140886", "0.5124449", "0.51129514", "0.5099024", "0.5097939", "0.508929", "0.5082725", "0.5072529", "0.50716126", "0.5068999", "0.5065746", "0.50562656", "0.5054903", "0.5053489", "0.5051426", "0.5050489", "0.5047456" ]
0.7423723
0
Create FunctionDef responsible for casting c argument to python
def C_to_Python(c_object): try : cast_function = c_to_py_registry[(c_object.dtype, c_object.precision)] except KeyError: errors.report(PYCCEL_RESTRICTION_TODO, symbol=c_object.dtype,severity='fatal') cast_func = FunctionDef(name = cast_function, body = [], arguments = [Variable(dtype=c_object.dtype, name = 'v', precision = c_object.precision)], results = [Variable(dtype=PyccelPyObject(), name = 'o', is_pointer=True)]) return cast_func
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Python_to_C(c_object):\n try :\n cast_function = py_to_c_registry[(c_object.dtype, c_object.precision)]\n except KeyError:\n errors.report(PYCCEL_RESTRICTION_TODO, symbol=c_object.dtype,severity='fatal')\n cast_func = FunctionDef(name = cast_function,\n body = [],\n arguments = [Variable(dtype=PyccelPyObject(), name = 'o', is_pointer=True)],\n results = [Variable(dtype=c_object.dtype, name = 'v', precision = c_object.precision)])\n\n return cast_func", "def cython_c2py_conv_function_pointer(t_, ts):\n t = t_[1]\n argnames = []\n argdecls = []\n argbodys = []\n argrtns = []\n for n, argt in t[1][2]:\n argnames.append(n)\n decl, body, rtn = ts.cython_py2c(n, argt, proxy_name=\"c_\" + n)\n argdecls += decl.split('\\n') if isinstance(decl,basestring) else [decl]\n argbodys += body.split('\\n') if isinstance(body,basestring) else [body]\n argrtns += rtn.split('\\n') if isinstance(rtn,basestring) else [rtn]\n rtnname = 'rtn'\n rtnprox = 'c_' + rtnname\n rtncall = 'c_call_' + rtnname\n while rtnname in argnames or rtnprox in argnames:\n rtnname += '_'\n rtnprox += '_'\n argdecls = indent(argdecls)\n argbodys = indent(argbodys)\n rtndecl, rtnbody, rtnrtn, _ = ts.cython_c2py(rtncall, t[2][2],\n cached=False, proxy_name=rtnprox, existing_name=rtncall)\n if rtndecl is None and rtnbody is None:\n rtnprox = rtnname\n rtndecls = [rtndecl]\n returns_void = (t[2][2] == 'void')\n if not returns_void:\n rtndecls.append(\"cdef {0} {1}\".format(ts.cython_ctype(t[2][2]),\n rtncall))\n rtndecl = indent(rtndecls)\n rtnbody = indent(rtnbody)\n s = ('def {{proxy_name}}({arglist}):\\n'\n '{argdecls}\\n'\n '{rtndecl}\\n'\n ' if {{var}} == NULL:\\n'\n ' raise RuntimeError(\"{{var}} is NULL and may not be '\n 'safely called!\")\\n'\n '{argbodys}\\n')\n s += ' {{var}}({carglist})\\n' if returns_void else \\\n ' {rtncall} = {{var}}({carglist})\\n'\n s += '{rtnbody}\\n'\n s = s.format(arglist=\", \".join(argnames), argdecls=argdecls,\n cvartypeptr=ts.cython_ctype(t_).format(type_name='cvartype'),\n argbodys=argbodys, rtndecl=rtndecl, rtnprox=rtnprox,\n rtncall=rtncall, carglist=\", \".join(argrtns), rtnbody=rtnbody)\n caches = 'if {cache_name} is None:\\n' + indent(s)\n if not returns_void:\n caches += \"\\n return {rtnrtn}\".format(rtnrtn=rtnrtn)\n caches += '\\n {cache_name} = {proxy_name}\\n'\n return s, s, caches", "def make_c_function_stubs(self):\n fn =\\\n\"\"\"{rettype} {fnname}({args}){{\n {rettype} ret;\n\n ret = {cast_and_deref}___madz_LANG_python_OUTPUT.{nodename}({argnames});\n\n return ret;\n}}\n\n\"\"\"\n fn_no_return =\\\n\"\"\"{rettype} {fnname}({args}){{\n ___madz_LANG_python_OUTPUT.{nodename}({argnames});\n return;\n}}\n\n\"\"\"\n res = \"\"\n c_gen = c_wrapgen.CGenerator([],\"\", self.description)\n for node in self.description.definitions():\n if isinstance(node.type.get_type(), pdl.TypeFunction):\n fragments = {\n \"maybe_parentheses\": \")\" if isinstance(node.type.return_type.get_type(),pdl.TypeStruct) else \"\",\n \"cast_and_deref\": self.make_c_cast_deref_string(c_gen, node.type.return_type),\n \"rettype\": c_gen.gen_type_string(\"\", node.type.return_type),\n \"fnname\": \"___madz_LANG_python_FN_\" + node.name,\n \"nodename\": node.name,\n \"args\": \",\".join(map(\n lambda a: c_gen.gen_type_string(a.name, a.type),\n node.type.args)),\n \"argnames\":\",\".join(map(\n lambda a: a.name,\n node.type.args))\n }\n res += (fn if not isinstance(node.type.return_type, pdl.TypeTypeNone) else fn_no_return).format(**fragments)\n return res", "def cpp_function(self):", "def 
convert_result_as_arg(self, node, ordered_functions):\n return ordered_functions # XXX - do nothing for now\n options = node.options\n fmt_func = node.fmtdict\n# if options.F_string_len_trim is False: # XXX what about vector?\n# return\n\n ast = node.ast\n result_typemap = ast.typemap\n result_name = None\n\n # Check if result needs to be an argument.\n attrs = ast.attrs\n meta = ast.metaattrs\n if meta[\"deref\"] == \"raw\":\n # No bufferify required for raw pointer result.\n pass\n elif result_typemap.sgroup in [\"char\", \"string\"]:\n result_name = fmt_func.F_string_result_as_arg\n# result_as_arg = fmt_func.F_string_result_as_arg\n# result_name = result_as_arg or fmt_func.C_string_result_as_arg\n# elif result_typemap.base == \"vector\":\n# has_vector_result = True\n# elif result_is_ptr:\n# if meta[\"deref\"] in [\"allocatable\", \"pointer\"]:\n# need_cdesc_result = True\n# elif attrs[\"dimension\"]:\n# need_cdesc_result = True\n\n if not result_name:\n return\n\n##########\n # Create a new C function and change arguments\n # and add attributes.\n C_new = node.clone()\n ordered_functions.append(C_new)\n self.append_function_index(C_new)\n\n# generated_suffix = \"buf\"\n C_new._generated = \"result_to_arg\"\n fmt_func = C_new.fmtdict\n# fmt_func.function_suffix = fmt_func.function_suffix + fmt_func.C_bufferify_suffix + \"XXX\"\n# fmt_func.function_suffix = fmt_func.function_suffix\n\n options = C_new.options\n C_new.wrap.assign(c=True, fortran=True)\n C_new._PTR_C_CXX_index = node._function_index\n##########\n\n # decl: const char * getCharPtr2()\n new_arg = C_new.ast.result_as_arg(result_name)\n new_arg.const = False # must be writeable\n# attrs = new_arg.attrs\n# new_arg.metaattrs[\"deref\"] = None\n # Special case for wrapf.py to override \"allocatable\"\n\n # Special case for wrapf.py to override \"allocatable\"\n node.ast.metaattrs[\"deref\"] = None\n new_arg.metaattrs[\"deref\"] = \"result\"\n new_arg.metaattrs[\"is_result\"] = True\n C_new.ast.metaattrs[\"intent\"] = \"subroutine\"\n C_new.ast.metaattrs[\"deref\"] = None\n\n node.wrap.fortran = False\n# node.wrap.c = False\n\n return\n F_new = self.result_as_arg(node, C_new)\n ordered_functions.append(F_new)\n self.append_function_index(F_new)", "def arg_to_CFI(self, node, ordered_functions):\n options = node.options\n fmt_func = node.fmtdict\n\n if options.wrap_fortran is False:\n # The buffer function is intended to be called by Fortran.\n # No Fortran, no need for buffer function.\n return\n\n ast = node.ast\n declarator = ast.declarator\n result_typemap = ast.typemap\n # shadow classes have not been added yet.\n # Only care about string, vector here.\n result_is_ptr = declarator.is_indirect()\n if (\n result_typemap\n and result_typemap.base in [\"string\", \"vector\"]\n and result_typemap.name != \"char\"\n and not result_is_ptr\n ):\n node.wrap.c = False\n # node.wrap.fortran = False\n self.config.log.write(\n \"Skipping {}, unable to create C wrapper \"\n \"for function returning {} instance\"\n \" (must return a pointer or reference).\"\n \" Bufferify version will still be created.\\n\".format(\n result_typemap.cxx_type, declarator.user_name\n )\n )\n \n cfi_args = {}\n for arg in ast.declarator.params:\n declarator = arg.declarator\n name = declarator.user_name\n attrs = declarator.attrs\n meta = declarator.metaattrs\n cfi_args[name] = False\n arg_typemap = arg.typemap\n if meta[\"api\"]:\n # API explicitly set by user.\n continue\n elif meta[\"assumed-rank\"]:\n cfi_args[name] = True\n elif attrs[\"rank\"]:\n 
cfi_args[name] = True\n elif arg_typemap.sgroup == \"string\":\n cfi_args[name] = True\n elif arg_typemap.sgroup == \"char\":\n if declarator.is_indirect():\n cfi_args[name] = True\n elif meta[\"deref\"] in [\"allocatable\", \"pointer\"]:\n cfi_args[name] = True\n has_cfi_arg = any(cfi_args.values())\n\n # Function result.\n need_buf_result = None\n\n result_as_arg = \"\" # Only applies to string functions\n # when the result is added as an argument to the Fortran api.\n\n # Check if result needs to be an argument.\n declarator = ast.declarator\n attrs = declarator.attrs\n meta = declarator.metaattrs\n if meta[\"deref\"] == \"raw\":\n # No bufferify required for raw pointer result.\n pass\n elif result_typemap.sgroup == \"string\":\n need_buf_result = \"cfi\"\n result_as_arg = fmt_func.F_string_result_as_arg\n result_name = result_as_arg or fmt_func.C_string_result_as_arg\n elif result_typemap.sgroup == \"char\" and result_is_ptr:\n need_buf_result = \"cfi\"\n result_as_arg = fmt_func.F_string_result_as_arg\n result_name = result_as_arg or fmt_func.C_string_result_as_arg\n elif meta[\"deref\"] in [\"allocatable\", \"pointer\"]:\n need_buf_result = \"cfi\"\n\n if not (need_buf_result or\n has_cfi_arg):\n return False\n\n options.wrap_fortran = False\n\n # Create a new C function and change arguments\n # and add attributes.\n C_new = node.clone()\n ordered_functions.append(C_new)\n self.append_function_index(C_new)\n\n generated_suffix = \"cfi\"\n C_new._generated = \"arg_to_cfi\"\n C_new.splicer_group = \"cfi\"\n if need_buf_result:\n C_new.ast.declarator.metaattrs[\"api\"] = need_buf_result\n fmt_func = C_new.fmtdict\n fmt_func.function_suffix = fmt_func.function_suffix + fmt_func.C_cfi_suffix\n\n C_new.wrap.assign(c=True)#, fortran=True)\n C_new._PTR_C_CXX_index = node._function_index\n\n for arg in C_new.ast.declarator.params:\n name = arg.declarator.user_name\n if cfi_args[name]:\n arg.declarator.metaattrs[\"api\"] = generated_suffix\n\n ast = C_new.ast\n if True: # preserve to avoid changing indention for now.\n f_attrs = node.ast.declarator.attrs # Fortran function attributes\n f_meta = node.ast.declarator.metaattrs # Fortran function attributes\n if result_as_arg:\n # decl: const char * getCharPtr2() +len(30)\n # +len implies copying into users buffer.\n result_as_string = ast.result_as_arg(result_name)\n result_as_string.const = False # must be writeable\n attrs = result_as_string.declarator.attrs\n # Special case for wrapf.py to override \"allocatable\"\n f_meta[\"deref\"] = None\n result_as_string.declarator.metaattrs[\"api\"] = \"cfi\"\n result_as_string.declarator.metaattrs[\"deref\"] = \"result\"\n result_as_string.declarator.metaattrs[\"is_result\"] = True\n C_new.ast.declarator.metaattrs[\"api\"] = None\n C_new.ast.declarator.metaattrs[\"intent\"] = \"subroutine\"\n C_new.ast.declarator.metaattrs[\"deref\"] = None\n\n if result_as_arg:\n F_new = self.result_as_arg(node, C_new)\n ordered_functions.append(F_new)\n self.append_function_index(F_new)\n else:\n if node._generated in [\"result_to_arg\", \"fortran_generic\", \"getter/setter\"]:\n node.wrap.c = False\n # Fortran function may call C subroutine if string/vector result\n # Fortran function calls bufferify function.\n node._PTR_F_C_index = C_new._function_index\n return True", "def get_pytype(self, c_arg, parse_arg):\n if isinstance(c_arg, FunctionAddress):\n return 'O'\n else:\n try:\n return pytype_parse_registry[(parse_arg.dtype, parse_arg.precision)]\n except KeyError as e:\n raise NotImplementedError(\"Type not 
implemented for argument collection : \"+str(type(parse_arg))) from e", "def make_c_header(self):\n res = \\\n\"\"\"PyThreadState* ___madz_LANG_python_thread_state; //Holds Thread State for this interpreter\nPyObject *___madz_LANG_python_wrapper_module; //Hold Pointer to the _madz.py file representing this plugin\ntypedef struct{{\n{function_pointers}\n}}___madz_LANG_python_TYPE_;\n___madz_LANG_python_TYPE_ ___madz_LANG_python_OUTPUT;\nvoid ___madz_init_imports();\n{fn_dec}\n\n\"\"\"\n c_gen = c_wrapgen.CGenerator([],\"\", self.description)\n #TODO function_pointers, all same except\n fragments ={\"fn_dec\" : \"\", \"function_pointers\" : \"\"}\n fn = \"\"\"{rettype}{fnname}({args});\\n\"\"\"\n pointer = \"\"\" {prettype} (*{nodename})({args});\\n\"\"\"\n for node in self.description.definitions():\n if isinstance(node.type.get_type(), pdl.TypeFunction):\n frg = {\n \"prettype\": c_gen.gen_type_string(\"\", node.type.return_type),\n \"rettype\": c_gen.gen_type_string(\"\", node.type.return_type),\n \"fnname\": \"___madz_LANG_python_FN_\" + node.name,\n \"nodename\": node.name,\n \"args\": \",\".join(map(\n lambda a: c_gen.gen_type_string(a.name, a.type),\n node.type.args)),\n\n }\n fragments[\"fn_dec\"] += fn.format(**frg)\n fragments[\"function_pointers\"] += pointer.format(**frg)\n if fragments[\"function_pointers\"] == \"\":\n fragments[\"function_pointers\"] = \"uint8_t _madz_empty;\"\n return res.format(**fragments)", "def arg_to_buffer(self, node, ordered_functions):\n options = node.options\n fmt_func = node.fmtdict\n\n if node.wrap.c is False:\n# if options.wrap_c is False: # XXX cdesc.yaml GetScalar2\n # The user does not require a C wrapper.\n # This can be the case if the Fortran wrapper is doing all\n # the work via splicer or fstatements.\n return\n\n # If a C++ function returns a std::string instance,\n # the default wrapper will not compile since the wrapper\n # will be declared as char. It will also want to return the\n # c_str of a stack variable. 
Warn and turn off the wrapper.\n ast = node.ast\n declarator = ast.declarator\n result_typemap = ast.typemap\n # shadow classes have not been added yet.\n # Only care about string, vector here.\n result_is_ptr = ast.declarator.is_indirect()\n if (\n result_typemap\n and result_typemap.base in [\"string\", \"vector\"]\n and result_typemap.name != \"char\"\n and not result_is_ptr\n ):\n node.wrap.c = False\n # node.wrap.fortran = False\n self.config.log.write(\n \"Skipping {}, unable to create C wrapper \"\n \"for function returning {} instance\"\n \" (must return a pointer or reference).\"\n \" Bufferify version will still be created.\\n\".format(\n result_typemap.cxx_type, declarator.user_name\n )\n )\n\n if node.wrap.fortran is False:\n # The buffer function is intended to be called by Fortran.\n # No Fortran, no need for buffer function.\n return\n if options.F_string_len_trim is False: # XXX what about vector?\n return\n\n # Arguments.\n # Is result or any argument a string or vector?\n # If so, additional arguments will be passed down so\n # create buffer version of function.\n buf_args = {}\n for arg in declarator.params:\n has_buf_arg = None\n arg_typemap = arg.typemap\n declarator = arg.declarator\n attrs = declarator.attrs\n meta = declarator.metaattrs\n if meta[\"api\"]:\n # API explicitly set by user.\n continue\n elif attrs[\"cdesc\"]:\n # User requested cdesc.\n has_buf_arg = \"cdesc\"\n elif arg_typemap.sgroup == \"string\":\n if meta[\"deref\"] in [\"allocatable\", \"pointer\", \"copy\"]:\n has_buf_arg = \"cdesc\"\n # XXX - this is not tested\n # XXX - tested with string **arg+intent(out)+dimension(ndim)\n else:\n has_buf_arg = \"buf\"\n elif arg_typemap.sgroup == \"char\":\n if arg.ftrim_char_in:\n pass\n elif declarator.is_indirect():\n if meta[\"deref\"] in [\"allocatable\", \"pointer\"]:\n has_buf_arg = \"cdesc\"\n else:\n has_buf_arg = \"buf\"\n elif arg_typemap.sgroup == \"vector\":\n if meta[\"intent\"] == \"in\":\n # Pass SIZE.\n has_buf_arg = \"buf\"\n else:\n has_buf_arg = \"cdesc\"\n elif (arg_typemap.sgroup == \"native\" and\n meta[\"intent\"] == \"out\" and\n meta[\"deref\"] != \"raw\" and\n declarator.get_indirect_stmt() in [\"**\", \"*&\"]):\n # double **values +intent(out) +deref(pointer)\n has_buf_arg = \"cdesc\"\n #has_buf_arg = \"buf\" # XXX - for scalar?\n buf_args[declarator.user_name] = has_buf_arg\n # --- End loop over function parameters\n has_buf_arg = any(buf_args.values())\n\n # Function result.\n need_buf_result = None\n\n result_as_arg = \"\" # Only applies to string functions\n # when the result is added as an argument to the Fortran api.\n\n # Check if result needs to be an argument.\n attrs = ast.declarator.attrs\n meta = ast.declarator.metaattrs\n if meta[\"deref\"] == \"raw\":\n # No bufferify required for raw pointer result.\n pass\n elif result_typemap.sgroup == \"string\":\n if meta[\"deref\"] in [\"allocatable\", \"pointer\"]:\n need_buf_result = \"cdesc\"\n else:\n need_buf_result = \"buf\"\n result_as_arg = fmt_func.F_string_result_as_arg\n result_name = result_as_arg or fmt_func.C_string_result_as_arg\n elif result_typemap.sgroup == \"char\" and result_is_ptr:\n if meta[\"deref\"] in [\"allocatable\", \"pointer\"]:\n # Result default to \"allocatable\".\n need_buf_result = \"cdesc\"\n else:\n need_buf_result = \"buf\"\n result_as_arg = fmt_func.F_string_result_as_arg\n result_name = result_as_arg or fmt_func.C_string_result_as_arg\n elif result_typemap.base == \"vector\":\n need_buf_result = \"cdesc\"\n elif result_is_ptr:\n if 
meta[\"deref\"] in [\"allocatable\", \"pointer\"]:\n if meta[\"dimension\"]:\n # int *get_array() +deref(pointer)+dimension(10)\n need_buf_result = \"cdesc\"\n\n # Functions with these results need wrappers.\n if not (need_buf_result or\n has_buf_arg):\n return\n\n # XXX node.wrap.fortran = False\n # Preserve wrap.c.\n # This keep a version which accepts char * arguments.\n\n # Create a new C function and change arguments\n # and add attributes.\n C_new = node.clone()\n ordered_functions.append(C_new)\n self.append_function_index(C_new)\n\n generated_suffix = \"buf\"\n C_new._generated = \"arg_to_buffer\"\n C_new.splicer_group = \"buf\"\n if need_buf_result:\n C_new.ast.declarator.metaattrs[\"api\"] = need_buf_result\n \n fmt_func = C_new.fmtdict\n fmt_func.function_suffix = fmt_func.function_suffix + fmt_func.C_bufferify_suffix\n\n options = C_new.options\n C_new.wrap.assign(c=node.options.wrap_c)\n C_new._PTR_C_CXX_index = node._function_index\n\n for arg in C_new.ast.declarator.params:\n declarator = arg.declarator\n attrs = declarator.attrs\n meta = declarator.metaattrs\n if buf_args[declarator.user_name]:\n meta[\"api\"] = buf_args[declarator.user_name]\n if arg.ftrim_char_in:\n continue\n arg_typemap = arg.typemap\n if arg_typemap.base == \"vector\":\n # Do not wrap the orignal C function with vector argument.\n # Meaningless to call without the size argument.\n # TODO: add an option where char** length is determined by looking\n # for trailing NULL pointer. { \"foo\", \"bar\", NULL };\n node.wrap.c = False\n node.wrap.lua = False # NotImplemented\n\n ast = C_new.ast\n if True: # preserve to avoid changing indention for now.\n # Add additional argument to hold result.\n # This will allocate a new character variable to hold the\n # results of the C++ function.\n f_attrs = node.ast.declarator.attrs # Fortran function attributes\n f_meta = node.ast.declarator.metaattrs # Fortran function attributes\n\n if result_as_arg:\n # decl: const char * getCharPtr2() +len(30)\n # +len implies copying into users buffer.\n result_as_string = ast.result_as_arg(result_name)\n result_as_string.const = False # must be writeable\n attrs = result_as_string.declarator.attrs\n # Special case for wrapf.py to override \"allocatable\"\n f_meta[\"deref\"] = None\n # We've added an argument to fill, use api=buf.\n result_as_string.declarator.metaattrs[\"api\"] = \"buf\"\n result_as_string.declarator.metaattrs[\"deref\"] = \"result\"\n result_as_string.declarator.metaattrs[\"is_result\"] = True\n C_new.ast.declarator.metaattrs[\"api\"] = None\n C_new.ast.declarator.metaattrs[\"intent\"] = \"subroutine\"\n C_new.ast.declarator.metaattrs[\"deref\"] = None\n\n if result_as_arg:\n F_new = self.result_as_arg(node, C_new)\n ordered_functions.append(F_new)\n self.append_function_index(F_new)\n else:\n if node._generated in [\"result_to_arg\", \"fortran_generic\", \"getter/setter\"]:\n node.wrap.c = False\n \n # Fortran function may call C subroutine if string/vector result\n node._PTR_F_C_index = C_new._function_index", "def test_callback_from_c(self):\n source = io.StringIO(\"\"\"\n int add(int x, int y);\n int x(int a) {\n return add(a + 1, 13);\n }\n \"\"\")\n arch = get_current_arch()\n obj = cc(source, arch, debug=True)\n def my_add(x: int, y: int) -> int:\n return x + y + 2\n imports = {\n 'add': my_add\n }\n m = load_obj(obj, imports=imports)\n y = m.x(101)\n self.assertEqual(117, y)", "def make_get_python_out_struct(self):\n res = \\\n\"\"\"DLLEXPORT ___madz_LANG_python_TYPE_* {}_get_out_struct(){{\n return 
&___madz_LANG_python_OUTPUT;\n}}\n\n\"\"\"\n return res.format(self.python_mangle)", "def adaptPythonToCpp(self, *args):\n return _SALOMERuntime.RuntimeSALOME_adaptPythonToCpp(self, *args)", "def compile_function(self, function, arguments):", "def fortran_c_wrapper(self) -> str:\n if self.fc_override is not None:\n return self.fc_override.replace('$CLASSNAME$', self.class_name).replace(\n \"$C_PREFIX$\", self.c_prefix).replace(\"$F_PREFIX$\", self.f_prefix)\n\n result = ''\n\n # declaration\n in_parameters = self._fc_in_parameters()\n return_type, out_parameters = self._fc_out_parameters()\n if self.may_throw:\n out_parameters.append('int * err_code')\n out_parameters.append('char ** err_msg')\n out_parameters.append('std::size_t * err_msg_len')\n\n func_name = '{}_{}_{}_'.format(\n self.c_prefix, self.class_name, self.name)\n\n par_str = ', '.join(in_parameters + out_parameters)\n result += '{} {}({}) {{\\n'.format(return_type, func_name, par_str)\n\n # convert input\n for par in self.params:\n result += '{}'.format(par.fc_convert_input())\n\n # call C++ function and return result\n if self.may_throw:\n result += ' try {\\n'\n result += ' *err_code = 0;\\n'\n result += indent(self._fc_cpp_call(), 4*' ')\n result += indent(self._fc_return(), 4*' ')\n result += ' }\\n'\n for exception, code in error_codes.items():\n if code != 0:\n catch = ''\n catch += 'catch (std::{} const & e) {{\\n'.format(exception)\n catch += ' *err_code = {};\\n'.format(code)\n catch += ' static std::string msg;\\n'\n catch += ' msg = e.what();\\n'\n catch += ' *err_msg = const_cast<char*>(msg.data());\\n'\n catch += ' *err_msg_len = msg.size();\\n'\n catch += '}\\n'\n result += indent(catch, 4*' ')\n result += self._fc_return_default()\n else:\n result += self._fc_cpp_call()\n result += self._fc_return()\n result += '}\\n\\n'\n return result", "def create_checked_function():\n\n ffi = cffi.FFI()\n ffi.cdef(\"\"\"\nint overhead(int32_t* list, size_t num, char* utf8, int* error);\n\"\"\")\n c = ffi.dlopen(\"./liboverhead/liboverhead.so\")\n overhead = c.overhead\n\n error_type = ffi.typeof(\"int*\")\n\n def func(list_, text):\n # typecheck/convert text\n if isinstance(text, unicode):\n text = text.encode(\"utf-8\")\n elif text is None:\n text = ffi.NULL\n elif not isinstance(text, str):\n raise TypeError\n\n len_ = len(list_)\n error = ffi.new(error_type)\n result = overhead(list_, len_, text, error)\n\n if not result:\n raise Exception(\"Error occured: %d\" % error[0])\n\n return result\n\n return func", "def dispatchMacroEnvFunction(self, tree, tree_parent):\n cpp_func_name = \"getMacroProperty\"\n py_func = tree.attr\n # extract type from function name\n py_type = py_func[len(cpp_func_name):]\n if py_type not in self._fgpu_types:\n self.RaiseError(tree, f\"'{py_type}' is not a valid FLAME GPU type\")\n # get cpp type\n t = self._fgpu_types[py_type]\n cpp_func_name += f\"<{t}\"\n # mess with the parent to extract (and remove arguments so they dont end up in the argument list)\n if not tree_parent.args :\n self.RaiseError(tree, f\" Macro environment function '{py_func}' is expected to have some arguments.\")\n # if more than one arg then the rest are bounds to translate\n if len(tree_parent.args) > 1:\n bounds = tree_parent.args[1:]\n # process bounds by appending to cpp function template arguments\n for i in bounds:\n if isinstance(i, ast.Num): # num required for python 3.7\n if not isinstance(i.n, int):\n self.RaiseError(tree, f\" Macro environment function argument '{i}' should be an integer value.\")\n 
cpp_func_name += f\", {i.n}\"\n else: # all Python > 3.7 \n if not isinstance(i, ast.Constant):\n self.RaiseError(tree, f\" Macro environment function argument '{i}' should be an constant value (or Num in Python <3.8).\")\n if not isinstance(i.value, int):\n self.RaiseError(tree, f\" Macro environment function argument '{i}' should be an integer value.\")\n cpp_func_name += f\", {i.value}\"\n # remove bounds from argument list (in place)\n del tree_parent.args[1:]\n cpp_func_name += \">\"\n self.write(cpp_func_name)", "def create_function():\n\n ffi = cffi.FFI()\n ffi.cdef(\"\"\"\nint overhead(int32_t* list, size_t num, char* utf8, int* error);\n\"\"\")\n c = ffi.dlopen(\"./liboverhead/liboverhead.so\")\n overhead = c.overhead\n\n def func(list_, length, text, error):\n return overhead(list_, length, text, error)\n\n return overhead", "def cpp_funcname(self, name, argkinds=None):\n if isinstance(name, basestring):\n return name\n if argkinds is None:\n argkinds = [(Arg.NONE, None)] * (len(name) - 1)\n fname = name[0]\n cts = []\n for x, (argkind, argvalue) in zip(name[1:], argkinds):\n if argkind is Arg.TYPE:\n ct = self.cpp_type(x)\n elif argkind is Arg.LIT:\n ct = self.cpp_literal(x)\n elif isinstance(x, Number):\n ct = self.cpp_literal(x)\n else:\n try:\n ct = self.cpp_type(x) # guess it is a type\n except TypeError:\n ct = x # guess it is a variable\n cts.append(ct)\n fname += '' if 0 == len(cts) else \"< \" + \", \".join(cts) + \" >\"\n return fname", "def cast(*args):\n return _ITKCostFunctionsPython.itkCostFunction_cast(*args)", "def _make_array(self, c):\n return (c * ctypes.py_object)()", "def cast(*args):\n return _ITKCostFunctionsPython.itkSingleValuedCostFunction_cast(*args)", "def adaptCorbaToCpp(self, *args):\n return _SALOMERuntime.RuntimeSALOME_adaptCorbaToCpp(self, *args)", "def cython_funcname(self, name, argkinds=None):\n if isinstance(name, basestring):\n return name\n if argkinds is None:\n argkinds = [(Arg.NONE, None)] * (len(name) - 1)\n fname = name[0]\n cfs = []\n for x, (argkind, argvalue) in zip(name[1:], argkinds):\n if argkind is Arg.TYPE:\n cf = self.cython_functionname(x)[1]\n elif argkind is Arg.LIT:\n cf = self.cython_literal(x)\n elif argkind is Arg.VAR:\n cf = x\n elif isinstance(x, Number):\n cf = self.cython_literal(x)\n else:\n try:\n cf = self.cython_functionname(x)[1] # guess type\n except TypeError:\n cf = x # guess variable\n cfs.append(cf)\n fname += '' if 0 == len(cfs) else \"_\" + \"_\".join(cfs)\n return fname", "def cpp_type_to_python(self, ot: str):\n t = ot\n t = remove_cvref(t)\n t = self._remove_variable_type_prefix(t)\n try:\n return cpp_base_type_to_python(t)\n except KeyError:\n pass\n if is_function_pointer_type(t):\n func = function_pointer_type_info(t)\n args = \",\".join([self.cpp_type_to_python(arg.type) for arg in func.args])\n return f'Callable[[{args}], {self.cpp_type_to_python(func.ret_type)}]'\n\n if is_function_type(t):\n func = function_type_info(t)\n args = \",\".join([self.cpp_type_to_python(arg.type) for arg in func.args])\n return f'Callable[[{args}], {self.cpp_type_to_python(func.ret_type)}]'\n\n if is_pointer_type(t):\n cpp_base = self.resolve_to_basic_type_remove_const(pointer_base(t))\n if is_pointer_type(cpp_base) or is_array_type(cpp_base):\n return f'\"level 2 pointer:{t}\"' # un-convertible: level 2 pointer\n if cpp_base in ARRAY_BASES:\n return ARRAY_BASES[cpp_base]\n return self.cpp_type_to_python(cpp_base)\n if is_array_type(t):\n b = array_base(t)\n if b in ARRAY_BASES: # special case: string array\n return 
ARRAY_BASES[b]\n base = self.cpp_type_to_python(b)\n return f'List[{base}]'\n if is_tuple_type(t):\n es = tuple_elements(t)\n bases = [self.cpp_type_to_python(i) for i in es]\n bases_str = \",\".join(bases)\n return f'Tuple[{bases_str}]'\n\n # check classes\n objects = self.objects\n if t in objects:\n o = objects[t]\n if isinstance(o, GeneratorClass) or isinstance(o, GeneratorEnum):\n return t.replace(\"::\", \".\").strip(\" .\") # todo fix this\n if isinstance(o, GeneratorTypedef):\n return self.cpp_type_to_python(o.target)\n\n if t.startswith(\"(anonymous\"):\n return f'\"{t}\"'\n\n # this means this is\n logger.warning(\"%s might be an internal symbol, failed to resolve to basic type\", t)\n return t", "def _build_comute_argtype(num_nd, num_nd_write):\n ret = [_xc_func_p, ctypes.c_size_t]\n ret += [_ndptr] * num_nd\n ret += [_ndptr_w] * num_nd_write\n return tuple(ret)", "def build(self, cres):\n _launch_threads()\n # Build wrapper for ufunc entry point\n ctx = cres.target_context\n library = cres.library\n signature = cres.signature\n llvm_func = library.get_function(cres.fndesc.llvm_func_name)\n wrapper, env = build_gufunc_wrapper(library, ctx, llvm_func,\n signature, self.sin, self.sout,\n fndesc=cres.fndesc,\n env=cres.environment)\n\n ptr = library.get_pointer_to_function(wrapper.name)\n\n # Get dtypes\n dtypenums = []\n for a in signature.args:\n if isinstance(a, types.Array):\n ty = a.dtype\n else:\n ty = a\n dtypenums.append(as_dtype(ty).num)\n\n return dtypenums, ptr, env", "def itkSingleValuedCostFunction_cast(*args):\n return _ITKCostFunctionsPython.itkSingleValuedCostFunction_cast(*args)", "def itkCostFunction_cast(*args):\n return _ITKCostFunctionsPython.itkCostFunction_cast(*args)", "def make_func_code(params):\n class FuncCode(object):\n __slots__ = ('co_varnames', 'co_argcount')\n fc = FuncCode()\n fc.co_varnames = params\n fc.co_argcount = len(params)\n return fc", "def _PythonToCtype(data, c_type):\n if c_type is actuator_util.Vec3:\n # Handle Vec3.\n assert len(data) == 3\n c_data = c_type()\n c_data.x = data[0]\n c_data.y = data[1]\n c_data.z = data[2]\n return c_data\n elif hasattr(c_type, '_length_'):\n # Handle arrays.\n length = getattr(c_type, '_length_')\n assert len(data) == length\n\n c_data = c_type()\n for i in range(length):\n c_data[i] = _PythonToCtype(data[i], getattr(c_type, '_type_'))\n\n elif hasattr(c_type, '_fields_'):\n # Handle structures.\n fields = autogen_util.GetCFields(c_type)\n assert set(data.keys()) == {field for field, _ in fields}\n\n c_data = c_type()\n for field, field_type in fields:\n setattr(c_data, field, _PythonToCtype(data[field], field_type))\n\n else:\n c_data = c_type(data)\n\n return c_data", "def _create_args(self, func_args):\n self.llvm_ret_type = self._from_ctype(self.signature.ret_type)\n self.llvm_arg_types = \\\n [self._from_ctype(a) for a in self.signature.arg_ctypes]", "def call_cdef_inline(x):\n ret = cdef_inline(x)\n return ret, cython.typeof(ret)", "def get_func_type(self, *args):\n return _ida_hexrays.cfunc_t_get_func_type(self, *args)", "def build_func_body(func_name, arg_dict, return_type):\n body = \"\"\n arg_list = \"\"\n\n # the following are pointers to scalar outputs\n # Note: pBufferSize was renamed pBufferSizeInBytes in v6.5\n scalar_ptr_outputs = ['nnzTotalDevHostPtr',\n 'pBufferSize',\n 'pBufferSizeInBytes',\n 'resultDevHostPtr']\n\n is_creator = 'cusparseCreate' in func_name\n is_getter = 'cusparseGet' in func_name\n\n if return_type == 'cusparseStatus_t' and not (is_creator or is_getter):\n 
is_return = False\n else:\n is_return = True\n\n # else:\n return_str = ''\n for k, v in arg_dict.items():\n\n \"\"\"\n set some flags based on the name/type of the argument\n will use these flags to determine whether and how to call ffi.new or\n ffi.cast on each variable\n \"\"\"\n is_ptr = '*' in v\n is_cusparse_type = '_t' in v\n is_cusparse_ptr = is_ptr and is_cusparse_type\n is_output_scalar = k in scalar_ptr_outputs\n if k in ['alpha', 'beta']:\n is_scalar = True\n else:\n is_scalar = False\n if is_getter:\n is_gpu_array = False\n else:\n is_gpu_array = is_ptr and (not is_cusparse_ptr) and (not is_scalar)\n if 'Complex' in v:\n is_complex = True\n else:\n is_complex = False\n\n # convert variable to appropriate type for the FFI\n if is_output_scalar:\n # for scalar outputs make a new pointer\n body += \"%s = ffi.cast('%s', %s)\\n\" % (k, v, k)\n elif is_getter and is_ptr and (return_type == 'cusparseStatus_t'):\n # any pointers in cusparseGet* are new outputs to be created\n body += \"%s = ffi.new('%s')\\n\" % (k, v)\n elif is_gpu_array:\n # pass pointer to GPU array data (use either .ptr or .gpudata)\n body += \"%s = ffi.cast('%s', %s.ptr)\\n\" % (k, v, k)\n elif is_cusparse_ptr:\n if is_creator:\n # generate custom cusparse type\n body += \"%s = ffi.new('%s')\\n\" % (k, v)\n else:\n # cast to the custom cusparse type\n body += \"%s = ffi.cast('%s', %s)\\n\" % (k, v, k)\n elif is_ptr and is_scalar:\n # create new pointer, with value initialized to scalar\n if is_complex:\n # complex case is a bit tricky. requires ffi.buffer\n body += \"%sffi = ffi.new('%s')\\n\" % (k, v)\n if 'cusparseC' in func_name:\n body += \"ffi.buffer(%sffi)[:] = \\\n np.complex64(%s).tostring()\\n\" % (k, k)\n elif 'cusparseZ' in func_name:\n body += \"ffi.buffer(%sffi)[:] = \\\n np.complex128(%s).tostring()\\n\" % (k, k)\n else:\n body += \"%s = ffi.new('%s', %s)\\n\" % (k, v, k)\n elif is_ptr or v == 'cudaStream_t':\n # case non-scalar pointer to appropriate type\n body += \"%s = ffi.cast('%s', %s)\\n\" % (k, v, k)\n else:\n # don't need explicit cast for plain int, float, etc\n pass\n\n # build the list of arguments to pass to the API\n if is_ptr and is_scalar and is_complex:\n # take into account modified argument name for complex scalars\n arg_list += \"%sffi, \" % k\n else:\n arg_list += \"%s, \" % k\n\n # add the function call and optionally return the result\n last_key = k\n arg_list = arg_list[:-2] # remove trailing \", \"\n if is_getter and return_type != 'cusparseStatus_t':\n body += \"return ffi_lib.%s(%s)\\n\" % (func_name, arg_list)\n else:\n # check cusparseStatus_t state before returning\n call_str = \"status = ffi_lib.%s(%s)\\n\" % (func_name, arg_list)\n body += split_line(call_str, break_pattern=', ', nmax=76)\n body += \"cusparseCheckStatus(status)\\n\"\n if is_return:\n # len(arg_dict) == 2) is to avoid return for cusparseGetLevelInfo\n if is_creator or (is_getter and (len(arg_dict) == 2)):\n body += \"return %s[0]\\n\" % last_key\n else:\n body += \"#TODO: return the appropriate result\"\n body += '\\n\\n'\n return reindent(body, numSpaces=4, lstrip=False)", "def get_func_type(self, *args):\n return _ida_hexrays.cfuncptr_t_get_func_type(self, *args)", "def cast(*args):\n return _ITKCostFunctionsPython.itkMultipleValuedCostFunction_cast(*args)", "def _llvm_jit_code(args, expr, signature, callback_type):\n if callback_type is None:\n jit = LLVMJitCode(signature)\n else:\n jit = LLVMJitCodeCallback(signature)\n\n jit._create_args(args)\n jit._create_function_base()\n 
jit._create_param_dict(args)\n strmod = jit._create_function(expr)\n if False:\n print(\"LLVM IR\")\n print(strmod)\n fptr = jit._compile_function(strmod)\n return fptr", "def boost_initialization():\n global Lib_c \n Lib_c = ctypes.CDLL('./integral_function.so')\n Lib_c.set.restype = None\n Lib_c.set.argtypes = (ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p)\n Lib_c.set_target.restype = None\n Lib_c.set_target.argtypes = (ctypes.c_int,)\n Lib_c.function.restype = ctypes.c_double\n Lib_c.function.argtypes = (ctypes.c_int,ctypes.c_double)", "def _generate_type_caster(\n py_name: str, cpp_name: str, generate_load: bool,\n generate_cast: bool) -> Generator[str, None, None]:\n yield 'namespace pybind11 {'\n yield 'namespace detail {'\n yield f'template <> struct type_caster<{cpp_name}> {{'\n yield ' public:'\n yield I + f'PYBIND11_TYPE_CASTER({cpp_name}, _(\"{py_name}\"));'\n yield ''\n if generate_load:\n yield I + 'bool load(handle src, bool) {'\n yield I + I + 'using ::clif::Clif_PyObjAs;'\n yield I + I + 'return Clif_PyObjAs(src.ptr(), &value);'\n yield I + '}'\n yield ''\n if generate_cast:\n yield I + (f'static handle cast({cpp_name} src, return_value_policy, '\n 'handle) {')\n yield I + I + 'using ::clif::Clif_PyObjFrom;'\n yield I + I + 'return Clif_PyObjFrom(src, {});'\n yield I + '}'\n yield '};'\n yield '} // namespace detail'\n yield '} // namespace pybind11'\n yield ''", "def adaptPythonToCorba(self, *args):\n return _SALOMERuntime.RuntimeSALOME_adaptPythonToCorba(self, *args)", "def call_ccall(x):\n ret = c_call(x)\n return ret, cython.typeof(ret)", "def get_C_code(self, C_function_name):\n from cascada.bitvector.printing import BvCCodePrinter\n\n width2type = BvCCodePrinter._width2C_type\n\n # in C, * binds to the declarator, not the type specifier\n input_vars_c = ', '.join([\"{} {}\".format(width2type(v.width), v.name) for v in self.input_vars])\n output_vars_c = ', '.join([\"{} *{}\".format(width2type(v.width), v.name) for v in self.output_vars])\n if self.external_vars:\n external_vars_c = ', '.join([\"{} {}\".format(width2type(v.width), v.name) for v in self.external_vars])\n external_vars_c = external_vars_c + \", \"\n else:\n external_vars_c = \"\"\n\n aux = f\"void {C_function_name}({input_vars_c}, {external_vars_c}{output_vars_c})\"\n header = f\"{aux};\"\n body = f\"#include <stdint.h>\\n{aux}{{\" # stdint for uint_*\n\n outvar2outvar_c = {v: core.Variable(\"*\" + v.name, v.width, allowed_symbols=\"*\") for v in self.output_vars}\n\n def primary_assignment2C_code(my_var, my_expr):\n assert isinstance(my_expr, (core.Constant, core.Variable, operation.PrimaryOperation))\n if my_var in self.output_vars:\n return f\"*{my_var} = {my_expr.crepr()};\"\n else:\n return f\"{width2type(my_var.width)} {my_var} = {my_expr.crepr()};\"\n\n for var, expr in self.assignments.items():\n expr = expr.xreplace(outvar2outvar_c)\n if isinstance(expr, operation.SecondaryOperation):\n expr = expr.doit(eval_sec_ops=True)\n body += f\"\\n\\t{primary_assignment2C_code(var, expr)}\"\n body += \"\\n};\"\n\n return header, body", "def to_PyMethodDef_entry(items):\r\n\r\n entry_type = items[0]\r\n items = items[1:]\r\n if entry_type == 'method':\r\n return 'FORWARDER(%s, %s, \"%s\", %s)' % items\r\n elif entry_type == 'function':\r\n return 'FREE_FORWARDER(%s, %s, \"%s\", %s)' % items\r\n elif entry_type == 'method_template':\r\n return 'FORWARDER(%s<common_type>, %s, \"%s\", %s)' % items\r\n else:\r\n assert False", "def _fc_function_definitions(self) -> str:\n result = 'extern \"C\" {\\n\\n'\n 
for namespace in self.namespaces:\n for member in namespace.members:\n result += member.fortran_c_wrapper()\n\n result += '}\\n\\n'\n return result", "def make_wrapper(fname, atypes, rtype, cres):\n fndesc = cres.fndesc\n module = cres.library.create_ir_module(fndesc.unique_name)\n context = cres.target_context\n ll_argtypes = [context.get_value_type(ty) for ty in atypes]\n ll_return_type = context.get_value_type(rtype)\n\n # TODO: design a API for custom wrapping\n if type(rtype).__name__ == 'ArrayPointer':\n wrapty = ir.FunctionType(ir.VoidType(),\n [ll_return_type] + ll_argtypes)\n wrapfn = module.add_function(wrapty, fname)\n builder = ir.IRBuilder(wrapfn.append_basic_block('entry'))\n fnty = context.call_conv.get_function_type(rtype, atypes)\n fn = builder.module.add_function(fnty, cres.fndesc.llvm_func_name)\n status, out = context.call_conv.call_function(\n builder, fn, rtype, atypes, wrapfn.args[1:])\n with cgutils.if_unlikely(builder, status.is_error):\n cgutils.printf(builder,\n f\"rbc: {fname} failed with status code %i\\n\",\n status.code)\n builder.ret_void()\n builder.store(builder.load(out), wrapfn.args[0])\n builder.ret_void()\n else:\n wrapty = ir.FunctionType(ll_return_type, ll_argtypes)\n wrapfn = module.add_function(wrapty, fname)\n builder = ir.IRBuilder(wrapfn.append_basic_block('entry'))\n fnty = context.call_conv.get_function_type(rtype, atypes)\n fn = builder.module.add_function(fnty, cres.fndesc.llvm_func_name)\n status, out = context.call_conv.call_function(\n builder, fn, rtype, atypes, wrapfn.args)\n with cgutils.if_unlikely(builder, status.is_error):\n cgutils.printf(builder,\n f\"rbc: {fname} failed with status code %i\\n\",\n status.code)\n builder.ret(out)\n\n cres.library.add_ir_module(module)", "def fptrunc(self, typ):", "def _deviceVariableFunctionName(self, tree, permitted_prefixes, allow_lengths = True):\n cpp_func_name = \"\"\n py_func = tree.attr\n # extract function name start\n for prefix in permitted_prefixes:\n if py_func.startswith(prefix):\n cpp_func_name = prefix\n py_func = py_func[len(prefix):]\n break # dont allow the else\n else:\n return None\n # check type and lengths\n if allow_lengths:\n #split to get type and Array Length (This could **potentially** be looked up from the model description but current syntax is consistent with swig bindings) \n type_and_length = py_func.split(\"Array\")\n if type_and_length[0] not in self._fgpu_types:\n self.RaiseError(tree, f\"'{type_and_length[0]}' is not a valid FLAME GPU type\")\n t = self._fgpu_types[type_and_length[0]]\n # generate template args\n if (len(type_and_length) == 1):\n cpp_func_name += f\"<{t}>\"\n elif (len(type_and_length) == 2):\n cpp_func_name += f\"<{t}, {type_and_length[1]}>\"\n else:\n return None\n else:\n if py_func not in self._fgpu_types:\n self.RaiseError(tree, f\"'{py_func}' is not a valid FLAME GPU type\")\n t = self._fgpu_types[py_func]\n cpp_func_name += f\"<{t}>\"\n # return \n return cpp_func_name", "def translate_to_c(Newast):\n ast = parse_file('exampleMin.c', use_cpp=True)\n\n ast.show()\n #print(\"newast: \", Newast.ext[0].decl.type.args.params[0].type.type==ast.ext[0].decl.type.args.params[0].type.type)\n #print(\"newast2: \", Newast.ext[0].decl.type.args.params[0].type.type.coord)\n #print(\"ast2: \", ast.ext[0].decl.type.args.params[0].type.type.coord)\n\n #Newast.show()\n \n # print(ast.ext[0].decl.bitsize)\n # print(Newast.ext[0].decl.bitsize)\n # print(\"----------------------------------\")\n # print(ast.ext[0].decl.type.args.coord)\n # 
print(Newast.ext[0].decl.type.args.coord)\n # print(\"----------------------------------\")\n # print(ast.ext[0].decl.type.args.params)\n # print(Newast.ext[0].decl.type.args.params)\n # print(\"----------------------------------\")\n # print(ast.ext[0].decl.type.args.params[0])\n # print(Newast.ext[0].decl.type.args.params[0])\n # print(\"----------------------------------\")\n # print(ast.ext[0].decl.type.args.params[0].type)\n # print(Newast.ext[0].decl.type.args.params[0].type)\n # print(\"----------------------------------\")\n # print(ast.ext[0].decl.type.args.params[0].type.type)\n # print(Newast.ext[0].decl.type.args.params[0].type.type)\n # print(\"----------------------------------\")\n # print(ast.ext[0].decl.type.args.params[0].type.type.names)\n # print(Newast.ext[0].decl.type.args.params[0].type.type.names)\n # print(\"----------------------------------\")\n\n generator = c_generator.CGenerator()\n #ast.show()\n\n # tracing the generator for debugging\n # import trace\n # tr = trace.Trace(countcallers=1)\n # tr.runfunc(generator.visit, Newast)\n # tr.results().write_results()\n\n print(generator.visit(Newast))", "def compile(self, args):\n if args not in self._compileinfos:\n cres = compile_with_dppl(self.py_func, None, args, debug=self.debug)\n func = cres.library.get_function(cres.fndesc.llvm_func_name)\n cres.target_context.mark_ocl_device(func)\n first_definition = not self._compileinfos\n self._compileinfos[args] = cres\n libs = [cres.library]\n\n if first_definition:\n # First definition\n cres.target_context.insert_user_function(self, cres.fndesc,\n libs)\n else:\n cres.target_context.add_user_function(self, cres.fndesc, libs)\n\n else:\n cres = self._compileinfos[args]\n\n return cres.signature", "def adaptCorbaToPython(self, *args):\n return _SALOMERuntime.RuntimeSALOME_adaptCorbaToPython(self, *args)", "def cfunc_type(self):\n tif = ida_typeinf.tinfo_t()\n result = self.get_func_type(tif)\n if not result:\n return\n return tif", "def __call__(fun_name):", "def parse_capi(lines):\n pattern = r'(\\w+)\\s+(\\**)\\s*(\\w+)\\((.*)\\)' # Float32 *sin(...)\n pexcept = r'except (\\??)(.*)'\n\n functions = []\n for line in lines:\n if line.strip():\n m = re.match(pattern, line)\n restype, stars, fname, argtypes = m.groups()\n rest = line[len(m.group(0)):].strip()\n if rest:\n maybe, badval = re.match(pexcept, rest).groups()\n else:\n maybe, badval = None, None\n\n restype = parse_type(\"%s %s\" % (restype, \" \".join(stars)))\n argtypes = map(parse_type, argtypes.split(','))\n signature = Function(restype, argtypes)\n functions.append(Py_Function(fname, signature, maybe, badval))\n\n return functions", "def _compile_C_code(header, body, return_unloaded=False, verbose=False):\n import importlib\n import tempfile\n import uuid\n\n import cffi\n\n module_name = \"module_\" + uuid.uuid4().hex\n\n if \"__uint128\" in header:\n raise ValueError(\"_compile_C_code does not support bit-vector widths \"\n \"larger than 64 bits (cffi does not support __uint128)\")\n\n ffibuilder = cffi.FFI()\n ffibuilder.cdef(header)\n ffibuilder.set_source(module_name, body)\n\n tmpdir = tempfile.TemporaryDirectory()\n lib_path = ffibuilder.compile(tmpdir=tmpdir.name, verbose=verbose)\n\n if return_unloaded:\n return lib_path, module_name, tmpdir\n\n # dynamic import\n # https://docs.python.org/3/library/importlib.html#importing-a-source-file-directly\n spec = importlib.util.spec_from_file_location(module_name, lib_path)\n pymod_parent = importlib.util.module_from_spec(spec)\n # 
sys.modules[module_name] = module\n spec.loader.exec_module(pymod_parent)\n\n pymod = pymod_parent\n\n return pymod, tmpdir", "def type_cast(func,data_entry,*args):\n assert isinstance(data_entry,str)\n assert callable(func)\n try:\n out=func(data_entry,*args)\n except:\n out=None\n return out", "def from_function(cls, py_func, py_file):\n raise NotImplementedError", "def from_function(cls, py_func, py_file):\n raise NotImplementedError", "def _from_c_repr(c_repr):\n # We create a dummy module with a global variable of the requested type,\n # parse that module, and return the type of the global variable.\n # Include stdint.h to recognize the intX_t typedefs.\n module = parse(\"\"\"\n #include <stdint.h>\n\n {} a;\n \"\"\".format(c_repr))\n return module.global_vars['a'].type", "def compile_cutils():\r\n\r\n types = ['npy_' + t for t in ['int8', 'int16', 'int32', 'int64', 'int128',\r\n 'int256', 'uint8', 'uint16', 'uint32', 'uint64', 'uint128', 'uint256',\r\n 'float16', 'float32', 'float64', 'float80', 'float96', 'float128',\r\n 'float256']]\r\n\r\n complex_types = ['npy_' + t for t in ['complex32', 'complex64',\r\n 'complex128', 'complex160', 'complex192', 'complex512']]\r\n\r\n inplace_map_template = \"\"\"\r\n #if defined(%(typen)s)\r\n static void %(type)s_inplace_add(PyArrayMapIterObject *mit, PyArrayIterObject *it)\r\n {\r\n int index = mit->size;\r\n while (index--) {\r\n %(op)s\r\n\r\n PyArray_MapIterNext(mit);\r\n PyArray_ITER_NEXT(it);\r\n }\r\n }\r\n #endif\r\n \"\"\"\r\n\r\n floatadd = \"((%(type)s*)mit->dataptr)[0] = ((%(type)s*)mit->dataptr)[0] + ((%(type)s*)it->dataptr)[0];\"\r\n complexadd = \"\"\"\r\n ((%(type)s*)mit->dataptr)[0].real = ((%(type)s*)mit->dataptr)[0].real + ((%(type)s*)it->dataptr)[0].real;\r\n ((%(type)s*)mit->dataptr)[0].imag = ((%(type)s*)mit->dataptr)[0].imag + ((%(type)s*)it->dataptr)[0].imag;\r\n \"\"\"\r\n\r\n fns = ''.join([inplace_map_template % {'type': t, 'typen': t.upper(),\r\n 'op': floatadd % {'type': t}}\r\n for t in types] +\r\n [inplace_map_template % {'type': t, 'typen': t.upper(),\r\n 'op': complexadd % {'type': t}}\r\n for t in complex_types])\r\n\r\n fn_array = (\"static inplace_map_binop addition_funcs[] = {\" +\r\n ''.join([\"\"\"\r\n #if defined(%(typen)s)\r\n %(type)s_inplace_add,\r\n #endif\r\n \"\"\" % {'type': t, 'typen': t.upper()}\r\n for t in types + complex_types]) +\r\n \"\"\"NULL};\r\n \"\"\")\r\n\r\n type_number_array = (\"static int type_numbers[] = {\" +\r\n ''.join([\"\"\"\r\n #if defined(%(typen)s)\r\n %(typen)s,\r\n #endif\r\n \"\"\" % {'type': t, 'typen': t.upper()}\r\n for t in types + complex_types]) +\r\n \"-1000};\")\r\n\r\n code = (\"\"\"\r\n #include <Python.h>\r\n #include \"numpy/arrayobject.h\"\r\n\r\n extern \"C\"{\r\n static PyObject *\r\n run_cthunk(PyObject *self, PyObject *args)\r\n {\r\n PyObject *py_cthunk = NULL;\r\n if(!PyArg_ParseTuple(args,\"O\",&py_cthunk))\r\n return NULL;\r\n\r\n if (!PyCObject_Check(py_cthunk)) {\r\n PyErr_SetString(PyExc_ValueError,\r\n \"Argument to run_cthunk must be a PyCObject.\");\r\n return NULL;\r\n }\r\n void * ptr_addr = PyCObject_AsVoidPtr(py_cthunk);\r\n int (*fn)(void*) = (int (*)(void*))(ptr_addr);\r\n void* it = PyCObject_GetDesc(py_cthunk);\r\n int failure = fn(it);\r\n\r\n return Py_BuildValue(\"i\", failure);\r\n }\r\n\r\n #if NPY_API_VERSION >= 0x00000008\r\n typedef void (*inplace_map_binop)(PyArrayMapIterObject *, PyArrayIterObject *);\r\n \"\"\" + fns + fn_array + type_number_array +\r\n\r\n\"\"\"\r\nstatic int\r\nmap_increment(PyArrayMapIterObject *mit, 
PyObject *op, inplace_map_binop add_inplace)\r\n{\r\n PyArrayObject *arr = NULL;\r\n PyArrayIterObject *it;\r\n PyArray_Descr *descr;\r\n if (mit->ait == NULL) {\r\n return -1;\r\n }\r\n descr = PyArray_DESCR(mit->ait->ao);\r\n Py_INCREF(descr);\r\n arr = (PyArrayObject *)PyArray_FromAny(op, descr,\r\n 0, 0, NPY_ARRAY_FORCECAST, NULL);\r\n if (arr == NULL) {\r\n return -1;\r\n }\r\n if ((mit->subspace != NULL) && (mit->consec)) {\r\n PyArray_MapIterSwapAxes(mit, (PyArrayObject **)&arr, 0);\r\n if (arr == NULL) {\r\n return -1;\r\n }\r\n }\r\n it = (PyArrayIterObject*)\r\n PyArray_BroadcastToShape((PyObject*)arr, mit->dimensions, mit->nd);\r\n if (it == NULL) {\r\n Py_DECREF(arr);\r\n return -1;\r\n }\r\n\r\n (*add_inplace)(mit, it);\r\n\r\n Py_DECREF(arr);\r\n Py_DECREF(it);\r\n return 0;\r\n}\r\n\r\n\r\nstatic PyObject *\r\ninplace_increment(PyObject *dummy, PyObject *args)\r\n{\r\n PyObject *arg_a = NULL, *index=NULL, *inc=NULL;\r\n PyArrayObject *a;\r\n inplace_map_binop add_inplace = NULL;\r\n int type_number = -1;\r\n int i =0;\r\n PyArrayMapIterObject * mit;\r\n\r\n if (!PyArg_ParseTuple(args, \"OOO\", &arg_a, &index,\r\n &inc)) {\r\n return NULL;\r\n }\r\n if (!PyArray_Check(arg_a)) {\r\n PyErr_SetString(PyExc_ValueError, \"needs an ndarray as first argument\");\r\n return NULL;\r\n }\r\n\r\n a = (PyArrayObject *) arg_a;\r\n\r\n if (PyArray_FailUnlessWriteable(a, \"input/output array\") < 0) {\r\n return NULL;\r\n }\r\n\r\n if (PyArray_NDIM(a) == 0) {\r\n PyErr_SetString(PyExc_IndexError, \"0-d arrays can't be indexed.\");\r\n return NULL;\r\n }\r\n type_number = PyArray_TYPE(a);\r\n\r\n\r\n\r\n while (type_numbers[i] >= 0 && addition_funcs[i] != NULL){\r\n if (type_number == type_numbers[i]) {\r\n add_inplace = addition_funcs[i];\r\n break;\r\n }\r\n i++ ;\r\n }\r\n\r\n if (add_inplace == NULL) {\r\n PyErr_SetString(PyExc_TypeError, \"unsupported type for a\");\r\n return NULL;\r\n }\r\n mit = (PyArrayMapIterObject *) PyArray_MapIterArray(a, index);\r\n if (mit == NULL) {\r\n goto fail;\r\n }\r\n if (map_increment(mit, inc, add_inplace) != 0) {\r\n goto fail;\r\n }\r\n\r\n Py_DECREF(mit);\r\n\r\n Py_INCREF(Py_None);\r\n return Py_None;\r\n\r\nfail:\r\n Py_XDECREF(mit);\r\n\r\n return NULL;\r\n}\r\n #endif\r\n\r\n\r\n static PyMethodDef CutilsExtMethods[] = {\r\n {\"run_cthunk\", run_cthunk, METH_VARARGS|METH_KEYWORDS,\r\n \"Run a theano cthunk.\"},\r\n #if NPY_API_VERSION >= 0x00000008\r\n {\"inplace_increment\", inplace_increment,\r\n METH_VARARGS,\r\n \"increments a numpy array inplace at the passed indexes.\"},\r\n #endif\r\n {NULL, NULL, 0, NULL} /* Sentinel */\r\n };\"\"\")\r\n\r\n if PY3:\r\n # This is not the most efficient code, but it is written this way to\r\n # highlight the changes needed to make 2.x code compile under python 3.\r\n code = code.replace(\"<Python.h>\", '\"numpy/npy_3kcompat.h\"', 1)\r\n code = code.replace(\"PyCObject\", \"NpyCapsule\")\r\n code += \"\"\"\r\n static struct PyModuleDef moduledef = {\r\n PyModuleDef_HEAD_INIT,\r\n \"cutils_ext\",\r\n NULL,\r\n -1,\r\n CutilsExtMethods,\r\n };\r\n\r\n PyMODINIT_FUNC\r\n PyInit_cutils_ext(void) {\r\n import_array();\r\n return PyModule_Create(&moduledef);\r\n }\r\n }\r\n \"\"\"\r\n else:\r\n code += \"\"\"\r\n PyMODINIT_FUNC\r\n initcutils_ext(void)\r\n {\r\n import_array();\r\n (void) Py_InitModule(\"cutils_ext\", CutilsExtMethods);\r\n }\r\n } //extern C\r\n \"\"\"\r\n\r\n loc = os.path.join(config.compiledir, 'cutils_ext')\r\n if not os.path.exists(loc):\r\n os.mkdir(loc)\r\n\r\n args = 
cmodule.GCC_compiler.compile_args()\r\n cmodule.GCC_compiler.compile_str('cutils_ext', code, location=loc,\r\n preargs=args)", "def result_as_arg(self, node, C_new):\n F_new = C_new.clone()\n\n # Fortran function should wrap the new C function\n F_new._PTR_F_C_index = C_new._function_index\n F_new.wrap.assign(fortran=True)\n # Do not add '_bufferify'\n F_new.fmtdict.function_suffix = node.fmtdict.function_suffix\n\n # Do not wrap original function (does not have result argument)\n node.wrap.fortran = False\n return F_new", "def itkMultipleValuedCostFunction_cast(*args):\n return _ITKCostFunctionsPython.itkMultipleValuedCostFunction_cast(*args)", "def c_code(self, node, name, (x, y), (z, ), sub):\r\n # raise NotImplementedError(\"Unlike Python, C's modulo returns negative\r\n # modulo on negative dividend (to implement)\")\r\n t = node.inputs[0].type.upcast(*[i.type for i in node.inputs[1:]])\r\n if (str(t) in imap(str, discrete_types) or\r\n t in ['uint8', 'int8', 'uint16', 'int16'] or\r\n t in ['uint32', 'int32', 'uint64', 'int64'] or\r\n t in discrete_types):\r\n # The above or's should not be needed anymore. However, for now we\r\n # keep them out of safety, and verify they are useless with an\r\n # assert.\r\n assert str(t) in imap(str, discrete_types)\r\n x_mod_y = \"THEANO_MACRO_MOD(%(x)s, %(y)s)\" % locals()\r\n x_mod_ymm = \"THEANO_MACRO_MOD(-%(x)s, -%(y)s)\" % locals()\r\n x_mod_ypm = \"THEANO_MACRO_MOD(%(x)s, -%(y)s)\" % locals()\r\n x_mod_ymp = \"THEANO_MACRO_MOD(-%(x)s, %(y)s)\" % locals()\r\n elif (str(t) in imap(str, float_types) or\r\n t in ['float32', 'float64'] or\r\n t in float_types):\r\n # The above or's should not be needed anymore. However, for now we\r\n # keep them out of safety, and verify they are useless with an\r\n # assert.\r\n assert str(t) in imap(str, float_types)\r\n x_mod_y = \"fmod(%(x)s,%(y)s)\" % locals()\r\n x_mod_ymm = \"fmod(-%(x)s,-%(y)s)\" % locals()\r\n x_mod_ypm = \"fmod(%(x)s,-%(y)s)\" % locals()\r\n x_mod_ymp = \"fmod(-%(x)s,%(y)s)\" % locals()\r\n elif str(t) in imap(str, complex_types):\r\n raise self.complex_error\r\n else:\r\n raise NotImplementedError('type not supported', t)\r\n\r\n return dedent(\"\"\"\r\n if (%(x)s < 0){\r\n if (%(y)s < 0){\r\n %(z)s = -(%(x_mod_ymm)s);\r\n }else{\r\n %(z)s = - %(x_mod_ymp)s + (%(x_mod_ymp)s != 0 ? %(y)s : 0);\r\n }\r\n }else if (%(y)s < 0){\r\n %(z)s = (%(x_mod_ypm)s) + (%(x_mod_ypm)s != 0 ? 
%(y)s : 0);\r\n }else{\r\n %(z)s = %(x_mod_y)s;\r\n }\r\n \"\"\") % locals()", "def cppdef(src):\n with _stderr_capture() as err:\n errcode = gbl.gInterpreter.Declare(src)\n if not errcode:\n raise SyntaxError('Failed to parse the given C++ code%s' % err.err)\n return True", "def WrapFunction(lib, funcname, restype, argtypes):\n func = lib.__getattr__(funcname)\n func.restype = restype\n func.argtypes = argtypes\n return func", "def make_module_hook(self):\n res = \\\n\"\"\"{fname} = shared_object.{fname}\n {fname}.restype = POINTER({structname})\n {varname} = {fname}()\n\n\"\"\"\n fragments ={\n \"varname\": self._namespace_mangle(self.namespace) + \"_plugin\",\n \"fname\": \"___madz_LANG_python_get_out_struct\" if self.namespace == \"\" else \"___madz_LANG_python_get_\"+self._namespace_mangle(self.namespace) + \"_struct\",\n \"structname\": self.python_madz_types + (\"OUTSTRUCT\" if self.namespace == \"\" else self._namespace_mangle(self.namespace))\n }\n\n return res.format(**fragments)", "def to_py_name(cpp_name, entry_type):\r\n if entry_type == 'function':\r\n return cpp_name\r\n first_underscore = cpp_name.find('_')\r\n assert(first_underscore != -1)\r\n return cpp_name[first_underscore + 1:]", "def cython_py2c(self, name, t, inst_name=None, proxy_name=None):\n t = self.canon(t)\n if isinstance(t, basestring) or 0 == t[-1] or self.isrefinement(t[-1]):\n last = ''\n elif isinstance(t[-1], int):\n last = ' [{0}]'.format(t[-1])\n else:\n last = ' ' + t[-1]\n tkey = t\n tinst = None\n while tkey not in self.cython_py2c_conv and not isinstance(tkey, basestring):\n tinst = tkey\n tkey = tkey[1] if (0 < len(tkey) and self.isrefinement(tkey[1])) else tkey[0]\n if tkey not in self.cython_py2c_conv:\n tkey = t\n while tkey not in self.cython_py2c_conv and \\\n not isinstance(tkey, basestring):\n tkey = tkey[0]\n py2ct = self.cython_py2c_conv[tkey]\n if callable(py2ct):\n self.cython_py2c_conv[t] = py2ct(t, self)\n py2ct = self.cython_py2c_conv[t]\n if py2ct is NotImplemented or py2ct is None:\n raise NotImplementedError('conversion from Python to C/C++ for ' + \\\n str(t) + ' has not been implemented.')\n body_template, rtn_template = py2ct\n var = name if inst_name is None else \"{0}.{1}\".format(inst_name, name)\n proxy_name = \"{0}_proxy\".format(name) if proxy_name is None else proxy_name\n tstr = self.typestr(t, self)\n template_kw = dict(var=var, proxy_name=proxy_name, last=last, t=tstr)\n nested = False\n if self.isdependent(tkey):\n tsig = [ts for ts in self.refined_types if ts[0] == tkey][0]\n for ts, ti in zip(tsig[1:], tinst[1:]):\n if isinstance(ts, basestring):\n template_kw[ts] = self.cython_ctype(ti)\n else:\n template_kw[ti[0]] = ti[2]\n vartype = self.refined_types[tsig]\n if vartype in tsig[1:]:\n vartype = tinst[tsig.index(vartype)][1]\n if self.isrefinement(vartype):\n nested = True\n vdecl, vbody, vrtn = self.cython_py2c(var, vartype)\n template_kw['var'] = vrtn\n body_filled = body_template.format(**template_kw)\n if rtn_template:\n if '{t.cython_ctype}'in body_template:\n deft = tstr.cython_ctype\n elif '{t.cython_ctype_nopred}'in body_template:\n deft = tstr.cython_ctype_nopred\n elif '{t.cython_cytype_nopred}'in body_template:\n deft = tstr.cython_cytype_nopred\n else:\n deft = tstr.cython_cytype\n decl = \"cdef {0} {1}\".format(deft, proxy_name)\n body = body_filled\n rtn = rtn_template.format(**template_kw)\n decl += '\\n'+\"\\n\".join([l for l in body.splitlines() \\\n if l.startswith('cdef')])\n body = \"\\n\".join([l for l in body.splitlines() \\\n if not 
l.startswith('cdef')])\n else:\n decl = body = None\n rtn = body_filled\n if nested:\n decl = '' if decl is None else decl\n vdecl = '' if vdecl is None else vdecl\n decl = (vdecl + '\\n' + decl).strip()\n decl = None if 0 == len(decl) else decl\n body = '' if body is None else body\n vbody = '' if vbody is None else vbody\n body = (vbody + '\\n' + body).strip()\n body = None if 0 == len(body) else body\n return decl, body, rtn", "def get_func(name, argtypes=None, restype=c_int, lib=libDE):\n logger.debug(\"Getting NewWordFinder API function: 'name': '{}', 'argtypes': '{}',\"\n \" 'restype': '{}'.\".format(name, argtypes, restype))\n func = getattr(lib, name)\n if argtypes is not None:\n func.argtypes = argtypes\n if restype is not c_int:\n func.restype = restype\n logger.debug(\"NewWordFinder API function '{}' retrieved.\".format(name))\n return func", "def cast(*args):\n return _ITKCostFunctionsPython.itkCumulativeGaussianCostFunction_cast(*args)", "def test_vulkan_func_pointer_with_const_member() -> None:\n\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n <type category=\"funcpointer\">typedef VkBool32 (VKAPI_PTR *\n <name>PFN_vkDebugReportCallbackEXT</name>)(\n <type>VkDebugReportFlagsEXT</type> flags,\n <type>VkDebugReportObjectTypeEXT</type> objectType,\n <type>uint64_t</type> object,\n <type>size_t</type> location,\n <type>int32_t</type> messageCode,\n const <type>char</type>* pLayerPrefix,\n const <type>char</type>* pMessage,\n <type>void</type>* pUserData);</type>\n \"\"\"\n\n funcptr = funcptr_parser.parse(ET.fromstring(xml))\n\n argument_names = list(funcptr.arguments.keys())\n assert argument_names[4] == \"messageCode\"\n assert funcptr.arguments[\"pLayerPrefix\"].argument_type == \"const char*\"", "def _init_signature(func_name, restype, argtypes):\n global cfi\n f = getattr(cfi, func_name)\n f.restype = restype\n f.argtypes = argtypes", "def libSetup(path):\n lib = CDLL(path)\n lib.visitPoints.argtypes = [c_int, c_int, c_char_p]\n lib.visitPoints.restype = c_int\n return lib", "def extern(fn):\n return builtin(fn)", "def function(fnc, *args, **kwargs):\n return Function(fnc, args=args, kwargs=kwargs).tunable()", "def generate_code(spn_id, spn, meta_types, floating_data_type):\r\n\r\n # make sure we have ids\r\n assign_ids(spn)\r\n\r\n # fill method body according to SPN structure\r\n method_body = generate_method_body(spn, spn, floating_data_type, 0)\r\n\r\n # build parameters used in generated c++ function\r\n method_params = []\r\n passed_params = []\r\n for i, type in enumerate(meta_types):\r\n if type == MetaType.DISCRETE:\r\n method_params += [f'vector <int> possibleValues{i}', f'int nullValueIdx{i}']\r\n passed_params += [f'py::arg(\"possibleValues{i}\")', f'py::arg(\"nullValueIdx{i}\")']\r\n elif type == MetaType.REAL:\r\n method_params += [f'bool inverse{i}', f'bool leftMinusInf{i}', f'float leftCondition{i}',\r\n f'bool rightMinusInf{i}', f'float rightCondition{i}', f'bool leftIncluded{i}',\r\n f'bool rightIncluded{i}', f'float nullValue{i}']\r\n passed_params += [f'py::arg(\"inverse{i}\")', f'py::arg(\"leftMinusInf{i}\")', f'py::arg(\"leftCondition{i}\")',\r\n f'py::arg(\"rightMinusInf{i}\")', f'py::arg(\"rightCondition{i}\")',\r\n f'py::arg(\"leftIncluded{i}\")', f'py::arg(\"rightIncluded{i}\")', f'py::arg(\"nullValue{i}\")']\r\n\r\n value_dictionary = {\r\n 'spn_id': spn_id,\r\n 'method_body': method_body,\r\n 'method_params': ', '.join(method_params),\r\n 'node_count': get_number_of_nodes(spn),\r\n 'passed_params': ', '.join(passed_params),\r\n 
'floating_data_type': floating_data_type\r\n }\r\n generated_method = replace_template(TemplatePath.METHOD_MASTER, value_dictionary, 0)\r\n registrate_method = replace_template(TemplatePath.REGISTRATION_MASTER, value_dictionary, 0)\r\n\r\n return generated_method, registrate_method", "def map_string2func(funcname, clss, compute_capability):\n if \"_get_\" + funcname not in globals():\n raise AttributeError(\"kernel type '\" + funcname + \"' not understood\")\n return globals()[\"_get_\" + funcname](clss, compute_capability)", "def c_code(self, node, name, (a_val, a_ind, a_ptr, b), (z,), sub):\r\n # retrieve dtype number\r\n typenum_z = tensor.TensorType(self.dtype_out, []).dtype_specs()[2]\r\n if node.inputs[0].type.dtype in ('complex64', 'complex128'):\r\n raise NotImplementedError('Complex types are not supported for a_val')\r\n if node.inputs[3].type.dtype in ('complex64', 'complex128'):\r\n raise NotImplementedError('Complex types are not supported for b')\r\n\r\n return \"\"\"\r\n if (PyArray_NDIM(%(a_val)s) != 1) {PyErr_SetString(PyExc_NotImplementedError, \"rank(a_val) != 1\"); %(fail)s;}\r\n if (PyArray_NDIM(%(a_ind)s) != 1) {PyErr_SetString(PyExc_NotImplementedError, \"rank(a_ind) != 1\"); %(fail)s;}\r\n if (PyArray_NDIM(%(a_ptr)s) != 1) {PyErr_SetString(PyExc_NotImplementedError, \"rank(a_ptr) != 1\"); %(fail)s;}\r\n if (PyArray_NDIM(%(b)s) != 2) {PyErr_SetString(PyExc_NotImplementedError, \"rank(b) != 2\"); %(fail)s;}\r\n\r\n if (PyArray_TYPE(%(a_ind)s) != NPY_INT32) {\r\n PyErr_SetString(PyExc_NotImplementedError, \"a_ind dtype not INT32\"); %(fail)s;}\r\n\r\n if (PyArray_TYPE(%(a_ptr)s) != NPY_INT32)\r\n {PyErr_SetString(PyExc_NotImplementedError, \"a_ptr dtype not INT32\"); %(fail)s;}\r\n\r\n if (PyArray_DIMS(%(a_val)s)[0] != PyArray_DIMS(%(a_ind)s)[0])\r\n {PyErr_SetString(PyExc_NotImplementedError, \"a_val and a_ind have different lengths\"); %(fail)s;}\r\n\r\n if ((!%(z)s)\r\n || (PyArray_DIMS(%(z)s)[0] != PyArray_DIMS(%(a_ptr)s)[0]-1) //a's rows\r\n || (PyArray_DIMS(%(z)s)[1] != PyArray_DIMS(%(b)s)[1]) //b's columns\r\n )\r\n {\r\n {Py_XDECREF(%(z)s);}\r\n npy_intp dims[] = {0, 0};\r\n dims[0] = PyArray_DIMS(%(a_ptr)s)[0]-1;\r\n dims[1] = PyArray_DIMS(%(b)s)[1];\r\n %(z)s = (PyArrayObject*) PyArray_SimpleNew(2, dims, %(typenum_z)s);\r\n }\r\n\r\n {\r\n // sparse array has size MxK, dense KxN, output MxN\r\n npy_intp M = PyArray_DIMS(%(z)s)[0];\r\n npy_intp N = PyArray_DIMS(%(z)s)[1];\r\n npy_intp K = PyArray_DIMS(%(b)s)[0];\r\n\r\n // strides tell you how many bytes to skip to go to next column/row entry\r\n npy_intp Szm = PyArray_STRIDES(%(z)s)[0] / PyArray_DESCR(%(z)s)->elsize;\r\n npy_intp Szn = PyArray_STRIDES(%(z)s)[1] / PyArray_DESCR(%(z)s)->elsize;\r\n npy_intp Sbm = PyArray_STRIDES(%(b)s)[0] / PyArray_DESCR(%(b)s)->elsize;\r\n npy_intp Sbn = PyArray_STRIDES(%(b)s)[1] / PyArray_DESCR(%(b)s)->elsize;\r\n npy_intp Sval = PyArray_STRIDES(%(a_val)s)[0] / PyArray_DESCR(%(a_val)s)->elsize;\r\n npy_intp Sind = PyArray_STRIDES(%(a_ind)s)[0] / PyArray_DESCR(%(a_ind)s)->elsize;\r\n npy_intp Sptr = PyArray_STRIDES(%(a_ptr)s)[0] / PyArray_DESCR(%(a_ptr)s)->elsize;\r\n\r\n // pointers to access actual data in the arrays passed as params.\r\n dtype_%(z)s* __restrict__ Dz = (dtype_%(z)s*)PyArray_DATA(%(z)s);\r\n const dtype_%(a_val)s* __restrict__ Dval = (dtype_%(a_val)s*)PyArray_DATA(%(a_val)s);\r\n const npy_int32 * __restrict__ Dind = (npy_int32*)PyArray_DATA(%(a_ind)s);\r\n const npy_int32 * __restrict__ Dptr = (npy_int32*)PyArray_DATA(%(a_ptr)s);\r\n\r\n //npy_intp nnz = 
PyArray_DIMS(%(a_ind)s)[0];\r\n\r\n //clear the output array\r\n memset(Dz, 0, M*N*sizeof(dtype_%(z)s));\r\n\r\n //iterate over the sparse array, making the most of an entry wherever we find it.\r\n // Normal matrix matrix multiply:\r\n // for m\r\n // for n\r\n // for k\r\n // z[m, n] += a[m, k] * b[k, n]\r\n // Here instead:\r\n // for m\r\n // for k (sparse)\r\n // for n\r\n // z[m, n] += a[m, k] * b[k, n]\r\n\r\n // loop over inner dimension\r\n for (npy_int64 m = 0; m < M; ++m)\r\n {\r\n // pointer to m-th row of the output matrix Z\r\n dtype_%(z)s* __restrict__ zm = (dtype_%(z)s*)(PyArray_BYTES(%(z)s) + PyArray_STRIDES(%(z)s)[0] * m);\r\n\r\n // loop over sparse rows indices through index pointer array\r\n // (amounts to looping over cols k of sparse matrix)\r\n for (npy_int32 k_idx = Dptr[m * Sptr]; k_idx < Dptr[(m+1) * Sptr]; ++k_idx)\r\n {\r\n npy_int32 k = Dind[k_idx * Sind]; // col index of non-null value for row m\r\n const dtype_%(a_val)s Amk = Dval[k_idx * Sval]; // actual value at that location\r\n\r\n // get pointer to k-th row of dense matrix\r\n const dtype_%(b)s* __restrict__ bk = (dtype_%(b)s*)(PyArray_BYTES(%(b)s) + PyArray_STRIDES(%(b)s)[0] * k);\r\n\r\n // loop over final dimension (cols of dense matrix) and perform dot product\r\n for(npy_int32 n = 0; n < N; ++n)\r\n {\r\n zm[n*Szn] += Amk * bk[n*Sbn];\r\n }\r\n }\r\n }\r\n }\r\n\r\n \"\"\" % dict(locals(), **sub)", "def convert_function(self, access_modifier, return_type, func_name, params):\n\n # Run super func_name\n access_modifier, return_type, func_name, params = \\\n super().convert_function(access_modifier, return_type,\n func_name, params)\n\n # Make and return processed function definition\n return [self.make_function_definition(return_type, func_name, params)], []", "def _cast_strlist_to_C(py_strlist):\n c_strarr = (str_t * len(py_strlist))()\n c_strarr[:] = py_strlist\n return c_strarr", "def convert(self):\n\t\tself.make_func_dict() #sets self.func_dict\n\t\tself.make_main_function() #sets self.main\n\t\tself.remove_lambda_nesting()\n\t\tself.replace_self_with_func_names()\n\t\tself.make_func_declarations() #sets self.cpp_declarations\n\t\tself.make_func_bodies() #sets self.cpp_func_bodies\t\t\n\t\tself.make_cpp_func_bodies()\n\t\tlines = []\n\t\tlines.append('#include \"lithp.hpp\"')\n\t\tfor name, signature in self.cpp_declarations.iteritems():\n\t\t\tlines.append(signature + ';')\n\n\t\tfor name, signature in self.cpp_declarations.iteritems():\n\t\t\tif name == 'main': continue\n\t\t\tlines.append(signature + '{')\n\t\t\tlines.append(' return ' + self.cpp_func_bodies[name] + ';\\n}')\n\t\tlines.append(\n\"\"\"\nint main(){\n %s;\n return 0;\n}\n\"\"\" % self.cpp_func_bodies['main'])\n\t\tself.converted = '\\n'.join(lines)\t\t\n\t\treturn self.converted", "def _create_function(self, expr):\n bb_entry = self.fn.append_basic_block('entry')\n builder = ll.IRBuilder(bb_entry)\n\n lj = LLVMJitCallbackPrinter(self.module, builder, self.fn,\n func_arg_map=self.param_dict)\n\n ret = self._convert_expr(lj, expr)\n\n if self.signature.ret_arg:\n output_fp_ptr = builder.bitcast(self.fn.args[self.signature.ret_arg],\n ll.PointerType(self.fp_type))\n for i, val in enumerate(ret):\n index = ll.Constant(ll.IntType(32), i)\n output_array_ptr = builder.gep(output_fp_ptr, [index])\n builder.store(val, output_array_ptr)\n builder.ret(ll.Constant(ll.IntType(32), 0)) # return success\n else:\n lj.builder.ret(self._wrap_return(lj, ret))\n\n strmod = str(self.module)\n return strmod", "def create_typedef(*args):\n return 
_ida_hexrays.create_typedef(*args)", "def make_func_declarations(self):\n\n\t\tfor name in self.func_dict:\n\t\t\tbody = Lexer(self.func_dict[name]).get_tokens()\n\t\t\ti = body.index('\\\\') + 1 #Start of parameters\n\t\t\tj = body.match_paren(i)\n\t\t\tparam_tokens = body[i + 1: j] #Stuff inside parentheses\n\t\t\t#\t\t\tprint \"param list:\", param_tokens\n\n\t\t\tparams = self.split_params(param_tokens)\n\t\t\tparams = map(lambda n: n.split(':'), params)\n\t\t\t#params is now [[<name>,<type>],...]\n\t\t\tc_types = map(lambda n: self.convert_type(*n), params)\n\t\t\t#\t\t\tprint c_types\n\n\t\t\treturn_type = ''\n\t\t\t# +2 to skip over \")\" and \":\"\n\t\t\tif body[j+2] == '(': #Function returns another function\n\t\t\t\t# +3 for [\")\",\"->\",\"<type>\"]\n\t\t\t\tfor x in xrange(j+2, body.match_paren(j+2)+3):\n\t\t\t\t\treturn_type += body[x]\n\t\t\telse: #Function returns a concrete type\n\t\t\t\treturn_type = body[j+2] #+2 to skip over \")\" and \":\"\n\n\t\t\tfunc_type = self.convert_type(name, return_type)\n\t\t\t#\t\t\tprint \"params\", params\n\t\t\t#\t\t\tprint \"c_types\", c_types\n\t\t\t#while True:exec raw_input() in globals(), locals()\n\t\t\tself.cpp_declarations[name] = func_type + '(' + ', '.join(c_types) + ')'\n\n\t\tself.cpp_declarations['main'] = 'int main()' #actually this isn't used", "def SBMLFunctionDefinitionConverter_init():\n return _libsbml.SBMLFunctionDefinitionConverter_init()", "def __init__(self, name, c_arg):\n super().__init__(name)\n self._c_arg = c_arg", "def load_c_functions(self):\n\n # Load shared object\n lib = ctypes.cdll.LoadLibrary(os.path.join(self.working_directory,\"models/doubly_constrained/flow_forward_models.so\"))\n lib2 = ctypes.cdll.LoadLibrary(os.path.join(self.working_directory,\"models/doubly_constrained/potential_function.so\"))\n\n # Load DSF procedure flow inference\n self.infer_flows_dsf_procedure = lib.infer_flows_dsf_procedure\n self.infer_flows_dsf_procedure.restype = ctypes.c_double\n self.infer_flows_dsf_procedure.argtypes = [ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ctypes.c_size_t,\n ctypes.c_size_t,\n ctypes.c_double,\n ctypes.c_size_t,\n ctypes.c_bool,\n ctypes.c_bool]\n\n\n # Load Newton Raphson procedure flow inference\n self.infer_flows_newton_raphson = lib.infer_flows_newton_raphson\n self.infer_flows_newton_raphson.restype = None #ctypes.c_double\n self.infer_flows_newton_raphson.argtypes = [ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ctypes.c_double,\n ctypes.c_size_t,\n ctypes.c_size_t,\n ctypes.c_size_t,\n ctypes.c_size_t]\n\n # Load Iterative proportional filtering procedure flow inference\n self.infer_flows_ipf_procedure = lib.infer_flows_ipf_procedure\n self.infer_flows_ipf_procedure.restype = ctypes.c_double\n self.infer_flows_ipf_procedure.argtypes = [ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, 
flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ctypes.c_size_t,\n ctypes.c_size_t,\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ctypes.c_size_t,\n ctypes.c_double,\n ctypes.c_bool]\n\n # Load Iterative proportional filtering procedure flow inference\n self.infer_flows_ipf_procedure_singly = lib.infer_flows_ipf_procedure_singly\n self.infer_flows_ipf_procedure_singly.restype = ctypes.c_double\n self.infer_flows_ipf_procedure_singly.argtypes = [ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ctypes.c_size_t,\n ctypes.c_size_t,\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ctypes.c_size_t,\n ctypes.c_double,\n ctypes.c_bool]\n\n # Load potential function\n self.potential_stochastic = lib2.potential_stochastic\n self.potential_stochastic.restype = ctypes.c_double\n self.potential_stochastic.argtypes = [ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ndpointer(ctypes.c_double, flags=\"C_CONTIGUOUS\"),\n ctypes.c_size_t,\n ctypes.c_size_t]", "def generate_prototype(self):\n apientry = \"\"\n if self.__name[:2] == \"gl\":\n apientry = \"DNLOAD_APIENTRY \"\n params = \"void\"\n if self.__parameters:\n params = \", \".join(self.__parameters)\n return \"(%s (%s*)(%s))\" % (self.__returntype, apientry, params)", "def CALL(name, *args):\r\n funcname = 'is_' + name\r\n func = getattr(libueye, funcname)\r\n new_args = []\r\n for a in args: \r\n if isinstance (a, unicode):\r\n print name, 'argument',a, 'is unicode'\r\n new_args.append (str (a))\r\n else:\r\n new_args.append (a)\r\n return func(*new_args)", "def __init__(self, compiler_module, function_name, type_signature):\n py_typecheck.check_type(compiler_module,\n iree_compiler.binding.CompilerModule)\n py_typecheck.check_type(function_name, str)\n py_typecheck.check_type(type_signature, computation_types.FunctionType)\n self._compiler_module = compiler_module\n self._function_name = function_name\n self._type_signature = type_signature", "def fsig(\n arg_types: ArgTypes, name: Text, span: Span, ctx: DeduceCtx,\n parametric_bindings: Optional[ParametricBindings]\n) -> Tuple[ConcreteType, SymbolicBindings]:\n logging.vlog(5, 'Instantiating for builtin %r @ %s', name, span)\n _Checker(arg_types, name, span).len(2).is_array(0).is_fn(1, argc=1)\n t = arg_types[0].get_element_type() # pytype: disable=attribute-error\n u, symbolic_bindings = parametric_instantiator.instantiate_function(\n span, arg_types[1], (t,), ctx, parametric_bindings, {})\n return_type = ArrayType(u, arg_types[0].size) # pytype: disable=attribute-error\n return FunctionType(arg_types, return_type), symbolic_bindings", "def get_pyos_inputhook_as_func(self):\n return self.PYFUNC.in_dll(ctypes.pythonapi,\"PyOS_InputHook\")", "def transform_npu_function(self, _, func: relay.Function) -> relay.Function:\n\n tir_mod, const_dict = _lower_to_tir(func, self.scheduler)\n\n for param in const_dict.keys():\n const_dict[param] = tvm.nd.array(const_dict[param])\n\n compiler_name = \"ethos-u\"\n primfunc = tir_mod[\"main\"]\n primfunc = primfunc.with_attr(\"global_symbol\", func.attrs[\"global_symbol\"])\n 
primfunc = primfunc.with_attr(\"ethos-u.constants\", const_dict)\n primfunc = primfunc.with_attr(\"target\", tvm.target.Target(compiler_name))\n return primfunc", "def convert_to_user_call(*args):\n return _ida_hexrays.convert_to_user_call(*args)", "def t_CCONST(t):\n return t", "def make_function_callbacks(self):\n res = \"\"\n for node in self.description.definitions():\n if isinstance(node.type.get_type(), pdl.TypeFunction):\n frags={\n \"name\": node.name,\n \"nameupper\": self.python_madz_deftypes + \"___\" + node.name,\n \"sanitize\": \"_sanitize_python_callback\" if isinstance(node.type.return_type.get_type(), pdl.TypePointer) else \"_python_callback\"\n }\n res += \\\n\"\"\"\n temp = cast({sanitize}(user_code_module.{name}, {nameupper}), {nameupper})\n keepers['{nameupper}'] = temp\n _plugin.contents.{name} = temp\n\"\"\".format(**frags)\n return res", "def _loadPyc(self, vfile, timestamp):\n code = None\n f = open(vfile, 'rb')\n if f.read(4) == imp.get_magic():\n t = struct.unpack('<I', f.read(4))[0]\n if not timestamp or t == timestamp:\n code = marshal.loads(f.read())\n f.close()\n return code", "def clips_to_py_type(ctype):\n\n ptype = None\n if ctype == \"INTEGER\":\n ptype = int\n elif ctype == \"FLOAT\":\n ptype = float\n elif ctype == \"STRING\":\n ptype = str\n elif ctype == \"BOOLEAN\":\n ptype = bool\n return ptype", "def cython_functionname(self, t, cycyt=None):\n if cycyt is None:\n t = self.canon(t)\n if isinstance(t, basestring):\n return t, self.cython_functionnames[t]\n elif t[0] in self.base_types:\n return t, self.cython_functionnames[t[0]]\n return self.cython_functionname(t, self.cython_functionnames[t[0]])\n d = {}\n for key, x in zip(self.template_types[t[0]], t[1:-1]):\n if isinstance(x, basestring):\n val = self.cython_functionnames[x] if x in self.cython_functionnames \\\n else x\n elif isinstance(x, Number):\n val = str(x).replace('-', 'Neg').replace('+', 'Pos')\\\n .replace('.', 'point')\n elif x[0] in self.base_types:\n val = self.cython_functionnames[x[0]]\n else:\n _, val = self.cython_functionname(x, self.cython_functionnames[x[0]])\n d[key] = val\n return t, cycyt.format(**d)", "def Cpp_test():\n pass", "def scalar_object_check(py_object, c_object):\n\n try :\n check_type = check_type_registry[c_object.dtype, c_object.precision]\n except KeyError:\n errors.report(PYCCEL_RESTRICTION_TODO, symbol=c_object.dtype,severity='fatal')\n\n check_func = FunctionDef(name = check_type,\n body = [],\n arguments = [Variable(dtype=PyccelPyObject(), name = 'o', is_pointer=True)],\n results = [Variable(dtype=NativeBool(), name = 'r')])\n\n return FunctionCall(check_func, [py_object])" ]
[ "0.72192234", "0.68052465", "0.6632314", "0.64130855", "0.6358928", "0.63258225", "0.6155145", "0.6139662", "0.60484755", "0.60477465", "0.60254824", "0.6015706", "0.59863913", "0.59403557", "0.58158", "0.57880235", "0.57846373", "0.5784282", "0.5761604", "0.57465273", "0.5739184", "0.5732361", "0.5699242", "0.5689414", "0.56871146", "0.56743395", "0.5621684", "0.5606441", "0.557284", "0.5560054", "0.5550692", "0.5548199", "0.5538608", "0.5509883", "0.5496695", "0.54882526", "0.54749715", "0.5469637", "0.54647005", "0.5453082", "0.5449594", "0.54483104", "0.54423153", "0.54373425", "0.54047936", "0.5397263", "0.53869534", "0.53856236", "0.5378772", "0.5376972", "0.5367715", "0.53665125", "0.5364052", "0.5361049", "0.53313553", "0.5313965", "0.5313965", "0.53136986", "0.53134155", "0.5308007", "0.5289526", "0.5276639", "0.5273215", "0.526558", "0.526499", "0.5237518", "0.522835", "0.5228044", "0.52214754", "0.5217716", "0.5199025", "0.5196456", "0.51826125", "0.5177866", "0.51707673", "0.5170285", "0.5164487", "0.5157318", "0.51536155", "0.51445454", "0.5130723", "0.512936", "0.51204073", "0.5117358", "0.5107843", "0.50997865", "0.50909173", "0.5090212", "0.50864625", "0.5073718", "0.50734407", "0.5069779", "0.50576967", "0.50516623", "0.5028645", "0.50063294", "0.50043553", "0.4987479", "0.4983432", "0.4975385" ]
0.69464856
1
Generate a function call of the C/Python API PyErr_SetString
def PyErr_SetString(exception, message):
    func = FunctionDef(name = 'PyErr_SetString',
                       body = [],
                       arguments = [Variable(dtype = PyccelPyObject(), name = 'o'),
                                    Variable(dtype = NativeString(), name = 's')],
                       results = [])
    exception = Variable(PyccelPyObject(), name = exception)
    return FunctionCall(func, [exception, message])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def failure_code(sub):\r\n return '''{\r\n %(failure_var)s = %(id)s;\r\n if (!PyErr_Occurred()) {\r\n PyErr_SetString(PyExc_RuntimeError,\r\n \"Unexpected error in an Op's C code. \"\r\n \"No Python exception was set.\");\r\n }\r\n goto __label_%(id)i;}''' % sub", "def SPYExceptionHandler(*excargs, **exckwargs):\n\n # Depending on the number of input arguments, we're either in Jupyter/iPython\n # or \"regular\" Python - this matters for coloring error messages\n if len(excargs) == 3:\n isipy = False\n etype, evalue, etb = excargs\n else:\n etype, evalue, etb = sys.exc_info()\n try: # careful: if iPython is used to launch a script, ``get_ipython`` is not defined\n ipy = get_ipython()\n isipy = True\n cols = ipy.InteractiveTB.Colors\n cols.filename = cols.filenameEm\n cols.bold = ansiBold\n sys.last_traceback = etb # smartify ``sys``\n except NameError:\n isipy = False\n\n # Pass ``KeyboardInterrupt`` on to regular excepthook so that CTRL + C\n # can still be used to abort program execution (only relevant in \"regular\"\n # Python prompts)\n if issubclass(etype, KeyboardInterrupt) and not isipy:\n sys.__excepthook__(etype, evalue, etb)\n return\n\n # Starty by putting together first line of error message\n emsg = \"{}\\nSyNCoPy encountered an error in{} \\n\\n\".format(cols.topline if isipy else \"\",\n cols.Normal if isipy else \"\")\n\n # If we're dealing with a `SyntaxError`, show it and getta outta here\n if issubclass(etype, SyntaxError):\n\n # Just format exception, don't mess around w/ traceback\n exc_fmt = traceback.format_exception_only(etype, evalue)\n for eline in exc_fmt:\n if \"File\" in eline:\n eline = eline.split(\"File \")[1]\n fname, lineno = eline.split(\", line \")\n emsg += \"{}{}{}\".format(cols.filename if isipy else \"\",\n fname,\n cols.Normal if isipy else \"\")\n emsg += \", line {}{}{}\".format(cols.lineno if isipy else \"\",\n lineno,\n cols.Normal if isipy else \"\")\n elif \"SyntaxError\" in eline:\n smsg = eline.split(\"SyntaxError: \")[1]\n emsg += \"{}{}SyntaxError{}: {}{}{}\".format(cols.excName if isipy else \"\",\n cols.bold if isipy else \"\",\n cols.Normal if isipy else \"\",\n cols.bold if isipy else \"\",\n smsg,\n cols.Normal if isipy else \"\")\n else:\n emsg += \"{}{}{}\".format(cols.line if isipy else \"\",\n eline,\n cols.Normal if isipy else \"\")\n\n # Show generated message and leave (or kick-off debugging in Jupyer/iPython if %pdb is on)\n logger = get_parallel_logger()\n logger.critical(emsg)\n if isipy:\n if ipy.call_pdb:\n ipy.InteractiveTB.debugger()\n return\n\n # Build an ordered(!) 
dictionary that encodes separators for traceback components\n sep = OrderedDict({\"filename\": \", line \",\n \"lineno\": \" in \",\n \"name\": \"\\n\\t\",\n \"line\": \"\\n\"})\n\n # Find \"root\" of traceback tree (and remove outer-most frames)\n keepgoing = True\n while keepgoing:\n frame = traceback.extract_tb(etb)[0]\n etb = etb.tb_next\n if frame.filename.find(\"site-packages\") < 0 or \\\n (frame.filename.find(\"site-packages\") >= 0 and \\\n frame.filename.find(\"syncopy\") >= 0):\n tb_entry = \"\"\n for attr in sep.keys():\n tb_entry += \"{}{}{}{}\".format(getattr(cols, attr) if isipy else \"\",\n getattr(frame, attr),\n cols.Normal if isipy else \"\",\n sep.get(attr))\n emsg += tb_entry\n keepgoing = False\n\n # Format the exception-part of the traceback - the resulting list usually\n # contains only a single string - if we find more just use everything\n exc_fmt = traceback.format_exception_only(etype, evalue)\n if len(exc_fmt) == 1:\n exc_msg = exc_fmt[0]\n idx = exc_msg.rfind(etype.__name__)\n if idx >= 0:\n exc_msg = exc_msg[idx + len(etype.__name__):]\n exc_name = \"{}{}{}{}\".format(cols.excName if isipy else \"\",\n cols.bold if isipy else \"\",\n etype.__name__,\n cols.Normal if isipy else \"\")\n else:\n exc_msg = \"\".join(exc_fmt)\n exc_name = \"\"\n\n # Now go through traceback and put together a list of strings for printing\n if __tbcount__ and etb is not None:\n emsg += \"\\n\" + \"-\"*80 + \"\\nAbbreviated traceback:\\n\\n\"\n tb_count = 0\n tb_list = []\n for frame in traceback.extract_tb(etb):\n if frame.filename.find(\"site-packages\") < 0 or \\\n (frame.filename.find(\"site-packages\") >= 0 and \\\n frame.filename.find(\"syncopy\") >= 0):\n tb_entry = \"\"\n for attr in sep.keys():\n tb_entry += \"{}{}{}{}\".format(\"\", # placeholder for color if wanted\n getattr(frame, attr),\n \"\", # placeholder for color if wanted\n sep.get(attr))\n tb_list.append(tb_entry)\n tb_count += 1\n if tb_count == __tbcount__:\n break\n emsg += \"\".join(tb_list)\n\n # Finally, another info message\n if etb is not None:\n emsg += \"\\nUse `import traceback; import sys; traceback.print_tb(sys.last_traceback)` \" + \\\n \"for full error traceback.\\n\"\n\n # Glue actual Exception name + message to output string\n emsg += \"{}{}{}{}{}\".format(\"\\n\" if isipy else \"\",\n exc_name,\n cols.bold if isipy else \"\",\n exc_msg,\n cols.Normal if isipy else \"\",)\n\n\n # Show generated message and get outta here\n logger = get_parallel_logger()\n logger.critical(emsg)\n\n # Kick-start debugging in case %pdb is enabled in Jupyter/iPython\n if isipy:\n if ipy.call_pdb:\n ipy.InteractiveTB.debugger()", "def format_exc():\n from traceback import format_exc\n return format_exc().decode('utf-8', 'surrogateescape')", "def getCompilerError():", "def ErrorString(self): # real signature unknown; restored from __doc__\n pass", "def transformErr2Str(self,*args):\n error_code = c_int32(args[0])\n error_str = create_string_buffer(\"\\000\"*1024)\n status = self.__acqiris_QuantroDLL1.transformErr2Str(self.__instrumentID,error_code,error_str) \n return str(error_str)", "def traceback(self):", "def stack_trace(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.str]:", "def exc_info(): # real signature unknown; restored from __doc__\n pass", "def test_friendly_exception_formatting_exc_with_str_overload():\n ex = InsufficientSignatures(1, 3)\n\n formatted_exception = friendlyEx(ex)\n\n assert formatted_exception == '{}'.format(ex.reason)", "def exception(self):\n exc_type, 
exc_value, exc_tb = sys.exc_info()\n cui.message(traceback.format_exception_only(exc_type, exc_value)[-1],\n log_message=traceback.format_exc())", "def _handle_error(self, err: ctypes.c_char_p, method: str) -> Exception:\n if err:\n string = ctypes.string_at(err).decode(\"utf-8\")\n self._free_error(err)\n return RuntimeError(string)\n else:\n return RuntimeError(f\"Unknown error in {method}. \")", "def fancy_traceback(exc: Exception) -> str:\n text = \"\".join(traceback.format_exception(type(exc), exc, exc.__traceback__))\n return f\"```py\\n{text[-4086:]}\\n```\"", "def _FormatException(exc):\n return ''.join(traceback.format_exception_only(type(exc), exc))", "def test_does_not_crash(self):\n py_function(6)", "def repr_failure(self, excinfo):\n if excinfo.errisinstance(MypyError):\n return excinfo.value.args[0]\n return super().repr_failure(excinfo)", "def src_strerror(error):\n return ffi.string(_lib.src_strerror(error)).decode()", "def __ex(exception_string, internal=False):\n ex = str(exception_string).strip()\n while \" \" * 2 in ex:\n ex = ex.replace((\" \" * 2), \" \")\n if internal:\n ex = \"PaVal: \" + ex\n raise Exception(ex)", "def py_raise(*xs):\n raise NotImplemented", "def StandViz_ReportError( errorobj, args, Header = None ): # error reporting and traceback function\n (MyPath, MyFile) = os.path.split( args[0] ) # retrieve filename and path of running python script\n (MyBaseName, MyExt) = os.path.splitext( MyFile ) # separate basefilename from extension\n errorfilename = \"{}.txt\".format(MyBaseName) # create new error filename based on base of script filename\n ERRFILE = open( errorfilename, 'w' ) # open text file for writting\n if( Header != None ): ERRFILE.write( '%s\\n' % Header ) # if Header defined, write Header to file\n ERRFILE.write( \"Error running '{}'\\n\".format(MyFile) ) # write error message with filename\n MyTrace = errorobj[2] # retrieve error object\n while( MyTrace != None ): # loop through stack trace\n (line, file, name) = ( MyTrace.tb_lineno, MyTrace.tb_frame.f_code.co_filename, MyTrace.tb_frame.f_code.co_name ) # extract line, file, and error name\n F = open( file, 'r' ) # open source file of Python script\n L = F.readlines() # read scripot source into memory\n F.close() # close script file\n code = L[line-1].strip() # extract line of source code that caused error\n ERRFILE.write( \" File '{}', line {}, in {}\\n {}\\n\".format(file, line, name, code) ) # write filename, source code line, error name, and error code\n MyTrace = MyTrace.tb_next # step to next level of call stack trace\n ERRFILE.write( \"errorobj: {}\\n\".format(errorobj) ) # write error object and arguments for call\n ERRFILE.write( \"Calling Argument Vector: {}\\n\".format(args) ) # write calling arguments\n ERRFILE.close() # close text file with error stack trace\n os.system( \"notepad.exe {}\".format(errorfilename) ) # display error log file with notepad.exe", "def my_err_handler(traceback, exec_info):\n print \"Custom function invoked\"\n print \"Formatted exception\"\n print traceback.format_exc()\n print \"System exec info\"\n print exec_info\n exp_type, exp_value, exp_traceback = exec_info\n print \"String formatted exception\"\n print traceback.format_exception(exp_type, exp_value, exp_traceback)\n print \"End of custom function\"", "def test_cclerror_repr():\n e = pyccl.CCLError(\"blah\")\n e2 = eval(repr(e))\n assert str(e2) == str(e)\n assert e2 == e", "def text(eparams, context=5):\n import os\n import types\n import time\n import traceback\n import linecache\n import 
inspect\n import pydoc\n\n etype, evalue, etb = eparams\n if isinstance(etype, types.ClassType):\n etype = etype.__name__\n pyver = 'Python ' + sys.version.split()[0] + ': ' + sys.executable\n date = time.ctime(time.time())\n head = \"%s\\n%s\\n%s\\n\" % (str(etype), pyver, date) + '''\nA problem occurred in a Python script. Here is the sequence of\nfunction calls leading up to the error, in the order they occurred.\n'''\n\n frames = []\n records = inspect.getinnerframes(etb, context)\n for frame, file, lnum, func, lines, index in records:\n file = file and os.path.abspath(file) or '?'\n args, varargs, varkw, locals = inspect.getargvalues(frame)\n call = ''\n if func != '?':\n call = 'in ' + func + \\\n inspect.formatargvalues(args, varargs, varkw, locals,\n formatvalue=lambda value: '=' + pydoc.text.repr(value))\n\n highlight = {}\n\n def reader(lnum=[lnum]):\n highlight[lnum[0]] = 1\n try:\n return linecache.getline(file, lnum[0])\n finally:\n lnum[0] += 1\n vars = scanvars(reader, frame, locals)\n\n rows = [' %s %s' % (file, call)]\n if index is not None:\n i = lnum - index\n for line in lines:\n num = '%5d ' % i\n rows.append(num + line.rstrip())\n i += 1\n\n done, dump = {}, []\n for name, where, value in vars:\n if name in done:\n continue\n done[name] = 1\n if value is not __UNDEF__:\n if where == 'global':\n name = 'global ' + name\n elif where == 'local':\n name = name\n else:\n name = where + name.split('.')[-1]\n dump.append('%s = %s' % (name, pydoc.text.repr(value)))\n else:\n dump.append(name + ' undefined')\n\n rows.append('\\n'.join(dump))\n frames.append('\\n%s\\n' % '\\n'.join(rows))\n\n exception = ['%s: %s' % (str(etype), str(evalue))]\n if isinstance(evalue, types.InstanceType):\n for name in dir(evalue):\n value = pydoc.text.repr(getattr(evalue, name))\n exception.append('\\n%s%s = %s' % (\" \" * 4, name, value))\n\n return head + ''.join(frames) + ''.join(exception) + '''\n\nThe above is a description of an error in a Python program. Here is\nthe original traceback:\n\n%s\n''' % ''.join(traceback.format_exception(etype, evalue, etb))", "def exception_handler(exctype, val, trace):\n logger.info(\n ''.join(traceback.format_exception(exctype, val, trace)))", "def vpython_error_message():\n error_message = (\n \"<p>&#9888; Sorry, spacesimmer! OrbitX has crashed for \"\n \"some reason.</p>\"\n\n \"<p>Any information that OrbitX has on the crash has \"\n \"been saved to a logfile. 
If you want to get this problem fixed,\"\n \" send the contents of the log file \"\n \"<blockquote>\" +\n logs.logfile_name.replace('\\\\', '\\\\\\\\') +\n \"</blockquote> \"\n \"to Patrick Melanson along with a description of what was \"\n \"happening in the program when it crashed.</p>\"\n\n \"<p>Again, thank you for using OrbitX!</p>\"\n )\n vpython.canvas.get_selected().append_to_caption(f\"\"\"<script>\n if (document.querySelector('div.error') == null) {{\n error_div = document.createElement('div');\n error_div.className = 'error';\n error_div.innerHTML = \"{error_message}\";\n document.querySelector('body').prepend(error_div);\n }}\n </script>\"\"\")\n vpython.canvas.get_selected().append_to_caption(\"\"\"<style>\n .error {\n color: #D8000C !important;\n background-color: #FFBABA;\n margin: 10px 0;\n padding: 10px;\n border-radius: 5px 5px 5px 5px;\n width: 700px;\n }\n span.code {\n color: #D8000C !important;\n font-family: monospace;\n }\n blockquote {\n font-family: monospace;\n }\n </style>\"\"\")\n\n time.sleep(0.1) # Let vpython send out this update", "def graphical_exception_handler(self, exc_type, exc_value, exc_tb):\n bugdialog.ShowEI(exc_type, exc_value, exc_tb)\n if compat.PYTHON2: sys.exc_clear()", "def raise_error(Err):\n raise Err()", "def exception(self, *args, **kwargs):", "def _get_traceback(self, exc_info=None):\n import traceback\n import sys\n return '\\n'.join(traceback.format_exception(*(exc_info or sys.exc_info())))", "def create_exception(self, msg: str):", "def __exc_info(self):\n exctype, excvalue, tb = sys.exc_info()\n if sys.platform[:4] == 'java': ## tracebacks look different in Jython\n return (exctype, excvalue, tb)\n return (exctype, excvalue, tb)", "def tb():\n etype, value, tb = sys.exc_info()\n return \"%s: %s (%s@%s:%d)\" % (etype.__name__, value, tb.tb_frame.f_code.co_name, os.path.basename(tb.tb_frame.f_code.co_filename), tb.tb_lineno)", "def get_exception():\n raise Exception(\"example\")", "def inject_exception(self, value):\n if self.ident != threading.current_thread().ident:\n ctypes.pythonapi.PyThreadState_SetAsyncExc(\n ctypes.c_long(self.ident),\n ctypes.py_object(value))", "def get_exception():\n trace = ''\n exception = ''\n exc_list = traceback.format_exception_only(sys.exc_info()[0],\n sys.exc_info()[1])\n for entry in exc_list:\n exception += entry\n tb_list = traceback.format_tb(sys.exc_info()[2])\n for entry in tb_list:\n trace += entry\n return '%s\\n%s' % (exception, trace)", "def e(msg):\n raise Exception(repr(msg))", "def parallel_exception_error(pth, *args):\n\n return path(pth) + '\\n' + ''.join(args)", "def func_on_exception(*args, **keys):\n try:\n yield\n except Exception as exc:\n reraise = func(*args + (\":\", str(exc)), **keys)\n if not CRDS_EXCEPTION_TRAP:\n # In python-2, distinction between raise and \"raise something\". 
raise doesn't\n # wreck the traceback, raising a new improved exception does.\n raise\n # Augmented, the traceback is trashed from here down but the message is better when caught higher up.\n elif reraise:\n exc_class = keys.pop(\"exception_class\", exc.__class__)\n keys[\"end\"] = \"\"\n raise exc_class(format(*args + (\":\", str(exc)), **keys)) from exc\n else:\n pass # snuff the exception, func() probably issued a log message.", "def _async_raise(tid, exctype):\r\n # tid = ctypes.c_long(tid)\r\n if not inspect.isclass(exctype):\r\n exctype = type(exctype)\r\n ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype))\r\n # res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype))\r\n # if res == 0:\r\n # raise ValueError(\"invalid thread id\")\r\n # elif res != 1:\r\n # ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)\r\n # raise SystemError(\"PyThreadState_SetAsyncExc failed !\")\r", "def raise_(err):\n raise err", "def get_exception_data(exc_type=None, exc_value=None, tb=None, get_full_tb=False, max_var_length=4096 + 2048):\n\n head_var_length = int(max_var_length / 2)\n tail_var_length = max_var_length - head_var_length\n\n if not tb:\n exc_type, exc_value, tb = sys.exc_info()\n\n frames = get_traceback_frames(exc_value=exc_value, tb=tb, get_full_tb=get_full_tb)\n\n for i, frame in enumerate(frames):\n if \"vars\" in frame:\n frame_vars = []\n for k, v in frame[\"vars\"]:\n try:\n v = pformat(v)\n except Exception as e:\n try:\n v = saferepr(e)\n except Exception:\n v = \"An error occurred rendering the exception of type: \" + repr(e.__class__)\n # The force_escape filter assume unicode, make sure that works\n if isinstance(v, bytes):\n v = v.decode(\"utf-8\", \"replace\") # don't choke on non-utf-8 input\n # Trim large blobs of data\n if len(v) > max_var_length:\n v = f\"{v[0:head_var_length]}... 
\\n\\n<trimmed {len(v)} bytes string>\\n\\n ...{v[-tail_var_length:]}\"\n frame_vars.append((k, escape(v)))\n frame[\"vars\"] = frame_vars\n frames[i] = frame\n\n unicode_hint = \"\"\n if exc_type and issubclass(exc_type, UnicodeError):\n start = getattr(exc_value, \"start\", None)\n end = getattr(exc_value, \"end\", None)\n if start is not None and end is not None:\n unicode_str = exc_value.args[1]\n unicode_hint = force_text(unicode_str[max(start - 5, 0) : min(end + 5, len(unicode_str))], \"ascii\", errors=\"replace\")\n try:\n unicode_hint.encode(\"utf8\")\n except UnicodeEncodeError:\n unicode_hint = unicode_hint.encode(\"utf8\", \"surrogateescape\")\n\n c = {\n \"unicode_hint\": unicode_hint,\n \"frames\": frames,\n \"sys_executable\": sys.executable,\n \"sys_version_info\": \"%d.%d.%d\" % sys.version_info[0:3],\n \"server_time\": datetime.now(timezone.utc),\n \"sys_path\": sys.path,\n \"platform\": platform.uname()._asdict(),\n }\n # Check whether exception info is available\n if exc_type:\n c[\"exception_type\"] = exc_type.__name__\n if exc_value:\n c[\"exception_value\"] = force_text(exc_value, errors=\"replace\")\n if frames:\n c[\"lastframe\"] = frames[-1]\n\n return c", "def traceback_hook(type, value, traceback):\n logger.error(\"Uncaught Error:\", exc_info=(type, value, traceback))", "def exception_handler_quits(exctype, val, trace):\n logger.info(\n ''.join(traceback.format_exception(exctype, val, trace)))\n sys.exit(1)", "def test_friendly_exception_formatting_exc_without_str_overload():\n ex = SigningException()\n\n formatted_exception = friendlyEx(ex)\n\n assert formatted_exception == '{}'.format(ex)", "def send_rpc_error(req, rpcreq, e):", "def exception(self, msg, *args, **kwargs):\n if args:\n try:\n msg = msg % args\n except TypeError:\n log.exception_orig(_('Wrong format of a log message'))\n\n (exc_type, exc_value, exc_tb) = sys.exc_info()\n bugdialog.ShowEI(exc_type, exc_value, exc_tb, msg)\n if compat.PYTHON2: sys.exc_clear()", "def get_traceback_stxt():\n #/\n exc_cls, exc_obj, tb_obj = sys.exc_info()\n\n #/\n txt_s = traceback.format_exception(exc_cls, exc_obj, tb_obj)\n\n #/\n res = ''.join(txt_s)\n\n return res", "def test_call_with_exception(self):\n eclipse_name='p_func'\n def called_from_eclipse(arguments):\n a=a +1 \n return SUCCEED\n addPythonFunction(eclipse_name,called_from_eclipse)\n my_var=Var()\n Compound('call_python_function',Atom(eclipse_name),[1,my_var]).post_goal()\n with self.assertRaises(UnboundLocalError) as exp:\n resume()", "def throw(self, type, value=None, traceback=None):\n pass", "def VB2PY_ERROR(text):\n raise Exception('VB2PY conversion error: %s' % text)", "def look_for_cython_error(capfd):\n yield\n _, err = capfd.readouterr()\n assert \"Exception ignored\" not in err", "def crash():\n i = ctypes.c_char('a')\n j = ctypes.pointer(i)\n c = 0\n while True:\n j[c] = 'a'\n c += 1\n j", "def unexpected_error(self, exception):", "def exception_hash(err, traceback = None):\n result = ''\n if isinstance(err, str):\n result = \"str: %s\" % err\n else:\n if traceback == None:\n traceback = \"\\nNone\\n\"\n else:\n traceback = '\\n' + traceback\n result = \"%s.%s: %s%s\" % (err.__class__.__module__, \\\n err.__class__.__name__, \\\n str(err), traceback)\n return result", "def repr_failure(self, excinfo):\n if isinstance(excinfo.value, NbCellError):\n msg_items = [bcolors.FAIL + \"Notebook cell execution failed\" + bcolors.ENDC]\n formatstring = bcolors.OKBLUE + \"Cell %d: %s\\n\\n\" + \\\n \"Input:\\n\" + bcolors.ENDC + \"%s\\n\\n\" + \\\n 
bcolors.OKBLUE + \"Traceback:%s\" + bcolors.ENDC\n msg_items.append(formatstring % excinfo.value.args)\n return \"\\n\".join(msg_items)\n else:\n return \"pytest plugin exception: %s\" % str(excinfo.value)", "def __call__(self, argv ):\n \n try:\n self.apply( argv )\n return None\n except:\n exceptionType, exceptionValue, exceptionTraceback = sys.exc_info()\n exception_stack = traceback.format_exc(exceptionTraceback)\n exception_name = exceptionType.__module__ + '.' + exceptionType.__name__\n exception_value = str(exceptionValue)\n return (exception_name, exception_value, exception_stack)", "def _exceptionStackBTT(self,methodName,exc,depth=10):\n stack = \"\"\n # Reconstruct the call stack from where the trace of the exception was initiated by invoking \n # Trace.error() or Trace.severe().\n stackList = traceback.extract_stack()\n try:\n stack = \"\\tFrame stack (most recent call last):\\n\"\n for stackData in stackList:\n sourcefile,line,function,text = stackData\n if (sourcefile.endswith(\"Trace.py\") and (function == \"error\" or function == \"severe\")): break\n sepIndex = sourcefile.rfind(os.sep)\n if (sepIndex >=0 and Trace.SourceFileStyle == Trace.NameOnly):\n sourcefile = sourcefile[sepIndex+1:]\n #endIf\n if (text == None):\n stack = \"%s\\t%s(%s) [%s]\\n\" % (stack,sourcefile,line,function)\n else:\n stack = \"%s\\t%s(%s) [%s] - %s\\n\" % (stack,sourcefile,line,function,text)\n #endIf\n #endFor\n except:\n # This shouldn't happen, but in case it does...\n exc_type,exc_value = sys.exc_info()[:2]\n stack = \"%s\\n\\tException getting frame stack. Type: %s, Value: %s\" % (stack,exc_type,exc_value)\n #endTry\n \n try:\n stack = \"%s\\tException stack (most recent call last):\\n\" % stack\n tb = sys.exc_info()[2]\n stackList = traceback.extract_tb(tb,depth)\n for stackData in stackList:\n sourcefile,line,function,text = stackData\n sepIndex = sourcefile.rfind(os.sep)\n if (sepIndex >=0 and Trace.SourceFileStyle == Trace.NameOnly):\n sourcefile = sourcefile[sepIndex+1:]\n #endIf\n if (text == None):\n stack = \"%s\\t%s(%s) [%s]\\n\" % (stack,sourcefile,line,function)\n else: \n stack = \"%s\\t%s(%s) [%s] - %s\\n\" % (stack,sourcefile,line,function,text)\n #endIf\n #endFor\n except:\n # This shouldn't happen, but in case it does...\n exc_type,exc_value = sys.exc_info()[:2]\n stack = \"%s\\tException getting exception stack. Type: %s, Value: %s\\n\" % (stack,exc_type,exc_value)\n #endTry\n\n # At the very end - put the exception string\n stack = \"%s\\t%s\" % (stack,exc)\n \n return stack", "def exception_message():\n def get_os_release():\n \"\"\"Returns detailed OS release.\"\"\"\n if platform.linux_distribution()[0]:\n return \" \".join(platform.linux_distribution())\n elif platform.mac_ver()[0]:\n return \"%s %s\" % (platform.mac_ver()[0], platform.mac_ver()[2])\n else:\n return \"Unknown\"\n\n msg = (\n \"Oops! 
Cuckoo failed in an unhandled exception!\\nSometimes bugs are \"\n \"already fixed in the development release, it is therefore \"\n \"recommended to retry with the latest development release available \"\n \"%s\\nIf the error persists please open a new issue at %s\\n\\n\" %\n (GITHUB_URL, ISSUES_PAGE_URL)\n )\n\n msg += \"=== Exception details ===\\n\"\n msg += \"Cuckoo version: %s\\n\" % version\n msg += \"OS version: %s\\n\" % os.name\n msg += \"OS release: %s\\n\" % get_os_release()\n msg += \"Python version: %s\\n\" % platform.python_version()\n msg += \"Python implementation: %s\\n\" % platform.python_implementation()\n msg += \"Machine arch: %s\\n\" % platform.machine()\n\n try:\n import pip\n\n msg += \"Modules: %s\\n\" % \" \".join(sorted(\n \"%s:%s\" % (package.key, package.version)\n for package in pip.get_installed_distributions()\n ))\n except ImportError:\n pass\n\n msg += \"\\n\"\n return msg", "def _async_raise(t, exctype):\n tid = ctypes.c_long(t)\n if not inspect.isclass(exctype):\n exctype = type(exctype)\n res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype))\n if res == 0:\n raise ValueError(\"invalid thread id\")\n elif res != 1:\n # \"\"\"if it returns a number greater than one, you're in trouble,\n # and you should call it again with exc=NULL to revert the effect\"\"\"\n ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)", "def handle_exception(exc_type, exc_value, exc_traceback):\n exc_msg = traceback.format_exception(exc_type, exc_value, exc_traceback)\n exc_msg.insert(0, 'Uncaught exception on processor {}\\n'.format(mpiops.chunk_index))\n exc_msg = \"\".join(exc_msg)\n print(exc_msg, file=sys.stderr)", "def _exc_info_to_string(self, err, test):\n\t\texctype, value, tb = err\n\t\t# Skip test runner traceback levels\n\t\twhile tb and self._is_relevant_tb_level(tb):\n\t\t\ttb = tb.tb_next\n\n\t\tif exctype is test.failureException:\n\t\t\t# Skip assert*() traceback levels\n\t\t\tlength = self._count_relevant_tb_levels(tb)\n\t\t\tmsgLines = traceback.format_exception(exctype, value, tb, length)\n\t\telse:\n\t\t\tmsgLines = traceback.format_exception(exctype, value, tb)\t\t\n\t\treturn ''.join(msgLines)", "def excepthook(exctype, value, traceback): # real signature unknown; restored from __doc__\n pass", "def _async_raise(self,tid, exctype): \n tid = c_long(tid) \n if not inspect.isclass(exctype): \n exctype = type(exctype) \n res = pythonapi.PyThreadState_SetAsyncExc(tid, py_object(exctype)) \n if res == 0: \n raise ValueError(\"invalid thread id\") \n elif res != 1: \n # \"\"\"if it returns a number greater than one, you're in trouble, \n # and you should call it again with exc=NULL to revert the effect\"\"\" \n pythonapi.PyThreadState_SetAsyncExc(tid, None) \n raise SystemError(\"PyThreadState_SetAsyncExc failed\")", "def _async_raise(self,tid, exctype):\n tid = ctypes.c_long(tid)\n if not inspect.isclass(exctype):\n exctype = type(exctype)\n res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype))\n if res == 0:\n raise ValueError(\"invalid thread id\")\n elif res != 1:\n # \"\"\"if it returns a number greater than one, you're in trouble,\n # and you should call it again with exc=NULL to revert the effect\"\"\"\n ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)\n raise SystemError(\"PyThreadState_SetAsyncExc failed\")", "def _async_raise(self,tid, exctype):\r\n tid = ctypes.c_long(tid)\r\n if not inspect.isclass(exctype):\r\n exctype = type(exctype)\r\n res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, 
ctypes.py_object(exctype))\r\n if res == 0:\r\n raise ValueError(\"invalid thread id\")\r\n elif res != 1:\r\n # \"\"\"if it returns a number greater than one, you're in trouble,\r\n # and you should call it again with exc=NULL to revert the effect\"\"\"\r\n ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)\r\n raise SystemError(\"PyThreadState_SetAsyncExc failed\")", "def ips_excepthook(excType, excValue, traceback, frame_upcount=0):\n\n assert isinstance(frame_upcount, int)\n\n # first: print the traceback:\n tb_printer = TBPrinter(excType, excValue, traceback)\n\n # go down the stack\n tb = traceback\n tb_frame_list = []\n while tb.tb_next is not None:\n tb_frame_list.append(tb.tb_frame)\n tb = tb.tb_next\n\n critical_frame = tb.tb_frame\n tb_frame_list.append(critical_frame)\n\n tb_frame_list.reverse()\n # now the first frame in the list is the critical frame where the exception occured\n index = 0\n diff_index = frame_upcount\n\n # this allows to repeat the traceback inside the interactive function\n\n def __ips_print_tb(**kwargs):\n return tb_printer.printout(end_offset=index, **kwargs)\n\n while diff_index is not None:\n index += diff_index\n tb_printer.printout(end_offset=index)\n print(\"\\n\")\n current_frame = tb_frame_list[index]\n diff_index = IPS(frame=current_frame, ns_extension={\"__ips_print_tb\": __ips_print_tb}, print_tb=False)", "def _async_raise(tid, exctype):\n\tif not inspect.isclass(exctype):\n\t\traise TypeError(\"Only types can be raised (not instances)\")\n\tres = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype))\n\tif res == 0:\n\t\traise ValueError(\"invalid thread id\")\n\telif res != 1:\n\t\t# \"\"\"if it returns a number greater than one, you're in trouble, \n\t\t# and you should call it again with exc=NULL to revert the effect\"\"\"\n\t\tctypes.pythonapi.PyThreadState_SetAsyncExc(tid, 0)\n\t\traise SystemError(\"PyThreadState_SetAsyncExc failed\")", "def 报错(自身, func):\n 自身.错误处理 = func\n return func", "def help_with_exception():\n global previous_traceback\n if 'last_traceback' in dir(sys):\n if sys.last_traceback != previous_traceback:\n previous_traceback = sys.last_traceback\n parse_last_exception(sys.last_value)", "def _get_traceback(self, exc_info):\n import traceback\n return '<br/>'.join(traceback.format_exception(*(exc_info or sys.exc_info())))", "def reportError(self):\n self.Q['err'].put(sys.exc_info()[:2])", "def showtraceback(self,exc_tuple = None):\n\n # Though this won't be called by syntax errors in the input line,\n # there may be SyntaxError cases whith imported code.\n if exc_tuple is None:\n type, value, tb = sys.exc_info()\n else:\n type, value, tb = exc_tuple\n if type is SyntaxError:\n self.showsyntaxerror()\n else:\n sys.last_type = type\n sys.last_value = value\n sys.last_traceback = tb\n self.InteractiveTB()\n if self.InteractiveTB.call_pdb and self.has_readline:\n # pdb mucks up readline, fix it back\n self.readline.set_completer(self.Completer.complete)", "def PyHiew_ExecuteCallable(func_name, g, *args, **kwargs):\r\n PY_COMPILE_ERR = None\r\n try:\r\n g[func_name](*args, **kwargs)\r\n except Exception, e:\r\n PY_COMPILE_ERR = str(e) + \"\\n\" + traceback.format_exc()\r\n return PY_COMPILE_ERR", "def GetLongLineExceptions(self):\n return []", "def _exceptionStackTTB(self,methodName,exc,depth=10):\n stack = \"\"\n # Reconstruct the call stack from where the trace of the exception was initiated by invoking \n # Trace.error() or Trace.severe().\n stackList = traceback.extract_stack()\n try:\n for stackData 
in stackList:\n sourcefile,line,function,text = stackData\n if (sourcefile.endswith(\"Trace.py\") and (function == \"error\" or function == \"severe\")): break\n sepIndex = sourcefile.rfind(os.sep)\n if (sepIndex >=0 and Trace.SourceFileStyle == Trace.NameOnly):\n sourcefile = sourcefile[sepIndex+1:]\n #endIf\n if (text == None):\n if (not stack):\n # Leave out the newline for the bottom line on the stack\n stack = \"\\t%s(%s) [%s]\" % (sourcefile,line,function)\n else:\n stack = \"\\t%s(%s) [%s]\\n%s\" % (sourcefile,line,function,stack)\n #endIf\n else:\n if (not stack):\n # Leave out the newline for the bottom line on the stack\n stack = \"\\t%s(%s) [%s] - %s\" % (sourcefile,line,function,text)\n else:\n stack = \"\\t%s(%s) [%s] - %s\\n%s\" % (sourcefile,line,function,text,stack)\n #endIf\n #endIf\n #endFor\n stack = \"\\tFrame stack (most recent call first):\\n%s\" % stack\n except:\n # This shouldn't happen, but in case it does...\n exc_type,exc_value = sys.exc_info()[:2]\n stack = \"\\tException getting frame stack. Type: %s, Value: %s\\n%s\" % (exc_type,exc_value,stack)\n #endTry\n\n try:\n tb = sys.exc_info()[2]\n stackList = traceback.extract_tb(tb,depth)\n for stackData in stackList:\n sourcefile,line,function,text = stackData\n sepIndex = sourcefile.rfind(os.sep)\n if (sepIndex >=0 and Trace.SourceFileStyle == Trace.NameOnly):\n sourcefile = sourcefile[sepIndex+1:]\n #endIf\n if (text == None):\n stack = \"\\t%s(%s) [%s]\\n%s\" % (sourcefile,line,function,stack)\n else:\n stack = \"\\t%s(%s) [%s] - %s\\n%s\" % (sourcefile,line,function,text,stack)\n #endIf\n #endFor\n stack = \"\\tException stack (most recent call first):\\n%s\" % stack\n except:\n # This shouldn't happen, but in case it does...\n exc_type,exc_value = sys.exc_info()[:2]\n stack = \"\\tException getting exception stack. 
Type: %s, Value: %s\\n%s\" % (exc_type,exc_value,stack)\n #endTry\n \n # At the very top - put the exception string\n stack = \"\\t%s\\n%s\" % (exc,stack)\n \n return stack", "def exception_hook(type, value, traceback):\n sys.__excepthook__(type, value, traceback)", "def unexpectedException(self):", "def errmsg(self, str, prefix=\"** \"):\n raise NotImplementedError(NotImplementedMessage)", "def _async_raise(self, tid, exctype): \n tid = ctypes.c_long(tid) \n if not inspect.isclass(exctype): \n exctype = type(exctype) \n res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype)) \n if res == 0: \n raise ValueError(\"invalid thread id\") \n elif res != 1: \n # \"\"\"if it returns a number greater than one, you're in trouble, \n # and you should call it again with exc=NULL to revert the effect\"\"\" \n ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None) \n raise SystemError(\"PyThreadState_SetAsyncExc failed\")", "def exception_handler(exception_type, value, tb_obj):\n tb = '\\n'.join(traceback.format_tb(tb_obj))\n txt = 'Traceback (most recent call last):\\n' + tb + '\\n' + exception_type.__name__ + ': ' + str(value)\n print(txt)\n logger.error(_(\"Uncaught exception: \") + txt)\n QtWidgets.QMessageBox.critical(None, _('Uncaught Exception'), txt)", "def print_exc_plus(tb):\n while 1:\n if not tb.tb_next:\n break\n tb = tb.tb_next\n stack = []\n f = tb.tb_frame\n while f:\n stack.append(f)\n f = f.f_back\n stack.reverse()\n traceback.print_exc()\n print(\"Locals by frame, innermost last\")\n for frame in stack:\n print()\n print(\"Frame %s in %s at line %s\" % (frame.f_code.co_name,\n frame.f_code.co_filename,\n frame.f_lineno))\n for key, value in frame.f_locals.items():\n print(\"\\t%20s = \" % key,)\n #We have to be careful not to cause a new error in our error\n #printer! 
Calling str() on an unknown object could cause an\n #error we don't want.\n try:\n print(value)\n except:\n print(\"<ERROR WHILE PRINTING VALUE>\")", "def error(error):\n print(\"Error\", error)\n erlport.erlang.cast(this.erlang_pid, (erlport.erlterms.Atom(b'python_error'), str(error)))", "def _async_raise(tid, exctype):\n tid = ctypes.c_long(tid)\n if not inspect.isclass(exctype):\n exctype = type(exctype)\n res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype))\n if res == 0:\n raise ValueError(\"invalid thread id\")\n elif res != 1:\n # \"\"\"if it returns a number greater than one, you're in trouble,\n # and you should call it again with exc=NULL to revert the effect\"\"\"\n ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)\n raise SystemError(\"PyThreadState_SetAsyncExc failed\")", "def _async_raise(tid, exctype):\n tid = ctypes.c_long(tid)\n if not inspect.isclass(exctype):\n exctype = type(exctype)\n res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype))\n if res == 0:\n raise ValueError(\"invalid thread id\")\n elif res != 1:\n # \"\"\"if it returns a number greater than one, you're in trouble,\n # and you should call it again with exc=NULL to revert the effect\"\"\"\n ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)\n raise SystemError(\"PyThreadState_SetAsyncExc failed\")", "def _async_raise(tid, exctype):\n tid = ctypes.c_long(tid)\n if not inspect.isclass(exctype):\n exctype = type(exctype)\n res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype))\n if res == 0:\n raise ValueError(\"invalid thread id\")\n elif res != 1:\n # \"\"\"if it returns a number greater than one, you're in trouble,\n # and you should call it again with exc=NULL to revert the effect\"\"\"\n ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)\n raise SystemError(\"PyThreadState_SetAsyncExc failed\")", "def _async_raise(tid, exctype):\n tid = ctypes.c_long(tid)\n if not inspect.isclass(exctype):\n exctype = type(exctype)\n res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype))\n if res == 0:\n raise ValueError(\"invalid thread id\")\n elif res != 1:\n # \"\"\"if it returns a number greater than one, you're in trouble,\n # and you should call it again with exc=NULL to revert the effect\"\"\"\n ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)\n raise SystemError(\"PyThreadState_SetAsyncExc failed\")", "def _async_raise(tid, exctype):\n tid = ctypes.c_long(tid)\n if not inspect.isclass(exctype):\n exctype = type(exctype)\n res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype))\n if res == 0:\n raise ValueError(\"invalid thread id\")\n elif res != 1:\n # \"\"\"if it returns a number greater than one, you're in trouble,\n # and you should call it again with exc=NULL to revert the effect\"\"\"\n ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)\n raise SystemError(\"PyThreadState_SetAsyncExc failed\")", "def _colorstr(self, args):", "def last_exception():\n exc_type, exc_value, exc_traceback = sys.exc_info()\n return ''.join(traceback.format_exception(exc_type, exc_value,\n exc_traceback))", "def _get_exc_reason(cls, exc: Exception) -> str:\n reason = str(exc)\n for reason_re in cls.REASON_RES:\n if reason_re.search(reason):\n return reason_re.sub(r\"\\1\", reason).rstrip(\"')\")\n return reason", "def _async_raise(tid, exctype):\n\ttid = ctypes.c_long(tid)\n\tif not inspect.isclass(exctype):\n\t\texctype = type(exctype)\n\tres = 
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype))\n\tif res == 0:\n\t\traise ValueError(\"invalid thread id\")\n\telif res != 1:\n\t\tctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)\n\t\traise SystemError(\"PyThreadState_SetAsyncExc failed\")", "def _async_raise(tid, exctype):\r\n tid = ctypes.c_long(tid)\r\n if not inspect.isclass(exctype):\r\n exctype = type(exctype)\r\n res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype))\r\n if res == 0:\r\n raise ValueError(\"invalid thread id\")\r\n elif res != 1:\r\n # \"\"\"if it returns a number greater than one, you're in trouble,\r\n # and you should call it again with exc=NULL to revert the effect\"\"\"\r\n ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)\r\n raise SystemError(\"PyThreadState_SetAsyncExc failed\")", "def test_base_error_raises():\n with pytest.raises(PypyrAwsError) as err_info:\n raise PypyrAwsError(\"this is error text right here\")\n\n assert str(err_info.value) == \"this is error text right here\"", "def exception_data(code):\n try:\n exec(code)\n except Exception, detail:\n return (detail, detail.args,\n '%s: %s' % (detail.__class__.__name__, detail))", "def _async_raise(self, tid, exctype):\n tid = ctypes.c_long(tid)\n if not inspect.isclass(exctype):\n exctype = type(exctype)\n res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype))\n if res == 0:\n raise ValueError(\"invalid thread id\")\n elif res != 1:\n # \"\"\"if it returns a number greater than one, you're in trouble,\n # and you should call it again with exc=NULL to revert the effect\"\"\"\n ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)\n raise SystemError(\"PyThreadState_SetAsyncExc failed\")", "def _async_raise(tid, exctype):\r\n if not inspect.isclass(exctype):\r\n raise TypeError(\"Only types can be raised (not instances)\")\r\n res = ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(tid), ctypes.py_object(exctype))\r\n if res == 0:\r\n raise ValueError(\"invalid thread id\")\r\n elif res != 1:\r\n # \"\"\"if it returns a number greater than one, you're in trouble,\r\n # and you should call it again with exc=NULL to revert the effect\"\"\"\r\n ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)\r\n raise SystemError(\"PyThreadState_SetAsyncExc failed\")", "def print_exc_plus():\n tb = sys.exc_info()[2]\n while tb.tb_next:\n tb = tb.tb_next\n stack = []\n f = tb.tb_frame\n while f:\n stack.append(f)\n f = f.f_back\n stack.reverse()\n traceback.print_exc()\n print \"Locals by frame, innermost last\"\n for frame in stack:\n print\n print \"Frame %s in %s at line %s\" % (frame.f_code.co_name, frame.f_code.co_filename, frame.f_lineno)\n for key, value in frame.f_locals.items():\n print \"\\t%20s = \" % key,\n try: print value\n except: print \"<ERROR WHILE PRINT VALUE>\"", "def _error_handling(self,e,func):\n print(self.type, \" sufferred exception in \" , func , \":\" , e)", "def exception_hook(type, value, traceback):\n logging.getLogger('*excepthook*').critical(f'Uncaught Exception!', exc_info=(type, value, traceback))", "def getErrorTable(self, *args):\n return _libsbml.CompExtension_getErrorTable(self, *args)" ]
[ "0.60588336", "0.5961557", "0.58332723", "0.5556526", "0.55433345", "0.54902726", "0.5450629", "0.5449993", "0.5403903", "0.53904366", "0.53624654", "0.533152", "0.53102547", "0.529605", "0.52905834", "0.52848315", "0.52823675", "0.5257808", "0.5237709", "0.52302915", "0.5175386", "0.5172794", "0.51624376", "0.5112163", "0.51074886", "0.5106983", "0.50938654", "0.50820225", "0.505966", "0.5052084", "0.5051965", "0.50397515", "0.5005952", "0.5002877", "0.5000243", "0.499774", "0.4969319", "0.49658185", "0.49641186", "0.49498647", "0.49469778", "0.49456918", "0.4937601", "0.4933502", "0.49317834", "0.49307078", "0.49196288", "0.4918871", "0.4917048", "0.4907777", "0.48995024", "0.4895988", "0.48875514", "0.48788133", "0.48715287", "0.48671526", "0.4866407", "0.4860474", "0.48600653", "0.48541534", "0.4841231", "0.4835792", "0.48252437", "0.48105177", "0.48085594", "0.48074466", "0.48069748", "0.48043764", "0.4802812", "0.48023525", "0.4789657", "0.47886226", "0.47854194", "0.47770262", "0.477007", "0.47698817", "0.4769515", "0.47661403", "0.47627556", "0.47602916", "0.47579357", "0.47565407", "0.474837", "0.474837", "0.474837", "0.474837", "0.474837", "0.47462064", "0.4745572", "0.4743358", "0.47414714", "0.47392485", "0.47385448", "0.47378585", "0.4737191", "0.47334978", "0.4731579", "0.4728571", "0.47264764", "0.47228476" ]
0.7551365
0
Generate TypeError exception from the variable information (datatype, precision)
def generate_datatype_error(variable):
    dtype = variable.dtype
    if isinstance(dtype, NativeBool):
        precision = ''
    if isinstance(dtype, NativeComplex):
        precision = '{} bit '.format(variable.precision * 2 * 8)
    else:
        precision = '{} bit '.format(variable.precision * 8)
    message = '"Argument must be {precision}{dtype}"'.format(
        precision = precision,
        dtype = variable.dtype)
    return PyErr_SetString('PyExc_TypeError', message)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_value_error_for_computing_missing_type():\n with pytest.raises(ValueError):\n compute_type(\"missing_type\", {})", "def test_type_error(self):\n self._error_test(TypeError)", "def test_invalid_expression_type(self, parse_input_mocked_metadata):\n with pytest.raises(TypeError, match=r\"not of declared type int\"):\n parse_input_mocked_metadata(\"int Beta = -0.231e-6+5.21e-2j\")", "def try_wrong_types(self, p, name, type_):\n for x in (1, 1.0, \"x\", True, np.ndarray,):\n if type(x) != type_:\n with self.assertRaises(TypeError, msg=f\"{name} {type_} {x}\"):\n setattr(p, name, x)", "def test_incorrect_arg_type(self):\n\n with pytest.raises(TypeError) as exc_info:\n upper_incomplete_gamma(a='A', z=0.3)\n\n expected_error_msg = (\n 'type of argument \"a\" must be one of (int, float); got str instead'\n )\n assert str(exc_info.value) == expected_error_msg", "def test_non_pd_type_error(self):\n\n x = BaseTransformer(columns=\"a\")\n\n with pytest.raises(ValueError):\n\n x.transform(X=[1, 2, 3, 4, 5, 6])", "def test_datatype_error(self):\n arr = numpy.zeros((10,10), dtype='complex')\n self.assertRaises(ValueError, bytscl, arr)", "def _raise_value_error(is_gt, tracker, seq):\n if is_gt:\n raise TrackEvalException(\n 'GT data for sequence %s cannot be converted to the right format. Is data corrupted?' % seq)\n else:\n raise TrackEvalException(\n 'Tracking data from tracker %s, sequence %s cannot be converted to the right format. '\n 'Is data corrupted?' % (tracker, seq))", "def test_type_errors():\n\n\ttry:\n\t\ttransmissions = compute_transmissions(cal_directory, lines = 3.0)\n\texcept TypeError:\n\t\ttry:\n\t\t\ttransmissions = compute_transmissions(cal_directory, calibrator = 300.0)\n\t\texcept TypeError:\n\t\t\tassert True\n\t\telse:\n\t\t\tassert False\n\telse:\n\t\tassert False", "def type_error(var, types):\n\n divisor = None\n if len(types) == 2:\n divisor = \" or \"\n elif len(types) > 2:\n divisor = \", \"\n\n raise TypeError(\n \"'{var_name}' must be {type}, received '{var_type}'\"\n .format(var_name=RaiseIfNot._get_name(var),\n type=divisor.join(map(\n lambda x: \"'\" + x + \"'\",\n types)), var_type=type(var)))", "def test_exception_raised(self):\n\n df = d.create_df_2()\n\n x = ScalingTransformer(columns=[\"a\", \"b\", \"c\"], scaler=\"standard\")\n\n with pytest.raises(\n TypeError,\n match=r\"\"\"The following columns are not numeric in X; \\['b', 'c'\\]\"\"\",\n ):\n\n x.check_numeric_columns(df)", "def test_creation_float():\n with pytest.raises(ValueError) as __:\n value = 42.30474\n __ = param.Integer(value=value)", "def test_dict_type(self):\n\n expected = TypeError\n input_ = []\n with self.assertRaises(expected):\n math.factorial(input_)", "def test__specification_type_to_python_type_unsupported_type(self):\n with self.assertRaises(TypeError):\n _specification_type_to_python_type(\"unsupported_type\")", "def test_non_pd_type_error(self):\n\n df = d.create_df_1()\n\n x = BaseTransformer(columns=\"a\")\n\n with pytest.raises(ValueError):\n\n x.fit(X=df, y=[1, 2, 3, 4, 5, 6])", "def test_invalid_value(self):\n with self.assertRaises(TypeError):\n METRIC_SYSTEM.length('25a', LENGTH_KILOMETERS)\n with self.assertRaises(TypeError):\n METRIC_SYSTEM.temperature('50K', TEMP_CELSIUS)", "def _TypeMismatch(a, b):\n return 'Types do not match, %s v. 
%s' % (str(a), str(b))", "def test_instantiate_7():\n with raises(ValueError):\n FixedPoint(1.5, 'Q20.204')", "def test_wrong_type_error(self, parse_input_mocked_metadata):\n with pytest.raises(ValueError, match=\"invalid value\"):\n bb = parse_input_mocked_metadata(\n \"for int m in [1, 4.2, 9]\\n\\tMZgate(0, 1) | [0, 1]\"\n )", "def test_constructor_wrong_parameter_type(self):\n\n for invalid in (None, 1):\n with self.assertRaises(TypeError):\n group_tr = OCIO.FixedFunctionTransform(invalid)", "def conversionNotPossibleException(valueType: cern.japc.value.ValueType, valueType2: cern.japc.value.ValueType) -> cern.japc.value.ValueConversionException:\n ...", "def test_invalid_argument_type(self):\n t = TruthTable('A or B')\n\n with self.assertRaises(InvalidArgumentTypeError):\n t.equivalent_to(float())\n\n with self.assertRaises(InvalidArgumentTypeError):\n t.equivalent_to(None)", "def test_validation_can_fail():\n\n @type_checked\n def _run_test(something:int): pass\n\n with pytest.raises(TypeError) as error:\n _run_test(\"abc\")\n\n assert \"abc is of type str, expecting int.\" in error.value.args", "def test_float_type(self):\n\n input_ = 1.2\n expected = ValueError\n with self.assertRaises(expected):\n math.factorial(input_)", "def test_01_float(self):\n with self.assertRaises(TypeError) as x:\n r = Rectangle(float(1.2), float(2.2), 1)\n self.assertEqual(\"width must be an integer\", str(x.exception))", "def test_new_invalid(self) -> None:\n with pytest.raises(TypeError) as excinfo:\n RunwayTestDefinition({}) # type: ignore\n assert str(excinfo.value).startswith(\"expected data of type\")", "def test_check_X_not_int_not_float():\n with pytest.raises(ValueError):\n check_X(['hi'], verbose=False)", "def test_data_type(self):\n self.assertRaises(TypeError, Square, 'hello', 3, 2)\n self.assertRaises(TypeError, Square, 3, True, 2)\n self.assertRaises(TypeError, Square, 3, 2, 3.45)", "def ExceptionPropertyType_test(type1: str, type2: str):\n m = pyflamegpu.ModelDescription(\"model\")\n ed = m.Environment()\n add_func_t1 = getattr(ed, f\"newProperty{type1}\")\n add_func_array_t1 = getattr(ed, f\"newPropertyArray{type1}\")\n set_func_t1 = getattr(ed, f\"setProperty{type1}\")\n set_func_t2 = getattr(ed, f\"setProperty{type2}\")\n set_func_array_t2 = getattr(ed, f\"setPropertyArray{type2}\")\n \n a_t1 = 1\n a_t2 = 1\n b_t1 = [0] * ARRAY_TEST_LEN\n b_t2 = [0] * ARRAY_TEST_LEN\n for i in range(ARRAY_TEST_LEN):\n b_t1[i] = i\n b_t2[i] = i\n add_func_t1(\"a\", a_t1, True)\n add_func_array_t1(\"b\", b_t1, True)\n \n with pytest.raises(pyflamegpu.FLAMEGPURuntimeException) as e:\n set_func_t2(\"a\", a_t2)\n assert e.value.type() == \"InvalidEnvPropertyType\"\n with pytest.raises(pyflamegpu.FLAMEGPURuntimeException) as e:\n set_func_array_t2(\"b\", b_t2)\n assert e.value.type() == \"InvalidEnvPropertyType\"\n with pytest.raises(pyflamegpu.FLAMEGPURuntimeException) as e:\n set_func_t2(\"b\", 0, a_t2)\n assert e.value.type() == \"InvalidEnvPropertyType\"", "def test_str_type(self):\n\n expected = TypeError\n input_ = 'c'\n with self.assertRaises(expected):\n math.factorial(input_)", "def test_convert_nonnumeric_value():\n with pytest.raises(TypeError):\n pressure_util.convert(\"a\", PRESSURE_HPA, PRESSURE_INHG)", "def test__validate_features__1():\n for input_value in (\n 12.6,\n ):\n with vampytest.assert_raises(TypeError):\n validate_features(input_value)", "def test_invalid_data_construction(self):\n with self.assertRaises(Exception):\n LongDecimalEuler(term=-1)\n with self.assertRaises(Exception):\n 
LongDecimalEuler(term=\"aaa\")\n with self.assertRaises(Exception):\n LongDecimalEuler(nodecimals=-1)\n with self.assertRaises(Exception):\n LongDecimalEuler(nodecimals=\"aaa\")", "def ExceptionPropertyLength_test(type: str):\n m = pyflamegpu.ModelDescription(\"model\")\n ed = m.Environment()\n add_func = getattr(ed, f\"newPropertyArray{type}\")\n set_func = getattr(ed, f\"setPropertyArray{type}\")\n \n b = [0] * ARRAY_TEST_LEN\n _b1 = [0] * 1\n _b2 = [0] * (ARRAY_TEST_LEN + 1)\n _b3 = [0] * ARRAY_TEST_LEN * 2\n\n add_func(\"a\", b)\n with pytest.raises(pyflamegpu.FLAMEGPURuntimeException) as e:\n set_func(\"a\", _b1)\n with pytest.raises(pyflamegpu.FLAMEGPURuntimeException) as e:\n set_func(\"a\", _b2)\n with pytest.raises(pyflamegpu.FLAMEGPURuntimeException) as e:\n set_func(\"a\", _b3)\n # Added extra case to ensure that the above TypeErrors are not a result of the set_func not being found\n set_func(\"a\", b)", "def _check_types(self):\n if isinstance(self.unique_id, (int, str)): # should unique_id be a float?\n self.unique_id = str(self.unique_id)\n else:\n raise TypeError(f'unique_id incorrect type: {type(self.unique_id)}')\n try:\n self.ra = float(self.ra)\n except TypeError:\n print(f'ra incorrect type: {type(self.ra)}')\n try:\n self.dec = float(self.dec)\n except TypeError:\n print(f'dec incorrect type: {type(self.dec)}')\n try:\n self.z = float(self.z)\n except TypeError:\n print(f'z incorrect type: {type(self.z)}')\n if not isinstance(self.galcat, GCData):\n raise TypeError(f'galcat incorrect type: {type(self.galcat)}')\n if not -360. <= self.ra <= 360.:\n raise ValueError(f'ra={self.ra} not in valid bounds: [-360, 360]')\n if not -90. <= self.dec <= 90.:\n raise ValueError(f'dec={self.dec} not in valid bounds: [-90, 90]')\n if self.z < 0.:\n raise ValueError(f'z={self.z} must be greater than 0')", "def _infer_variable_types_from_data(raw_data):\n raise NotImplementedError()", "def test_value_init17(self):\n with self.assertRaises(TypeError) as err:\n r1 = Rectangle(1, 2, 3, \"hi\")\n msg = \"y must be an integer\"\n self.assertEqual(str(err.exception), msg)", "def test_value_error(self):\n self._error_test(ValueError)", "def get_data_type_error_text(field_name, field_value, type_name):\n\n\tmessage = ''\n\n\ttry:\n\t\tmessage = (\"Value '{0}' entered for '{1}' could not be parsed as a valid {2}\"\n\t\t\t\t .format(str(field_value),field_name,type_name))\n\texcept TypeError:\n\t\tmessage = (\"A value entered for '{0}' could not be read\".format(field_name))\n\n\treturn message", "def test_14_float_test(self):\n with self.assertRaises(TypeError) as x:\n r = Rectangle(1.3, 20)\n self.assertEqual(\"width must be an integer\", str(x.exception))\n\n with self.assertRaises(TypeError) as x:\n r = Rectangle(13, 2.0)\n self.assertEqual(\"height must be an integer\", str(x.exception))\n\n with self.assertRaises(TypeError) as x:\n r = Rectangle(13, 20, 1.7777)\n self.assertEqual(\"x must be an integer\", str(x.exception))\n\n with self.assertRaises(TypeError) as x:\n r = Rectangle(13, 20, 17, 8.0)\n self.assertEqual(\"y must be an integer\", str(x.exception))", "def test_make_plot_invalid_plot_type(self):\n print(sys._getframe().f_code.co_name)\n x = np.arange(0,6)*300000\n y = np.arange(0,6)\n self.assertRaises(Exception,pp.make_plot,x,y,plot_type='wrong',msg='Invalid plot type')", "def test_with_invalid_input(self):\n for dataset_type in ['ruler', 'pencil', 'cheese']:\n with self.assertRaises(ValueError) as exc:\n check_dataset_type(dataset_type)\n self.assertEqual(\"Dataset type not 
'regular' or 'raw' is %s\" % dataset_type,\n str(exc.exception))", "def test_badxvaluewithfloats(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, float(1), 2, 3)\n self.assertEqual(str(e.exception), 'x must be an integer')", "def test_datatype():\n\n assert isinstance(pf.get_datatype(), torch.dtype)\n assert pf.get_datatype() == torch.float32\n\n pf.set_datatype(torch.float64)\n assert isinstance(pf.get_datatype(), torch.dtype)\n assert pf.get_datatype() == torch.float64\n pf.set_datatype(torch.float32)\n\n with pytest.raises(TypeError):\n pf.set_datatype(\"lala\")", "def test_raises_type_error(self):\n wrong_type = dict()\n self.assertRaises(\n TypeError, util.convert_protobuf_to_proto_plus, wrong_type\n )", "def test__validate_topic__2():\n for input_value in (\n 12.6,\n ):\n with vampytest.assert_raises(TypeError):\n validate_topic(input_value)", "def test_non_int_case(self):\n self.assertRaises(TypeError, factorial, 1.5)", "def __init__(self, datatype, stage=\"\", context=\"\"):\n filler = \"unspecified\"\n if isinstance(datatype, str):\n typename = datatype\n else:\n try:\n typename = datatype.__name__\n except AttributeError:\n typename = str(datatype)\n explanation = \"Error creating {dt}; stage: {s}; context: {c}\".\\\n format(dt=typename, s=stage or filler, c=context or filler)\n super(ModelConstructionException, self).__init__(explanation)", "def testSlopeBadType(self):\n def setSlope():\n self.cc.slope = 'ban'\n\n self.assertRaises(\n TypeError,\n setSlope\n )", "def error(number):\n \n if number >= 1 or number <= -1 :\n raise TypeError,\\\n \"\\n<The interval of convergence should be -1 < value < 1 \\n\"", "def error(number):\n \n if number >= 1 or number <= -1 :\n raise TypeError,\\\n \"\\n<The interval of convergence should be -1 < value < 1 \\n\"", "def test_optimize_invalid_dtype():\n os.chdir(pathlib.Path(__file__).parent.absolute())\n loc = shutil.which(\"parrot-optimize\")\n script_descriptor = open(os.path.abspath(loc))\n script = script_descriptor.read()\n sys.argv = [\"parrot-optimize\", \"../data/seq_class_dataset.tsv\",\n \"../data/output_network.pt\", \"-d\", \"gibberish\", \"-c\", \"4\"]\n\n with pytest.raises(ValueError):\n exec(script)\n\n script_descriptor.close()", "def test__validate_title__1():\n for input_value in (\n 12.6,\n ):\n with vampytest.assert_raises(TypeError):\n validate_title(input_value)", "def test_type_error(self):\n self.assertRaises(TypeError, two_out_five,[1,1,0,0,0])\n self.assertRaises(TypeError, two_out_five,11000)", "def error(number):\n \n if number > 1 or number <= -1 :\n raise TypeError,\\\n \"\\n<The interval of convergence should be -1 < value <= 1 \\n\"", "def test_fieldname_exc(self):\n ds = self.f.create_dataset('foo', (100,), 'f')\n self.assertRaises(ValueError, ds.__getitem__, (0, 'a'))", "def test_should_raise_error_if_type_is_invalid(self):\r\n with self.assertRaises(ValueError):\r\n self.spec_parser.parse_statement({'type': 'sugar'})", "def test_value_init8(self):\n with self.assertRaises(TypeError) as err:\n r1 = Rectangle(\"hi\", 10)\n msg = \"width must be an integer\"\n self.assertEqual(str(err.exception), msg)", "def test_badyvaluewithfloats(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, 2, float(1), 3)\n self.assertEqual(str(e.exception), 'y must be an integer')", "def test_error_type(self):\n\n value = 0\n\n iter_given_code = self.test_error_type.__iter__()\n length = self.test_error_type.__len__()\n\n while value < 
self.MAX_ERROR_TYPE_VALUE or length > 0:\n\n if value == 18:\n value = 0xffff\n\n self.assertEqual(value, iter_given_code.__next__())\n\n if value < self.MAX_ERROR_TYPE_VALUE:\n value += 1\n\n length -= 1", "def test_value_init13(self):\n with self.assertRaises(TypeError) as err:\n r1 = Rectangle(1, (1, 2), 3)\n msg = \"height must be an integer\"\n self.assertEqual(str(err.exception), msg)", "def test_from_knx_wrong_parameter2(self):\n with self.assertRaises(ConversionError):\n DPTValue1Ucount().from_knx(\"0x23\")", "def test_if_it_accepts_string_datatype(self):\n with self.assertRaises(TypeError):\n prime_numbers(\"string\")", "def test_type_parameters_fine(self):\n\t\td = self.algo.typeParameters()\n\t\t# Test F parameter check\n\t\tself.assertIsNotNone(d.get('F', None))\n\t\tself.assertFalse(d['F'](-30))\n\t\tself.assertFalse(d['F'](-.3))\n\t\tself.assertTrue(d['F'](.3))\n\t\tself.assertTrue(d['F'](.39))\n\t\t# Test CR parameter check\n\t\tself.assertIsNotNone(d.get('CR', None))\n\t\tself.assertFalse(d['CR'](10))\n\t\tself.assertFalse(d['CR'](-10))\n\t\tself.assertFalse(d['CR'](-1))\n\t\tself.assertTrue(d['CR'](.3))\n\t\tself.assertTrue(d['CR'](.0))\n\t\tself.assertTrue(d['CR'](1.))", "def test__validate_status__1():\n for input_value in (\n 12.6,\n ):\n with vampytest.assert_raises(TypeError):\n validate_status(input_value)", "def get_expected_type_error_message(key, val, expected_type):\n\n return \"Invalid type at key '%s'. Expected '%s' got '%s'.\" \\\n % (str(key), str(expected_type), str(type(val)))", "def test_invalid_op_name_inputs_with_wrong_types(self, data, description):\n with self.assertRaises(TypeError, msg=description):\n tfx.op_name(data)", "def __type_incorrect_int(self):\n\n strTestName = 'Type (int instead of float) is given (incorrect)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddOpt('parameter1', 'types \\'int\\' or \\'float\\' parameter')\n RxCSObject.paramType('parameter1', (int, float))\n RxCSObject.parameter1 = 1\n\n RxCSObject.paramAddOpt('parameter2', 'type \\'int\\' parameter')\n RxCSObject.paramType('parameter2', (int))\n RxCSObject.parameter2 = int(2)\n\n RxCSObject.paramAddOpt('parameter3', 'type \\'int\\' parameter')\n RxCSObject.paramType('parameter3', (int))\n RxCSObject.parameter3 = float(3)\n\n self.__parametersCheck_error(RxCSObject, ParameterTypeError, strTestName)", "def test_unexpectedType(self):\n self.assertRaises(TypeError, nativeString, 1)", "def _raise_argument_validation_exception(typedef, value, detail, expected_tokens=None):\n typedef_name = typedef.get('help-name')\n if typedef_name is None:\n typedef_name = typedef.get('name')\n if typedef_name is None:\n typedef_name = typedef.get('field')\n if typedef_name is None:\n typedef_name = '<unknown-type>'\n if detail is None:\n detail = ''\n validation_error_format = typedef.get('validation-error-format',\n 'Invalid %(typedef)s: %(value)s; %(detail)s')\n validation_error = (validation_error_format %\n {'typedef': typedef_name, 'value': str(value), 'detail': detail})\n raise error.ArgumentValidationError(validation_error, expected_tokens)", "def test_plot_lm_typeerror(models):\n idata1 = models.model_1\n with pytest.raises(TypeError):\n plot_lm(idata=idata1, y=\"y\", num_samples=-1)", "def test_badsizevaluefloats(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(float(1), 1, 2, 3)\n self.assertEqual(str(e.exception), 'width must be an integer')", "def test_6_x_integer(self):\r\n with self.assertRaises(TypeError):\r\n S5 = Square(1, 'a')", "def 
test_point_negative_init_wrong_type_argument(self):\n with self.assertRaises(ValueError) as err:\n Point('five', 'three')\n self.assertEqual(err.args[0], 'Wrong value to define point. Both x and y must be int or float.',\n \"Test of Point('five', 'three') failed.\")\n\n with self.assertRaises(ValueError) as err:\n Point(['five', 'two'])\n self.assertEqual(err.args[0], 'Wrong value to define point. Both x and y must be int or float.',\n \"Test of Point(['five', 'two']) failed.\")\n\n with self.assertRaises(ValueError) as err:\n Point([5, 2])\n self.assertEqual(err.args[0], 'Wrong value to define point. Both x and y must be int or float.',\n \"Test of Point([5, 2]) failed.\")\n\n with self.assertRaises(ValueError) as err:\n Point((1, 3))\n self.assertEqual(err.args[0], 'Wrong value to define point. Both x and y must be int or float.',\n \"Test of Point((1, 3)) failed.\")\n\n with self.assertRaises(ValueError) as err:\n Point({'x': 3, 'y': 0.1})\n self.assertEqual(err.args[0], 'Wrong value to define point. Both x and y must be int or float.',\n \"Test of Point({'x': 3, 'y': 0.1}) failed.\")\n\n with self.assertRaises(ValueError) as err:\n Point({3, 0.1})\n self.assertEqual(err.args[0], 'Wrong value to define point. Both x and y must be int or float.',\n \"Test of Point({3, 0.1}) failed.\")", "def test_database_insert_times_invalid_types(value):\n database = forest.drivers.eida50.Database()\n with pytest.raises(Exception):\n database.insert_times([value], \"file.nc\")", "def type_error(func_name, expect_tp, got_tp,\n arg_name=None, ret=False) -> TypeError:\n msg = func_name\n if ret:\n msg += f\"'s return\"\n elif arg_name:\n msg += f\"'s parameter '{arg_name}'\"\n msg += f\" expect type {expect_tp}, got {got_tp}.\"\n return TypeError(msg)", "def test_value_init11(self):\n with self.assertRaises(TypeError) as err:\n r1 = Rectangle(10, 2, {})\n msg = \"x must be an integer\"\n self.assertEqual(str(err.exception), msg)", "def test_wrong_type_param():\n from scraper import get_inspection_page\n with pytest.raises(TypeError):\n get_inspection_page(Violation_Points=0, City='Seattle')", "def test__validate_message_notification__1():\n for input_value in (\n 12.6,\n ):\n with vampytest.assert_raises(TypeError):\n validate_message_notification(input_value)", "def test_creation_str():\n with pytest.raises(ValueError) as __:\n value = \"42\"\n __ = param.Integer(value=value)", "def test_lt_invalid(self):\n with self.assertRaises(TypeError):\n self.instance < 12", "def test_etype__invalid(self):\n\n for etype in (\"SyntaxError\", self):\n self.assertRaises(TypeError, encode_string, \"test\", etype=etype)", "def test_create_investigation_type_error(self):\n with self.assertRaises(QiitaDBColumnError):\n PrepTemplate.create(self.metadata, self.new_raw_data,\n self.test_study, self.data_type_id,\n 'Not a term')", "def test_value_init9(self):\n with self.assertRaises(TypeError) as err:\n r1 = Rectangle([1, 2], 8)\n msg = \"width must be an integer\"\n self.assertEqual(str(err.exception), msg)", "def test_datatype(self):\n with Pandas() as pd:\n if pd is None:\n return\n with Numpy() as np: # noqa\n if numpy is None:\n return\n sys.stderr.write(\"\\n\")\n\n df, hist1, hist2, hist3 = get_test_histograms1()\n\n assert hist1.datatype == str\n np.testing.assert_array_equal(hist2.datatype, [numpy.number, str])\n np.testing.assert_array_equal(hist3.datatype, [numpy.datetime64, numpy.number, str])", "def test_etype__invalid(self):\n\n for etype in (\"SyntaxError\", self):\n self.assertRaises(TypeError, 
encode_file_path, \"test\", etype)", "def test_no_coercion():\n\n @type_checked(coerce=False)\n def _run_test(something:str): pass\n\n with pytest.raises(TypeError) as error:\n _run_test(1234)\n\n assert \"1234 is of type int, expecting str.\" in error.value.args", "def test_invalid_array_type(self, parse_input_mocked_metadata):\n with pytest.raises(BlackbirdSyntaxError, match=r\"not of declared type float\"):\n parse_input_mocked_metadata(\n \"float array A =\\n\\t-1.0+1.0j, 2.7e5+0.2e-5j\\n\\t-0.1-2j, 0.2-0.1j\"\n )", "def test_types(self):\n \n self.assertIsInstance(self.c, int)\n self.assertIsInstance(self.dX, int)\n self.assertIsInstance(self.dY, int)\n self.assertIsInstance(self.dXg, int)\n self.assertIsInstance(self.dYg, int)\n self.assertIsInstance(self.dXqg, int)\n self.assertIsInstance(self.dYqg, int)\n self.assertIsInstance(self.Xr, int)\n self.assertIsInstance(self.Yr, int)\n self.assertIsInstance(self.dXq, int)\n self.assertIsInstance(self.dYq, int)\n self.assertIsInstance(self.outx_cal, int)\n self.assertIsInstance(self.outy_cal, int)\n self.assertIsInstance(self.dXql, int)\n self.assertIsInstance(self.dYql, int)\n self.assertIsInstance(self.dWx, int)\n self.assertIsInstance(self.dWy, int)\n self.assertIsInstance(self.Wf1, int)\n self.assertIsInstance(self.W, int)\n self.assertIsInstance(self.Wrms, int)\n self.assertIsInstance(self.delta, int)\n self.assertIsInstance(self.yb, int)\n self.assertIsInstance(self.x_edge, int)\n self.assertIsInstance(self.z_basis, int)\n self.assertIsInstance(self.coeff, int)\n self.assertIsInstance(self.nz, int)\n self.assertIsInstance(self.mz, int)\n self.assertIsInstance(self.nn, int)\n self.assertIsInstance(self.a, int)\n self.assertIsInstance(self.b, int)\n self.assertIsInstance(self.a1, int)\n self.assertIsInstance(self.b1, int)\n self.assertIsInstance(self.theta, int)\n self.assertIsInstance(self.jx, int)\n self.assertIsInstance(self.jy, int)\n self.assertIsInstance(self.ma, int)\n self.assertIsInstance(self.xx, int)\n self.assertIsInstance(self.outx_l, int)\n \n pass", "def test_Sobol_G_raises_error_if_values_not_numpy_array():\n fixture = [list(range(8)), str(12345678)]\n for x in fixture:\n with raises(TypeError):\n evaluate(x)", "def invalid_scalar(data):\n return object.__new__(object)", "def test_NN_hwr_raises_exception_for_non_numeric_values(self):\n self.assertRaises(TypeError, NN_hwr, [\"sb\", \"uir\", 5])", "def test_train_invalid_dtype():\n os.chdir(pathlib.Path(__file__).parent.absolute())\n loc = shutil.which(\"parrot-train\")\n script_descriptor = open(os.path.abspath(loc))\n script = script_descriptor.read()\n sys.argv = [\"parrot-train\", \"../data/seq_class_dataset.tsv\",\n \"../data/output_network.pt\", \"-d\", \"gibberish\", \"-c\", \"4\"]\n\n with pytest.raises(ValueError):\n exec(script)\n\n script_descriptor.close()", "def test_name_validation(self, attr):\n kwargs = {'kind': POSITIONAL_ONLY, attr: 3}\n with pytest.raises(TypeError) as excinfo:\n FParameter(**kwargs)\n assert excinfo.value.args[0] == \\\n '{} must be a str, not a {}'.format(attr, 3)", "def test_data_type(self):\n self.assertTrue(self.tester.data_type(), \"18S\")", "def _ValueMismatch(how_much):\n return 'Values mismatch, %s' % how_much", "def test_value_init6(self):\n with self.assertRaises(TypeError) as err:\n r1 = Rectangle(10, \"hi\")\n msg = \"height must be an integer\"\n self.assertEqual(str(err.exception), msg)", "def test_make_point(self):\n\n self.assertEqual(sppasPoint(3., 0.005), sppasANTX.make_point(\"132300\"))\n with 
self.assertRaises(TypeError):\n sppasANTX.make_point(\"3a\")\n with self.assertRaises(TypeError):\n sppasANTX.make_point(\"3.\")", "def test_data_type(self):\n with self.assertRaises(TypeError):\n max_integer(None)\n\n with self.assertRaises(TypeError):\n max_integer([\"Hey\", 3, 456, \"ALX\", 65])", "def test_to_knx_wrong_parameter(self):\n with self.assertRaises(ConversionError):\n DPTValue1Ucount().to_knx(\"fnord\")" ]
[ "0.6931209", "0.6566379", "0.6381943", "0.63050914", "0.62194294", "0.61516786", "0.6136856", "0.61005616", "0.60737664", "0.60367274", "0.60167736", "0.6010583", "0.6010164", "0.5982661", "0.59672403", "0.59668577", "0.59405553", "0.5933665", "0.5899024", "0.5894452", "0.58893466", "0.5887788", "0.5876288", "0.58660054", "0.5862698", "0.5856714", "0.585536", "0.5847813", "0.57912815", "0.57860976", "0.5781553", "0.57612306", "0.5744741", "0.57397264", "0.5729693", "0.57294375", "0.5727678", "0.5711504", "0.56927365", "0.5691867", "0.5690673", "0.5689787", "0.56763667", "0.56666034", "0.5666081", "0.56493264", "0.56319666", "0.5628503", "0.56267726", "0.56265616", "0.56265616", "0.56223065", "0.5621753", "0.5620854", "0.5607732", "0.56036454", "0.5599713", "0.55899334", "0.5585371", "0.55772233", "0.5574378", "0.55706024", "0.55567527", "0.5555372", "0.55539453", "0.55509406", "0.5543568", "0.55418223", "0.5537212", "0.55327266", "0.5529207", "0.55286497", "0.55272084", "0.55210316", "0.55169415", "0.55146706", "0.5497392", "0.5492574", "0.54911", "0.5484033", "0.54808354", "0.5470137", "0.54673207", "0.546532", "0.5460773", "0.5454108", "0.54527426", "0.54478604", "0.5446459", "0.5444199", "0.5441755", "0.5440578", "0.54385835", "0.5437347", "0.54363424", "0.5424891", "0.54104537", "0.5405992", "0.5404425", "0.5401655" ]
0.81578225
0
Create FunctionCall responsible for checking python argument data type
def scalar_object_check(py_object, c_object):
    try :
        check_type = check_type_registry[c_object.dtype, c_object.precision]
    except KeyError:
        errors.report(PYCCEL_RESTRICTION_TODO, symbol=c_object.dtype,severity='fatal')

    check_func = FunctionDef(name = check_type,
                             body = [],
                             arguments = [Variable(dtype=PyccelPyObject(), name = 'o', is_pointer=True)],
                             results = [Variable(dtype=NativeBool(), name = 'r')])

    return FunctionCall(check_func, [py_object])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def CheckType(self, *args, **kwargs):\n pass", "def check_input_type(func):\n @functools.wraps(func)\n def wrapper_check_input_type(*args):\n new_args = []\n for X in list(args):\n new_args.append(_check_type(X))\n return func(*new_args)\n return wrapper_check_input_type", "def get_type_check(self, arg, option):\n pass", "def check_object_input_type(func):\n @functools.wraps(func)\n def wrapper_check_input_type(ref, *args):\n new_args = [ref]\n for X in list(args):\n new_args.append(_check_type(X))\n return func(*new_args)\n return wrapper_check_input_type", "def _check_type(details: CallableDetails, arg: CallableArg):\n if arg.type is None:\n raise HandlerFactoryError(\n f\"Callable {details.obj!r} argument {arg.name} has no type annotation\"\n )\n if not isinstance(arg.type, type):\n raise HandlerFactoryError(\n f\"Callable {details.obj!r} argument {arg.name}\"\n f\" type annotation is not a type\"\n )", "def test_raises_error_if_arg_not_string(self):\n def result():\n return encode_run_length([678])\n \n self.assertRaises(TypeError, result)", "def test_unchecked_args():\n\n @type_checked\n def _run_test(something:str, something_else):\n assert isinstance(something, str)\n assert isinstance(something_else, bool)\n\n _run_test(1234, True)", "def _get_typed_arg_value(self, given_value, param_def, strict):\n param_type = param_def[\"type\"]\n if isinstance(given_value, unicode):\n # Convert all unicode to str in UTF-8\n given_value = given_value.encode(\"utf8\") # Make all unicode into str\n\n if isinstance(given_value, IonObjectBase) and (given_value._get_type() == param_type or\n param_type in given_value._get_extends()):\n return given_value\n elif is_ion_object_dict(given_value) and (param_type == \"NoneType\" or hasattr(objects, param_type)):\n return self.create_ion_object(given_value)\n elif param_type in (\"str\", \"bool\", \"int\", \"float\", \"list\", \"dict\", \"NoneType\"):\n arg_val = get_typed_value(given_value, targettype=param_type, strict=strict)\n return arg_val\n else:\n raise BadRequest(\"Cannot convert param value to type %s\" % param_type)", "def test_arg_type(args, arg, arg_type):\n try:\n arg_type(args[arg])\n except Exception:\n raise GaiaException('Required argument {} must be of type {}'\n .format(arg, arg_type))", "def is_compatible(self, function, arguments):", "def check_arg(function: Callable, arg_name: str, value: Any) -> None:\n Annotation = function.__annotations__.get(arg_name)\n if not is_valid(value, Annotation):\n raise ArgumentError(function, arg_name, value)", "def CALL(name, *args):\r\n funcname = 'is_' + name\r\n func = getattr(libueye, funcname)\r\n new_args = []\r\n for a in args: \r\n if isinstance (a, unicode):\r\n print name, 'argument',a, 'is unicode'\r\n new_args.append (str (a))\r\n else:\r\n new_args.append (a)\r\n return func(*new_args)", "def check_parameter(**kwargs):\n # get the frame and the parameters of the function\n frame = inspect.currentframe().f_back\n _, _, _, values = inspect.getargvalues(frame)\n\n # compare each parameter with its expected dtype\n for arg in kwargs:\n expected_dtype = kwargs[arg]\n parameter = values[arg]\n if not isinstance(parameter, expected_dtype):\n # TODO improve the error: raise 'Parameter array' when it comes from 'check_array'.\n raise ValueError(\"Parameter {0} should be cast in {1}. 
It is a {2}\"\n \"instead.\"\n .format(arg, expected_dtype, type(parameter)))\n\n return", "def __check_args(self):\n self.__check_args_type()\n self.__check_args_val()", "def test__validate_arg_type(parameter_name, parameter, expected_type, raises) :\n\n if raises is not None : \n # We expect this to raise an error\n with pytest.raises(raises) :\n _validate_arg_type(parameter_name, parameter, expected_type)\n else :\n _validate_arg_type(parameter_name, parameter, expected_type)", "def check(*argtypes):\n def _check(func):\n\n @functools.wraps(func)\n def __check(*args):\n \"\"\"\n Takes the arguments\n \"\"\"\n if len(args) != len(argtypes):\n msg = 'Expected %d but got %d arguments' % (\n len(argtypes), len(args)\n )\n raise TypeError(msg)\n for arg, argtype in zip(args, argtypes):\n if not isinstance(arg, argtype):\n msg = 'Expected %s but got %s' % (\n argtypes, tuple(type(arg) for arg in args)\n )\n raise TypeError(msg)\n return func(*args)\n return __check\n return _check", "def check_type(typ: Annotation, msg: str, is_argument: bool = True) -> Annotation:\n return typing._type_check(typ, msg, is_argument)", "def _validate_argument_types_match(self, rule, argument_types):\n if len(set(argument_types)) > 1:\n self.add_error(\n self.OPERATOR_ARGUMENT_TYPE_MISMATCH,\n rule=str(rule),\n argument_types=argument_types,\n )", "def check_argtype(val, type_, name, or_none=False):\n if not (isinstance(val, type_) or (or_none and val is None)):\n raise TypeError('{} should be of type {}, got {}'.format(\n name, type_, type(val)))", "def __check_args_type(self):\n if not isinstance(self.__num_prev_scans, int):\n error_msg = \"num_prev_scans must of type 'int', but given '\"\n error_msg += str(type(self.__num_prev_scans))+ \"'\"\n raise TypeError(error_msg)\n\n if isinstance(self.__num_prev_scans, bool):\n error_msg = \"num_prev_scans must of type 'int', but given '\"\n error_msg += str(type(self.__num_prev_scans))+ \"'\"\n raise TypeError(error_msg)", "def validate_argtype(arg, argtype):\r\n if not isinstance(arg, argtype):\r\n raise HelperException('{0} argument must be of type {1}'.format(\r\n arg, argtype))\r\n return arg", "def check_argument_type(dtype, kernel_argument, i):\n types_map = {\"uint8\": [\"uchar\", \"unsigned char\", \"uint8_t\"],\n \"int8\": [\"char\", \"int8_t\"],\n \"uint16\": [\"ushort\", \"unsigned short\", \"uint16_t\"],\n \"int16\": [\"short\", \"int16_t\"],\n \"uint32\": [\"uint\", \"unsigned int\", \"uint32_t\"],\n \"int32\": [\"int\", \"int32_t\"], #discrepancy between OpenCL and C here, long may be 32bits in C\n \"uint64\": [\"ulong\", \"unsigned long\", \"uint64_t\"],\n \"int64\": [\"long\", \"int64_t\"],\n \"float16\": [\"half\"],\n \"float32\": [\"float\"],\n \"float64\": [\"double\"]}\n if dtype in types_map:\n return any([substr in kernel_argument for substr in types_map[dtype]])\n else:\n return False # unknown dtype. do not throw exception to still allow kernel to run.", "def _type_check(self, input_dict: Dict[Text, channel.Channel],\n exec_properties: Dict[Text, Any]) -> None:\n raise NotImplementedError", "def _check_input(self, func, args, kwargs):\n fullargspec = inspect.getfullargspec(func)\n return_msg = ''\n if fullargspec.varkw is None:\n for key in kwargs:\n if not key in fullargspec.kwonlyargs:\n return_msg += f'[Error]: not support param `{key}`. 
\\n'\n if fullargspec.varargs is None:\n if len(fullargspec.args) == 0:\n max_args_len = 0\n else:\n max_args_len = len(fullargspec.args)-1 if fullargspec.args[0] == 'self' else len(fullargspec.args)\n defaults_nums = 0 if fullargspec.defaults is None else len(fullargspec.defaults)\n min_args_len = max_args_len - defaults_nums\n if len(args) < min_args_len:\n return_msg += f'[Error]: have min {min_args_len} input, but you input {len(args)} args. \\n'\n if max_args_len < len(args):\n return_msg += f'[Error]: have max {max_args_len} input, but you input {len(args)} args. \\n'\n return return_msg", "def test_args_validation_with_trait_type_classes(self):\n\n @function(x=Int, y=Int, _returns_=Int)\n def add(x, y):\n return x + y\n\n self.assertEqual(add(8, 2), 10)\n self.failUnlessRaises(TraitError, add, 2, 'xxx')\n\n return", "def _validate_function(t):\n t1, t2 = t.fromType, t.toType\n if is_array(t2):\n raise ArrayReturnError(t)\n validate(t1)\n validate(t2)", "def is_fn(self, argno: int, argc: int) -> '_Checker':\n t = self.arg_types[argno]\n if not isinstance(t, FunctionType):\n raise XlsTypeError(\n self.span, t, None,\n 'Want argument {} to be a function; got {}'.format(argno, t))\n if len(t.params) != argc:\n raise XlsTypeError(\n self.span, t, None,\n 'Want argument {} to be a function with {} parameters; got {}'.format(\n argno, argc, t))\n return self", "def test_validation_can_fail():\n\n @type_checked\n def _run_test(something:int): pass\n\n with pytest.raises(TypeError) as error:\n _run_test(\"abc\")\n\n assert \"abc is of type str, expecting int.\" in error.value.args", "def test_no_coercion():\n\n @type_checked(coerce=False)\n def _run_test(something:str): pass\n\n with pytest.raises(TypeError) as error:\n _run_test(1234)\n\n assert \"1234 is of type int, expecting str.\" in error.value.args", "def checkType(self, value):\n pass", "def type_cast(self, value, data_type):\n if isinstance(data_type, BaseArg):\n value = data_type(value)\n elif isinstance(value, data_type) is False:\n if self.auto_type_cast and isinstance(value, str) and data_type in (int, bool, float):\n if data_type is bool:\n value = value.lower()\n if value not in {\"true\", \"false\"}:\n raise Exception()\n value = True if value == \"true\" else False\n else:\n value = data_type(value)\n else:\n raise Exception()\n return value", "def _typechecked_func(func):\n\n # Preserve the function signature\n @functools.wraps(func)\n def arg_checking_func(*args, **kwargs):\n\n # Get the annotation dict from the arg spec\n spec = inspect.getfullargspec(func)\n annotations = spec.annotations\n\n # Get the dict of {arg name: arg value}\n call_args = inspect.getcallargs(func, *args, **kwargs)\n\n # Check each argument for type if the annotation contains it\n for arg_name, arg_type in annotations.items():\n if (arg_name != 'return'\n and not isinstance(call_args[arg_name], arg_type):\n fmt = \"Argument {0}={1} is not of type {2.__name__}\"\n raise TypeError(fmt.format(arg_name,\n call_args[arg_name], arg_type))\n\n # Check the return value as well if necessary\n ret = func(*args, **kwargs)\n if 'return' in annotations:\n ret_type = annotations['return']\n if not isinstance(ret, ret_type):\n fmt = (\"Return value must be of type {0.__name__},\"\n \" not {1.__name__}\")\n raise TypeError(fmt.format(ret_type, type(ret)))\n\n return ret\n\n return arg_checking_func", "def do_type(self, str_arg):\n try:\n self.adbc.type(validateString(str_arg))\n except Exception, e:\n printLog(self.threadName + 'TYPE FAILED: %s' % e.message)\n 
self.resultFlag = False\n finally:\n return self.resultFlag", "def __check_args_type(self):\n if not isinstance(self.__min_range, (float, int)):\n error_msg = \"min_range must of type int or float, but given: \"\n error_msg += str(type(self.__min_range))\n raise TypeError(error_msg)\n elif not isinstance(self.__max_range, (float, int)):\n error_msg = \"max_range must of type int or float, but given: \"\n error_msg += str(type(self.__max_range))\n raise TypeError(error_msg)\n\n if isinstance(self.__min_range, bool):\n error_msg = \"min_range must of type int or float, but given: \"\n error_msg += str(type(self.__min_range))\n raise TypeError(error_msg)\n elif isinstance(self.__max_range, bool):\n error_msg = \"max_range must of type int or float, but given: \"\n error_msg += str(type(self.__max_range))\n raise TypeError(error_msg)", "def data_type(arg):\n if isinstance(arg, str):\n return len(arg)\n elif isinstance(arg, bool):\n return arg\n elif isinstance(arg, list):\n return arg[2] if len(arg) >= 3 else None\n elif isinstance(arg, int):\n if arg == 100:\n return \"equal to 100\"\n else:\n if arg < 100:\n return \"less than 100\"\n else:\n return \"greater than 100\"\n else:\n return \"no value\"", "def test_argument_types(self):\n funcs = [\n CityHash32,\n CityHash64,\n CityHash128,\n CityHash64WithSeed,\n CityHash64WithSeeds,\n CityHash128WithSeed,\n ]\n args = [b\"ab\\x00c\", bytearray(b\"ab\\x00c\"), memoryview(b\"ab\\x00c\")]\n for func in funcs:\n values = set(func(arg) for arg in args)\n self.assertEqual(len(values), 1, values)", "def test_args_validation_with_trait_type_instances(self):\n\n @function(x=Int(10), y=Int(20), _returns_=Int(30))\n def add(x, y):\n return x + y\n\n self.assertEqual(add(8, 2), 10)\n self.failUnlessRaises(TraitError, add, 2, 'xxx')\n\n return", "def test_missing_argument(self):\n @converters.wrap\n def inner_test(param: int):\n \"\"\"This shouldn't be called, converting should fail.\"\"\"\n pass\n self.assert_raises_request_error(inner_test, 3102)", "def test_star_args():\n\n @type_checked\n def _run_test(wat:int, *args:float, **kwargs:str):\n assert wat == 0\n for arg in args:\n assert isinstance(arg, float)\n assert len(args) == 4\n for _, value in kwargs.items():\n assert isinstance(value, str)\n\n _run_test(False, False, True, 14, \"10.2\", foo=False, bar=17, ok=None)", "def test_input_type_errors(self):\n\n def net_func():\n input_value = paddle.to_tensor([1.0, 2.0, 3.0, 4.0, 5.0])\n paddle.bincount(input_value)\n\n with self.assertRaises(TypeError):\n self.run_network(net_func)", "def type_check_simple(func: callable, args: list,\n expected: type) -> Tuple[bool, object]:\n\n try:\n args_copy = deepcopy(args)\n returned = func(*args_copy)\n except Exception as exn:\n return (False, error_message(func, args, exn))\n\n if isinstance(returned, expected):\n return (True, returned)\n\n return (False,\n type_error_message(func.__name__, expected.__name__, returned))", "def check(self, value: Any) -> None:\n if not isinstance(value, self.oktype):\n raise TypeError(value)", "def to_java_arg(arg):\n if isinstance(arg, bool):\n return storm_thrift.JavaObjectArg(bool_arg=arg)\n elif isinstance(arg, int):\n return storm_thrift.JavaObjectArg(long_arg=arg)\n elif isinstance(arg, bytes):\n return storm_thrift.JavaObjectArg(binary_arg=arg)\n elif isinstance(arg, text_type):\n return storm_thrift.JavaObjectArg(string=arg)\n elif isinstance(arg, float):\n return storm_thrift.JavaObjectArg(double_arg=arg)\n else:\n return TypeError('arg is not a valid type to pass to 
Java: {!r}'\n .format(arg))", "def test_parameters_with_mixed_inferred_and_declared_types(self):\n with self.assertRaises(parser.JavaSyntaxError):\n parse.parse(setup_java_class(\"(x, int y) -> x+y;\"))", "def check_type(self):\n return True", "def validate(name, args, required, typ):\n value = args.get(name)\n if required and value is None:\n raise errors.Error(\"{0} is required argument\".format(name))\n if value is not None and not isinstance(value, typ):\n raise errors.Error(\"{0} should be {1}\".format(name, typ))", "def _check_sql_args(self, sql_args):\n # Check that sql arguments have the correct type\n if sql_args and type(sql_args) not in [tuple, list]:\n raise TypeError(\"sql_args should be tuple or list. Found %s \" %\n type(sql_args))", "def test_noncallable():\n\n @type_checked\n def _run_test(something:sys): pass\n\n with pytest.raises(ValueError) as error:\n _run_test(True)\n\n err = error.value.args\n assert \"type <module 'sys' (built-in)> is not a type or callable.\" in err\n\n @type_checked\n def _run_test(something:\"else\"): pass\n\n with pytest.raises(ValueError) as error:\n _run_test(True)", "def _cast_types(args):\n\targs.x_val = None if args.x_val == 'None' else int(args.x_val)\n\targs.test_size = float(args.test_size)\n\targs.alpha = float(args.alpha)\n\targs.fit_prior = (args.fit_prior in ['True', \"True\", 'true', \"true\"])\n\n\t# class_prior - array like type (problem to convert)\n\tif args.class_prior == \"None\" or args.class_prior == 'None':\n\t\targs.class_prior = None\n\n\t# --------- #\n\treturn args", "def external_call( d, output, stack, context ):\n\targuments = d[\"args\"].split(\" \")\n\tstack_collect = stack[-len(arguments):]\n\tfor arg, desired in zip(stack_collect, arguments):\n\t\tresult = type_system.type_reduce( arg, desired, output, stack, context )\n\t\tif result != \"success\":\n\t\t\treturn \"pass\"\n\toutput.append(\" call %s\\n\" % d[\"name\"])\n\tfor i in arguments:\n\t\tstack.pop()\n\tretvals = d[\"ret\"].split(\" \")\n\tfor val in retvals:\n\t\tstack.append( type_system.Datum( [[val]], [None] ) )\n\treturn \"success\"", "def test_check_wrong_argument_type(self, number, base):\n with self.assertRaises(exceptions.WrongArgumentTypeError):\n positional.decode(number, base)", "def _valid_types(arguments, types):\n for arg in arguments:\n if type(arg) not in types:\n return False\n return True", "def _valid_types(arguments, types):\n for arg in arguments:\n if type(arg) not in types:\n return False\n return True", "def is_type_correct(*args):\n return _ida_hexrays.is_type_correct(*args)", "def _preprocess_typecheck(argSig, argspecs, slf_or_clsm=False):\n # todo: Maybe move also slf-logic here\n vargs = argspecs.varargs\n try:\n kw = argspecs.keywords\n except AttributeError:\n kw = argspecs.varkw\n try:\n kwonly = argspecs.kwonlyargs\n except AttributeError:\n kwonly = None\n if not vargs is None or not kw is None:\n arg_type_lst = list(get_Tuple_params(argSig))\n if not vargs is None:\n vargs_pos = (len(argspecs.args)-1) \\\n if slf_or_clsm else len(argspecs.args)\n # IndexErrors in this section indicate that a child-method was\n # checked against a parent's type-info with the child featuring\n # a more wider type on signature level (e.g. 
adding vargs)\n try:\n vargs_type = typing.Sequence[arg_type_lst[vargs_pos]]\n except IndexError:\n vargs_type = typing.Sequence[typing.Any]\n try:\n arg_type_lst[vargs_pos] = vargs_type\n except IndexError:\n arg_type_lst.append(vargs_type)\n if not kw is None:\n kw_pos = len(argspecs.args)\n if slf_or_clsm:\n kw_pos -= 1\n if not vargs is None:\n kw_pos += 1\n if not kwonly is None:\n kw_pos += len(kwonly)\n try:\n kw_type = typing.Dict[str, arg_type_lst[kw_pos]]\n except IndexError:\n kw_type = typing.Dict[str, typing.Any]\n try:\n arg_type_lst[kw_pos] = kw_type\n except IndexError:\n arg_type_lst.append(kw_type)\n return typing.Tuple[tuple(arg_type_lst)]\n else:\n return argSig", "def type_cast(func,data_entry,*args):\n assert isinstance(data_entry,str)\n assert callable(func)\n try:\n out=func(data_entry,*args)\n except:\n out=None\n return out", "def assertAccepts(*types):\n def check_accepts(func):\n #assert len(types) == func.func_code.co_argcount, f\"The function {func.__name__} doesn't have the specified number of arguments ({len(types)}).\"\n # ^ AttributeError: 'function' object has no attribute 'func_code'\n @wraps(func)\n def wrapped_func(*args, **kwargs):\n assert(len(args) + len(kwargs) == len(types))\n for i, (a, t) in enumerate(zip(args, types)):\n assert isinstance(a, t), \\\n f\"The {i}th argument of function `{func.__name__}` is expected to be of type `{t}`, but it's value is `{a}`\"\n return func(*args, **kwargs)\n return wrapped_func\n return check_accepts", "def test_float_to_int():\n\n @type_checked\n def _run_test(something:int):\n assert something == 10\n\n _run_test(\"10.4\")", "def test_8_type_mismatch_in_expression(self):\n\t\tinput = \"\"\"var x,y:integer;\n\t\tfunction foo(a:real;b:boolean):real; begin return 1; end\n\t\tprocedure main(); var x:real; begin x:=foo(1,2); {error} end\"\"\"\n\t\texpect = \"Type Mismatch In Expression: CallExpr(Id(foo),[IntLiteral(1),IntLiteral(2)])\"\n\t\tself.assertTrue(TestChecker.test(input,expect,408))", "def arg_validation(arg, cla):\n if is_subclass(cla, arg):\n return arg\n else:\n print(str(arg)+\" is not a valid \" + cla.__module__ + \" name.\")\n sys.exit(2)", "def check_argument(self, struct_class, item, keyword, value):\n pass", "def check_args(f, got_len, exp_len):\n if not got_len == exp_len:\n msg = \"{0} expects {1} argument; got {2}\".format(f, exp_len, got_len)\n raise error.LispException(msg)", "def check_arguments(antns, lcls) -> None:\n for (arg, cls) in antns.items():\n if arg != 'return':\n if not isinstance(lcls[arg], cls):\n raise ValueError(\n (\"type({arg}) must be {cls}\\n\" +\n \"type({arg}) = {typ}\").format(\n arg=arg, cls=cls, typ=type(lcls[arg])))\n return None", "def check_inputs(function):\n def decorated(self, data, *args, **kwargs):\n if not (isinstance(data, np.ndarray) and len(data.shape) == 2 and data.shape[1] == 1):\n raise ValueError('The argument `data` must be a numpy.ndarray with shape (n, 1).')\n\n return function(self, data, *args, **kwargs)\n\n decorated.__doc__ = function.__doc__\n return decorated", "def do_type_checking(self, node):\r\n\r\n if not isinstance(node.inputs[0].type, CudaNdarrayType):\r\n raise NotImplementedError()", "def test_incorrect_arg_type(self):\n\n with pytest.raises(TypeError) as exc_info:\n upper_incomplete_gamma(a='A', z=0.3)\n\n expected_error_msg = (\n 'type of argument \"a\" must be one of (int, float); got str instead'\n )\n assert str(exc_info.value) == expected_error_msg", "def is_call_arg_of(self, *args):\n return 
_ida_hexrays.cexpr_t_is_call_arg_of(self, *args)", "def check_args(*args: Tuple[Any, ...], **kwargs: Any) -> None:\n\n # We begin by initializing the maximum number of args we will allow at 0. We will iterate\n # this if by chance we see an argument whose name is \"self\".\n max_arg_len = 0\n\n # iterate through every parameter passed in\n for idx, param_name in enumerate(literal_signature.parameters):\n\n if idx == 0 and (param_name == \"self\" or param_name == \"cls\"):\n max_arg_len += 1\n continue\n\n # if this parameter isn't in kwargs, then it's probably in args. However, we can't check\n # directly because we don't have arg names, only the list of args which were passed in.\n # Thus, the way this check works is to return an error if we find an argument which\n # isn't in kwargs and isn't \"self\".\n if param_name not in kwargs and len(args) > max_arg_len:\n traceback_and_raise(\n AttributeError(\n f\"'{param_name}' was passed into a function as an arg instead of a kwarg. \"\n f\"Please pass in all arguments as kwargs when coding/using PySyft.\"\n )\n )", "def signature_check(dummy, *args, **kwargs):\n try:\n dummy(*args, **kwargs)\n return True\n\n except TypeError:\n return False", "def test_from_callable(self):\n def func(a: int = 0):\n return a\n fsig = FSignature.from_callable(func)\n assert len(fsig.parameters) == 1\n assert fsig.parameters['a'] == FParameter(\n kind=inspect.Parameter.POSITIONAL_OR_KEYWORD,\n name='a',\n interface_name='a',\n default=0,\n type=int,\n )", "def isdatatype(object):\n return isinstance(object, (str, int, bool, float, type(None)))", "def _verify_input_args(\n spatial_dimension: Optional[Union[str, int]] = None,\n tag: Optional[str] = None,\n tabulate: bool = True,\n) -> None:\n # --- Parse 'spatial_dimension'\n if not isinstance(spatial_dimension, (int, str, type(None))):\n raise TypeError(\n f\"Invalid type for spatial dimension! \"\n f\"Expected either an integer or a string. \"\n f\"Got instead {type(spatial_dimension)}.\"\n )\n if spatial_dimension is not None and isinstance(spatial_dimension, str):\n if spatial_dimension.lower() != \"m\":\n raise ValueError(\n f\"Invalid value ({spatial_dimension}) for spatial dimension! \"\n f\"Either a positive integer or 'M' to indicate \"\n f\"a variable-dimension test function.\"\n )\n if spatial_dimension is not None and isinstance(spatial_dimension, int):\n if spatial_dimension <= 0:\n raise ValueError(\n f\"Invalid value ({spatial_dimension}) for spatial dimension! \"\n f\"Either a positive integer or 'M' to indicate \"\n f\"a variable-dimension test function.\"\n )\n\n # --- Parse 'tag'\n if not isinstance(tag, (str, type(None))):\n raise TypeError(f\"Tag argument must be of str type! Got {type(tag)}.\")\n if tag is not None and tag not in SUPPORTED_TAGS:\n raise ValueError(\n f\"Tag {tag!r} is not supported. Use one of {SUPPORTED_TAGS}!\"\n )\n\n # --- Parse 'tabulate'\n if not isinstance(tabulate, (bool, type(None))):\n raise TypeError(\n f\"'tabulate' argument must be of bool type! Got {type(tabulate)}.\"\n )", "def _check_input_args(scale, shape, dtype):\n if tf.as_dtype(dtype) not in (tf.int32, tf.int64):\n raise ValueError(\n f'Only tf.int32 and tf.int64 are supported. 
Found dtype `{dtype}`.')\n\n checks = [\n tf.compat.v1.assert_non_negative(scale),\n tf.compat.v1.assert_integer(scale)\n ]\n with tf.control_dependencies(checks):\n return tf.identity(scale), shape, dtype", "def test_nested_one_arg_short():\n\n @type_checked\n def _run_test(thing:(float, int, str)): pass\n\n with pytest.raises(TypeError) as error:\n _run_test((\"123\", 123.12))\n\n assert error.exconly() == (\n \"TypeError: Argument length mismatch. \"\n \"Expected a tuple of float, int, str.\"\n )", "def NormalizeAndTypeCheck(arg, types):\n if not isinstance(types, (list, tuple)):\n types = (types,)\n\n assert list not in types and tuple not in types\n\n if isinstance(arg, types):\n return ([arg], False)\n else:\n try:\n for val in arg:\n if not isinstance(val, types):\n raise datastore_errors.BadArgumentError(\n 'Expected one of %s; received %s (a %s).' %\n (types, val, typename(val)))\n except TypeError:\n raise datastore_errors.BadArgumentError(\n 'Expected an instance or sequence of %s; received %s (a %s).' %\n (types, arg, typename(arg)))\n\n return (list(arg), True)", "def catch_typeerror(func):\n def f(self, *args, **kwargs):\n try:\n return func(self, *args, **kwargs)\n except TypeError, exn:\n #log.exception('catch_typeerror')\n if hasattr(func, 'api') and func.api in argcounts:\n # Assume that if the argument count was wrong and if the\n # exception was thrown inside this file, then it is due to an\n # invalid call from the client, otherwise it's an internal\n # error (which will be handled further up).\n expected = argcounts[func.api]\n actual = len(args) + len(kwargs)\n if expected != actual:\n tb = sys.exc_info()[2]\n try:\n sourcefile = traceback.extract_tb(tb)[-1][0]\n if sourcefile == inspect.getsourcefile(BNVMAPI):\n return xen_api_error(\n ['MESSAGE_PARAMETER_COUNT_MISMATCH',\n func.api, expected, actual])\n finally:\n del tb\n raise\n except BNAPIError, exn:\n return xen_api_error(exn.get_api_error())\n\n return f", "def test04(self):\n self.assertRaises(TypeError, robustApply, oneArgument, \"this\", blah=\"that\")", "def test_type_error(self):\n with self.assertRaises(TypeError):\n function_inclusion_filter_builder(5)", "def test_badargs(self):\n self.assertRaises(TypeError, isint, [])\n self.assertRaises(TypeError, isint, {})\n self.assertRaises(TypeError, isint, None)\n return", "def fcheck(*args, **kwargs)->None:\n pass", "def __call__(self, *args):\n return TypeCall(self, args)", "def _validate(self, args, kwargs) -> None:\n\n def error(\n exception_type: type[RegistrationError],\n arg_name: str | None = None,\n **msg_kwargs,\n ) -> None:\n if arg_name is None:\n arg_name = args[0] if args else \"<unknown>\"\n raise exception_type(self.scope, arg_name, **msg_kwargs)\n\n if not args:\n error(NoOptionNames)\n # Validate args.\n for arg in args:\n # We ban short args like `-x`, except for special casing the global option `-l`.\n if not arg.startswith(\"--\") and not (self.scope == GLOBAL_SCOPE and arg == \"-l\"):\n error(OptionNameDoubleDash, arg_name=arg)\n\n # Validate kwargs.\n if \"implicit_value\" in kwargs and kwargs[\"implicit_value\"] is None:\n error(ImplicitValIsNone)\n type_arg = kwargs.get(\"type\", str)\n if \"member_type\" in kwargs and type_arg != list:\n error(MemberTypeNotAllowed, type_=type_arg.__name__)\n member_type = kwargs.get(\"member_type\", str)\n is_enum = inspect.isclass(member_type) and issubclass(member_type, Enum)\n if not is_enum and member_type not in self._allowed_member_types:\n error(InvalidMemberType, 
member_type=member_type.__name__)\n\n help_arg = kwargs.get(\"help\")\n if help_arg is not None and not isinstance(help_arg, str):\n error(HelpType, help_type=type(help_arg).__name__)\n\n # check type of default value\n default_value = kwargs.get(\"default\")\n if default_value is not None:\n if isinstance(default_value, str) and type_arg != str:\n # attempt to parse default value, for correctness..\n # custom function types may implement their own validation\n default_value = self.to_value_type(default_value, type_arg, member_type)\n if hasattr(default_value, \"val\"):\n default_value = default_value.val\n\n # fall through to type check, to verify that custom types returned a value of correct type\n\n if isinstance(type_arg, type) and not isinstance(default_value, type_arg):\n error(\n DefaultValueType,\n option_type=type_arg.__name__,\n default_value=kwargs[\"default\"],\n value_type=type(default_value).__name__,\n )\n\n # verify list member types (this is not done by the custom list value type)\n if type_arg == list:\n for member_val in default_value:\n if not isinstance(member_type, type):\n # defer value validation to custom type\n member_type(member_val)\n\n elif not isinstance(member_val, member_type):\n error(\n DefaultMemberValueType,\n member_type=member_type.__name__,\n member_value=member_val,\n value_type=type(member_val).__name__,\n )\n\n if (\n \"passthrough\" in kwargs\n and kwargs[\"passthrough\"]\n and (type_arg != list or member_type not in (shell_str, str))\n ):\n error(PassthroughType)\n\n for kwarg in kwargs:\n if kwarg not in self._allowed_registration_kwargs:\n error(InvalidKwarg, kwarg=kwarg)\n\n # Ensure `daemon=True` can't be passed on non-global scopes.\n if kwarg == \"daemon\" and self._scope != GLOBAL_SCOPE:\n error(InvalidKwargNonGlobalScope, kwarg=kwarg)\n\n removal_version = kwargs.get(\"removal_version\")\n if removal_version is not None:\n validate_deprecation_semver(removal_version, \"removal version\")", "def test_arguments(self):\n\n h.test_function_arguments(\n func=ScalingTransformer.check_numeric_columns,\n expected_arguments=[\"self\", \"X\"],\n expected_default_values=None,\n )", "def _check_args(self):\n if not isinstance(self.digits, str):\n raise TypeError('digits must be of type string.')\n if isinstance(self.n_points, float):\n self.n_points = int(self.n_points)\n if not isinstance(self.n_points, int):\n raise TypeError('n_points must be of type integer.')\n if self.n_points < 0:\n raise ValueError('n_points must be positive.')", "def _check_type(self, new_value):\n raise NotImplementedError", "def _check_args(self, args):\n if not isinstance(args, list) or not len(args) >= 2:\n raise FunctionArgumentException(\"Argument of attribute getter \"\n \"function '%s' must be a list of \"\n \"indeces; got: '%s'\" % (\n self.name,\n args\n ))\n\n if not is_homogeneous(args, (str, int)):\n raise FunctionArgumentException(\n \"'%s': argument must be a list of strings; got: '%s'\" %\n (self.name, args)\n )", "def input_type():\n pass", "def check_params(self):\n raise NotImplementedError", "def get_check_types():", "def test_verify_param_compare_types(self):\n utils.check_raises(lambda: ax.verify_param(\"1\", paramCompare=1, isEqual=True), HTTPInternalServerError)\n utils.check_raises(lambda: ax.verify_param(\"1\", paramCompare=True, isEqual=True), HTTPInternalServerError)\n utils.check_raises(lambda: ax.verify_param(1, paramCompare=\"1\", isEqual=True), HTTPInternalServerError)\n utils.check_raises(lambda: ax.verify_param(1, paramCompare=True, 
isEqual=True), HTTPInternalServerError)\n\n # strings cases handled correctly (no raise)\n utils.check_no_raise(lambda: ax.verify_param(\"1\", paramCompare=u\"1\", isEqual=True))", "def test_int_to_listed():\n\n @type_checked\n def _run_test(thing:[int]=None):\n assert thing == [15]\n\n _run_test(\"15.0\")", "def validate_arguments(arguments: dict) -> None:\n if not isinstance(arguments, dict):\n raise TypeError('Argument \"arguments\" should be a dict')\n for argument in arguments:\n if not isinstance(arguments[argument][0], arguments[argument][1]):\n raise TypeError(f'Argument {argument} should be a {arguments[argument][1]}')", "def get_pytype(self, c_arg, parse_arg):\n if isinstance(c_arg, FunctionAddress):\n return 'O'\n else:\n try:\n return pytype_parse_registry[(parse_arg.dtype, parse_arg.precision)]\n except KeyError as e:\n raise NotImplementedError(\"Type not implemented for argument collection : \"+str(type(parse_arg))) from e", "def type_check_full(func: callable, args: list,\n checker_function: callable) -> Tuple[bool, object]:\n\n try:\n args_copy = deepcopy(args)\n returned = func(*args_copy)\n except Exception as exn:\n return (False, error_message(func, args, exn))\n\n return checker_function(returned)", "def argument_type(arg):\n types = (int, float)\n \n for t in types:\n try:\n return type(t(arg))\n except ValueError:\n continue\n \n return str", "def check_argument_values(arg_name: str, arg_value: any, expected_type: Tuple[Union[type, None], Union[Tuple, None]]) -> None:\n expected_type, expected_range = expected_type\n if isinstance(expected_type, list) and None in expected_type:\n pass\n elif not isinstance(arg_value, expected_type):\n raise TypeError(f\"{arg_name} should be of type {expected_type.__name__} not type {arg_value.__class__}.\")\n if expected_range is not None:\n # Handle numeric range\n if isinstance(expected_range, tuple) and len(expected_range) == 2:\n if not (expected_range[0] <= arg_value <= expected_range[1]):\n raise ValueError(f\"{arg_name} should be within the range {expected_range}, cannot except {arg_value}.\")\n elif isinstance(expected_range, list) and arg_value not in expected_range:\n raise ValueError(f\"{arg_name} should be one of {expected_range}, cannot except '{arg_value}'.\")", "def evaluate(self, *args, **kwargs) -> Union[str, int, float, bool]:\n return True", "def _check_helper(self, value, raise_exceptions=True) -> bool:\n if not isinstance(value, self.value_type):\n if raise_exceptions:\n raise InvalidParameterException(\n '%s: invalid type given: %s (required %s)' % (\n self.name, type(value),\n ', '.join([str(x) for x in self.value_type])\n )\n )\n return False\n\n return True", "def _typecheck(name, value, *types):\n if not types:\n raise ValueError('expected one or more types, maybe use _textcheck?')\n if not isinstance(value, types):\n raise TypeError(\"expected %s for %s, got %r\"\n % (\" or \".join([t.__name__ for t in types]),\n name, value))\n return value", "def check(self, kwargs):\n cls = self.__class__.__name__\n for k, v in self.argument_types.items():\n try:\n if k in kwargs:\n kwargs[k] = v(kwargs[k])\n except ValueError:\n logger.warning(\n '%s(\"%s\"): Invalid value %r for argument \"%s\".'\n % (cls, self.path, kwargs[k], k)\n )\n return None\n except TypeError:\n logger.warning(\n '%s(\"%s\"): Invalid value %r for argument \"%s\".'\n % (cls, self.path, kwargs[k], k)\n )\n return None\n for k in self.required_arguments_names:\n if k not in kwargs:\n logger.warning(\n '%s(\"%s\"): Missing required argument 
\"%s\".' % (cls, self.path, k)\n )\n return None\n if not self.accept_kwargs:\n for k in kwargs:\n if k not in self.accepted_argument_names:\n logger.warning(\n '%s(\"%s\"): Invalid argument \"%s\".' % (cls, self.path, k)\n )\n return None\n return kwargs" ]
[ "0.69113696", "0.6676271", "0.66149634", "0.6469016", "0.63755065", "0.63459605", "0.632837", "0.63063824", "0.62952715", "0.629278", "0.6274046", "0.6262766", "0.6187069", "0.6174713", "0.6169088", "0.61163473", "0.61155474", "0.60963315", "0.6094568", "0.6088186", "0.6083363", "0.60735685", "0.6062335", "0.60424286", "0.6032663", "0.6022588", "0.6006085", "0.59956497", "0.5989258", "0.59827846", "0.5968829", "0.5944627", "0.5943165", "0.59311485", "0.59287226", "0.59186184", "0.5893683", "0.58832014", "0.587973", "0.5874673", "0.58737665", "0.5867811", "0.5857039", "0.58510333", "0.5850795", "0.5839276", "0.58373755", "0.5823896", "0.58190066", "0.58114356", "0.58071303", "0.58024013", "0.58024013", "0.5792227", "0.57897073", "0.57789516", "0.5750357", "0.5742349", "0.5740874", "0.57397217", "0.5738549", "0.57367146", "0.5726993", "0.57269454", "0.57239985", "0.5723585", "0.57192886", "0.5717782", "0.5716343", "0.570171", "0.5701572", "0.56937146", "0.56891763", "0.56833553", "0.5681742", "0.5665408", "0.56642145", "0.56623715", "0.5660213", "0.5659004", "0.56577045", "0.5651877", "0.56509924", "0.5649596", "0.5641235", "0.56351197", "0.563404", "0.56294477", "0.5611277", "0.5610566", "0.56101906", "0.56089306", "0.5600475", "0.5594106", "0.5592961", "0.5590484", "0.5590037", "0.55890715", "0.5586333", "0.5583664" ]
0.6071258
22
copes with positions with no values
def test_get_logo_missing(self):
    data = [
        [0.1, 0.3, 0.5, 0.1],
        [0.05, 0.8, 0.05, 0.1],
        [0, 0, 0, 0],
        [0.7, 0.1, 0.1, 0.1],
        [0.6, 0.15, 0.05, 0.2],
    ]
    data = DictArrayTemplate(5, "ACGT").wrap(data)
    get_logo(data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def filled_positions(self):\n return [x for x in assignable_positions if self.grid[x][0]]", "def no_empty_positions(self):\n clauses = []\n\n for position in range(0,self.graph.num_vertices):\n clause = []\n for vertex in range(0,self.graph.num_vertices):\n clause.append(ClauseVariable(False,vertex,position))\n clauses.append(clause)\n return clauses", "def _some_variables(use_posInd=False):\n\n parent = (\n np.array(\n [\n 0,\n 1,\n 2,\n 3,\n 4,\n 5,\n 1,\n 7,\n 8,\n 9,\n 10,\n 1,\n 12,\n 13,\n 14,\n 15,\n 13,\n 17,\n 18,\n 19,\n 20,\n 21,\n 20,\n 23,\n 13,\n 25,\n 26,\n 27,\n 28,\n 29,\n 28,\n 31,\n ]\n )\n - 1\n )\n\n offset = np.array(\n [\n 0.000000,\n 0.000000,\n 0.000000,\n -132.948591,\n 0.000000,\n 0.000000,\n 0.000000,\n -442.894612,\n 0.000000,\n 0.000000,\n -454.206447,\n 0.000000,\n 0.000000,\n 0.000000,\n 162.767078,\n 0.000000,\n 0.000000,\n 74.999437,\n 132.948826,\n 0.000000,\n 0.000000,\n 0.000000,\n -442.894413,\n 0.000000,\n 0.000000,\n -454.206590,\n 0.000000,\n 0.000000,\n 0.000000,\n 162.767426,\n 0.000000,\n 0.000000,\n 74.999948,\n 0.000000,\n 0.100000,\n 0.000000,\n 0.000000,\n 233.383263,\n 0.000000,\n 0.000000,\n 257.077681,\n 0.000000,\n 0.000000,\n 121.134938,\n 0.000000,\n 0.000000,\n 115.002227,\n 0.000000,\n 0.000000,\n 257.077681,\n 0.000000,\n 0.000000,\n 151.034226,\n 0.000000,\n 0.000000,\n 278.882773,\n 0.000000,\n 0.000000,\n 251.733451,\n 0.000000,\n 0.000000,\n 0.000000,\n 0.000000,\n 0.000000,\n 0.000000,\n 99.999627,\n 0.000000,\n 100.000188,\n 0.000000,\n 0.000000,\n 0.000000,\n 0.000000,\n 0.000000,\n 257.077681,\n 0.000000,\n 0.000000,\n 151.031437,\n 0.000000,\n 0.000000,\n 278.892924,\n 0.000000,\n 0.000000,\n 251.728680,\n 0.000000,\n 0.000000,\n 0.000000,\n 0.000000,\n 0.000000,\n 0.000000,\n 99.999888,\n 0.000000,\n 137.499922,\n 0.000000,\n 0.000000,\n 0.000000,\n 0.000000,\n ]\n )\n\n offset = offset.reshape(-1, 3)\n\n rotInd = [\n [5, 6, 4],\n [8, 9, 7],\n [11, 12, 10],\n [14, 15, 13],\n [17, 18, 16],\n [],\n [20, 21, 19],\n [23, 24, 22],\n [26, 27, 25],\n [29, 30, 28],\n [],\n [32, 33, 31],\n [35, 36, 34],\n [38, 39, 37],\n [41, 42, 40],\n [],\n [44, 45, 43],\n [47, 48, 46],\n [50, 51, 49],\n [53, 54, 52],\n [56, 57, 55],\n [],\n [59, 60, 58],\n [],\n [62, 63, 61],\n [65, 66, 64],\n [68, 69, 67],\n [71, 72, 70],\n [74, 75, 73],\n [],\n [77, 78, 76],\n [],\n ]\n\n # definitions are originating from matlab file --> bring them to zero based indexing\n rotInd = [[e - 1 for e in s if len(s) > 0] for s in rotInd]\n posInd = [0, 1, 2] if use_posInd else None\n\n expmapInd = np.split(np.arange(4, 100) - 1, 32)\n\n return parent, offset, rotInd, expmapInd, posInd", "def find_free_ortho_spaces(self):\r\n for pos, ortho in ORTHOGONAL_POSITIONS.items():\r\n if self.board[pos[0]][pos[1]].color != 0:\r\n for p in ortho:\r\n if self.board[p[0]][p[1]].color == 0:\r\n if (pos[0], pos[1]) in self.free_pos.keys():\r\n self.free_pos[(pos[0], pos[1])].add(p)\r\n else:\r\n self.free_pos[(pos[0], pos[1])] = {p}", "def _reset_stored(self):\n ## Main information\n self.idxs = None\n self.sp_relative_pos = None\n self._setted = False\n self.ks = None\n self.iss = [0]", "def null(cls):\n return cls(*[Point.origin() for i in range(2)])", "def empty_spots(self):\n\t\tret = []\n\t\tfor i in range(0, self.size):\n\t\t\tfor j in range(0, self.size):\n\t\t\t\tif(self.grid[i][j] == self.terminal):\n\t\t\t\t\tret.append((i,j))\n\t\treturn ret", "def get_objpositions(scope):\n positions = []\n print('Press enter after each position has been found; press control-c to 
end')\n while True:\n try:\n input()\n except KeyboardInterrupt:\n break\n positions.append(scope.stage.position)\n positions[-1].insert(0,scope.nosepiece.position)\n print('Position {}: {}'.format(len(positions), tuple(positions[-1])), end='')\n return positions", "def observation_space():", "def supress_atomPosition_singulrarities(self) -> None:\n\n if (\"POSITION\" in dir(self)):\n for ind, atom in enumerate(self.POSITION.content):\n atom.xp = atom.xp + 10 ** (-7) * ind\n atom.yp = atom.yp - 10 ** (-7) * ind\n atom.zp = atom.zp - 10 ** (-7) * ind", "def recheckPosition(self):\n self.start = self.bounds[0].pos\n self.end = self.bounds[1].pos", "def get_unhindered_positions(self, endposition):\n pass", "def allowed_positions(self, curr_state):\n return [i for i, val in enumerate(curr_state) if np.isnan(val)]", "def allowed_positions(self, curr_state):\n return [i for i, val in enumerate(curr_state) if np.isnan(val)]", "def allowed_positions(self, curr_state):\n return [i for i, val in enumerate(curr_state) if np.isnan(val)]", "def positions(self):\n return self.inorder() # make inorder the default", "def cleavagePos(self):\n raise NotImplementedError", "def cleavagePos(self):\n raise NotImplementedError", "def positions(self):\n return self.preorder() # return entire preorder iteration", "def reset_pos(self):\n\n return self.pos(1, 1)", "def max_positions(self):\n return None", "def fillPositions(self):\r\n if self.th is not None:\r\n self.df['POSITION'] = self.th.positions['Qty']\r\n self.df['REGS'] = self.th.positions['REGS']\r\n self.df['144A'] = self.th.positions['144A']\r\n self.df['POSITION'].fillna(0, inplace=True)\r\n self.df['REGS'].fillna(0, inplace=True)\r\n self.df['144A'].fillna(0, inplace=True)\r\n self.df['RISK'] = -self.df['RISK_MID'] * self.df['POSITION'] / 10000.", "def __init__(self):\n\n\t\tself.position = np.array([0, 0])", "def zeroWorkingCoordinates(self):\n self.workingZeroX = self.x\n self.workingZeroY = self.y\n self.workingZeroZ = self.z\n\n self.sendCommand(\"G10 L20 P1 X0 Y0 Z0\")", "def free_positions(self):\n positions = []\n for i in range(self.grid_size):\n for j in range(self.grid_size):\n if self.grid[i][j] == 0:\n positions.append((i, j))\n if positions == []:\n raise GameException('Game Over. 
No free position left.')\n return positions", "def global_coords(self) -> GlobalCoordsABC:", "def reset(self, position):\n size = self.stack.size\n if size < position: raise Exception(\"MultiDict.reset(), can't return to a point (%s) that is higher than the current size (%s)\" % (position, size))\n for i in range(size-1, position-1, -1):\n name, _, prev = self.stack[i]\n if prev is None: del self.lookup[name]\n else: self.lookup[name] = prev\n self.stack.size = position", "def remove_filled_positions(self, positions, board):\n\n new_positions = []\n for p in positions:\n if board.check_move(p[0], p[1]):\n new_positions.append(p)\n return new_positions", "def default_startpos(self) -> Dict[AtomKey, numpy.array]:\n ...", "def autoreveal_empty_spaces(self, position):\n revealed = []\n zero_spaces = []\n check_stack = [position]\n checked = []\n\n while len(check_stack) > 0:\n pos = x, y = check_stack.pop()\n if self.get_num_mines_around_position(x, y) == 0:\n zero_spaces.append(pos)\n \n # Add spaces around\n for ay in range(y-1, y+2):\n for ax in range(x-1, x+2):\n if ay >= 0 and ax >= 0 and ay < len(self.mine_map) and ax < len(self.mine_map[ay]): # Don't check spaces that are outside of the array\n apos = ax, ay\n if apos not in checked:\n check_stack.append(apos)\n revealed.append(apos)\n checked.append(pos)\n \n self.revealed.extend(revealed)", "def reset_position(self):\n self.set_position(copy.deepcopy(self.ab_pos))", "def ExecuteBeforeSolutionLoop(self):\n super().ExecuteBeforeSolutionLoop()\n num_of_vaviables = len(self.variables) + len(self.nonhistorical_variables)\n self.values = [[-1e6] * num_of_vaviables for _ in self.found_positions]", "def empty(self):\n return _osgAnimation.mapVertexInfluence_empty(self)", "def reset(self):\n super(PolygonTool, self).reset()\n # self.__nsides = None\n # self.__increment = None\n # self.__external = False # make this adjustable?\n self.__center = None\n for _i in range(self.__nsides):\n self.__xpts[_i] = 0.0\n self.__ypts[_i] = 0.0", "def delPosition(self):\n self.components = [0 for i in range(len(self.components))]", "def bounds(self, pos):", "def restorePositionsOfUnownedAntennas() :\n if ( s.getInitializationFlag() == True ): return\n unownedAnts = subarrayAntSetup( True )\n progress(\"Setting positions of unowned and uninitialized antennas %s\" % helpers.formatAsRanges( unownedAnts) )\n progress(\"....Pads\")\n restoreAntCommand(pad, unownedAnts, subarray=DEFAULT)\n progress( \"....Pad Offsets\" )\n restoreAntCommand( padOffset, unownedAnts, subarray=DEFAULT )\n progress(\"....Antenna positional offset and axis non-intersection\")\n restoreAntCommand( antennaOffset, unownedAnts, subarray=DEFAULT )\n restoreAntCommand( axisNonIntersection, unownedAnts, subarray=DEFAULT )", "def smart_tentacle_positions(self, bounds: np.ndarray, num_positions) -> np.ndarray:\n valid_memory = [(pos, cost) for pos, cost in self.memory if\n np.all(pos >= bounds[:, 0]) and np.all(pos <= bounds[:, 1])]\n if len(valid_memory) < 2 * len(bounds):\n return self.random_tentacle_positions(bounds, num_positions)\n if len(valid_memory) > self.max_training_mem:\n random.shuffle(valid_memory) # so the model can change\n valid_memory = valid_memory[:self.max_training_mem]\n # base_estimator = cook_estimator(\"GP\", space=bounds,noise=.005)\n opt = skopt.Optimizer(bounds, n_initial_points=0, n_jobs=-1,\n acq_optimizer_kwargs={\"n_restarts_optimizer\": 10, \"n_points\": 30_000}, acq_func=\"EI\")\n\n x = [list(pos) for pos, cost in valid_memory]\n y = [cost for pos, cost in 
valid_memory]\n opt.tell(x, y) # train model\n positions = np.array(opt.ask(num_positions))\n return positions", "def _null_get_rel_pos(self, k_is=[0]):\n return [[None]*len(self.iss)]*len(k_is)", "def _cte_postformat(self):\n# if type(self.idxs) == list:\n# self.idxs = np.array(self.idxs)\n if self.sp_relative_pos is not None:\n if type(self.sp_relative_pos) == list:\n self.sp_relative_pos = np.array(self.sp_relative_pos)", "def contract_position_notional(self):\r\n contracts = self.account_position.contracts\r\n crossed = Decimal(0)\r\n isolated = {}\r\n for c in contracts:\r\n if c.position.is_zero(): continue\r\n\r\n prc: FuturesPricingData = self.pricing_data_by_symbol.get(c.symbol)\r\n if prc is None: return self.__throw(\"missing pricing data for %s\" % c.symbol)\r\n\r\n if c.marginType == MarginType.crossed:\r\n crossed = crossed + abs(c.position * prc.markPrice)\r\n elif c.marginType == MarginType.isolated:\r\n isolated[c.symbol] = abs(c.position * prc.markPrice)\r\n else:\r\n return self.__throw(\"unknown marginType: \" + c.marginType)\r\n return ContractPositionNotional(crossed=crossed, isolated=isolated)", "def beginScope():", "def _null_set_rel_pos(self, rel_pos):\n self.get_sp_rel_pos = self._null_get_rel_pos", "def test_by_ref_non_contiguous(self):\n self.init()\n corners = self.ff64_2[::2,::2]\n assert not corners.flags['OWNDATA']\n set_to_zero_by_ref(corners)\n assert np.all(self.ff64_2 == np.array([[0,1,0],[3,4,5],[0,7,0]]))", "def zero_val(self):\r\n self.piDD = {\"[0]\": None}\r\n self.top_node = \"[0]\"\r\n self.dim = 0", "def pos(x):\r\n\r\n x[x < 0.] = 0.\r\n return x", "def resetPos(self):\n self.angle = self.startangle\n self.pos = []\n self.pos.extend(self.startpos)", "def positions(self):\n return self.preorder()", "def checked_positions():\n for base_position in chain([me.shipyard], me.get_dropoffs()):\n x_shipyard = base_position.position.x\n y_shipyard = base_position.position.y\n for x in range(-search_range, search_range):\n for y in range(-search_range, search_range):\n yield hlt.Position(\n x=x_shipyard + x,\n y=y_shipyard + y)", "def _free_indicies(self):\n return np.logical_not(self._fixed_indicies)", "def _notstaticneighs_get_corestored_by_inds_notslice(self, inds):\n inds = [inds] if type(inds) == int else inds\n idxs = []\n for k in range(len(self.idxs)):\n idxs.append([self.idxs[k][i] for i in inds])\n idxs = np.array(idxs) if type(self.idxs) == np.ndarray else idxs\n\n if self.sp_relative_pos is not None:\n sp_relative_pos = []\n for k in range(len(self.sp_relative_pos)):\n sp_relative_pos += [[self.sp_relative_pos[k][i] for i in inds]]\n else:\n sp_relative_pos = None\n return idxs, sp_relative_pos", "def find_empty_space(self, state):\r\n for i in range(3):\r\n for j in range(3):\r\n if state[i][j] == 0:\r\n return (i, j)", "def _empty(self, w_beg, w_end):\n\n tmp = np.arange(w_beg + self.pre_pad,\n w_end - self.post_pad + (1 / self.sampling_rate),\n 1 / self.sampling_rate)\n daten = [x.datetime for x in tmp]\n\n max_coa = max_coa_norm = np.full(len(daten), 0)\n\n coord = np.full((len(daten), 3), 0)\n\n return daten, max_coa, max_coa_norm, coord", "def generate_positions(self):\n raise NotImplementedError(\"Should implement generate_positions()!\")", "def updateEmptiesSet(self):\n self.emptiesSet = []\n for i in self.Range:\n if self.get_cell(i) == 0:\n self.emptiesSet.append(i)", "def pos(v=(0, 0)):\n return _check_two_scalars('pos', v)", "def full(self):\n for x in range(0,3):\n for y in range(0,3):\n if self[x,y] is None:\n return False\n 
return True", "def __pos__(self):\n ret = copy.deepcopy(self)\n for row in ret:\n if __debug__:\n assert hasattr(row, '__iter__'), repr(row) + \" | \" + repr(ret)\n assert len(row) <= len(ret.header), 'header needs to be larger or equal to all! ({},{})'.\\\n format(row, ret.header)\n for i in range(len(ret.header) - len(row)):\n row.append(None)\n return ret", "def reset(self):\n self.dynamic_predictions = {}\n self.position = 0\n self.references = []", "def check_position():\n if self.variables.table:\n pos = self.variables.table.get_current_position()\n position_update()", "def get_positions(self):\r\n null_pos, black_pos, white_pos = set(), set(), set()\r\n for pos in BOARD_POSITIONS:\r\n if self.state[pos[0]][pos[1]] == 0:\r\n null_pos.add(pos)\r\n elif self.state[pos[0]][pos[1]] == 1:\r\n black_pos.add(pos)\r\n else:\r\n white_pos.add(pos)\r\n return null_pos, black_pos, white_pos", "def position(self):\r\n pass", "def available_positions(self):\n if len([x for x in self.grid.values() if x[0] != None]) < 13:\n return [x for x in assignable_positions if self.grid[x][1] == \"---\"]\n else:\n return []", "def remove_players_wo_positions(df):\n df = df[pd.notnull(df['FantPos'])]\n return df", "def empty(self):", "def getSearchSpaceCoords(self):", "def assert_stored_sp_rel_pos(self):\n# ## Temporal\n# if self.sp_relative_pos is not None:\n# if self._constant_neighs:\n# if self.staticneighs:\n# assert(len(np.array(self.sp_relative_pos).shape) == 3)\n# else:\n# assert(len(np.array(self.sp_relative_pos).shape) == 4)\n# #################\n array_types = [list, np.ndarray]\n if self.sp_relative_pos is not None:\n assert(type(self.sp_relative_pos) in [list, np.ndarray])\n# if type(self.sp_relative_pos) in [float, int, np.int32, np.int64]:\n# ### Probably redundant\n# # it is needed or possible this situation?\n# pass\n assert(type(self.sp_relative_pos) in [list, np.ndarray])\n# if self.ks is None:\n# assert(self.staticneighs)\n# assert(len(self.sp_relative_pos) == len(self.iss))\n if self.staticneighs:\n assert(len(self.sp_relative_pos) == len(self.iss))\n ## Assert deep 3\n if len(self.iss):\n assert(type(self.sp_relative_pos[0]) in array_types)\n else:\n assert(self.ks is not None)\n assert(len(self.sp_relative_pos) == len(self.ks))\n if type(self.sp_relative_pos[0]) in array_types:\n if not self.staticneighs:\n assert(len(self.sp_relative_pos[0]) == len(self.iss))\n if len(self.sp_relative_pos[0]) > 0:\n assert(type(self.sp_relative_pos[0][0]) in array_types)", "def used_xvals(self):\n return [x for x in self.xvals() if any([len(self.get_plaquette(x, y)) > 0\n for y in self.yvals()])]", "def gather_missing_entities(data: List[list], n_ents: int, positions: List[int]) -> np.array:\n\n appeared = np.zeros(n_ents, dtype=np.int)\n for datum in data:\n for pos in positions:\n appeared[datum[pos]] = 1\n\n # Return this removed from range(n_ents)\n return np.arange(n_ents)[appeared == 0]", "def nonzero(self):\n\t\t_x = self.__seqvector.vec.nonzero()[1]\n\t\t_x = list(set(_x)) # uniquify them\n\t\t_x.sort() # sort positions\n\t\treturn _x", "def random_position():\n pos = np.random.randn(3)\n pos[2] = 0\n return pos", "def test_emtpy_conflict_places(conflict_places):\n assert conflict_places.named_place(\"Woodshop\") == None", "def refine_pos(self, posvalues):\n self._positions = []\n self._directions = []\n line_cur=posvalues[0]\n # print(line_cur)\n idx=0\n # print 'refine_pos'\n for values in posvalues[0:]:\n line_next = values\n unit_dir, grads=calc_dir(line_cur,line_next, self._eps/3)\n if 
unit_dir!=None or not self._use_direction:\n self._positions.append(line_cur)\n if unit_dir!=None:\n vec = unit_dir.tolist()\n self._directions.append(vec)\n line_cur=line_next\n idx +=1\n # add the last point and zero direction at the end\n # line_cur[0]=line_cur[0] + self._eps\n zero_dir = np.zeros(7)\n self._positions.append(line_cur)\n # print 'last'\n # print line_cur[0:4]\n vec=zero_dir.tolist()\n vec[0]=line_cur[0]\n # vec.insert(0,idx)\n self._directions.append(vec)\n return idx>1\n # with open('dir1.csv', 'wb') as csvfile:\n # writer = csv.writer(csvfile, delimiter=',',\n # quotechar='|', quoting=csv.QUOTE_MINIMAL)\n # # writer.writerow('path_pos, x, y, z, rot, rot, rot, rot')\n # [writer.writerow(r) for r in self._directions]\n # print 'end of refine_pos'", "def get_empty_pos(arr):\n\n\tpos = []\n\tfor i in range(len(arr)):\n\t\tif arr[i] == 0:\n\t\t\tpos.append(i)\n\n\treturn pos", "def _staticneighs_get_corestored_by_inds_notslice(self, inds):\n inds = [inds] if type(inds) == int else inds\n idxs = [self.idxs[i] for i in inds]\n idxs = np.array(idxs) if type(self.idxs) == np.ndarray else idxs\n if self.sp_relative_pos is not None:\n sp_relative_pos = [self.sp_relative_pos[i] for i in inds]\n else:\n sp_relative_pos = None\n return idxs, sp_relative_pos", "def simplified(self):\n return self.foreach(\n lambda k,v: (k,v) if numpy.count_nonzero(v)>0 else None,\n dimensions = self.dims,\n shape = self.shape,\n )", "def _computeNoisyPositions(self, state):\n positions = state.getGhostPositions()\n w = self.args.w\n w2 = 2*w+1\n div = float(w2 * w2)\n new_positions = []\n for p in positions:\n (x, y) = p\n dist = util.Counter()\n for i in range(x - w, x + w + 1):\n for j in range(y - w, y + w + 1):\n dist[(i, j)] = 1.0 / div\n dist.normalize()\n new_positions.append(util.chooseFromDistribution(dist))\n return new_positions", "def _computeNoisyPositions(self, state):\n positions = state.getGhostPositions()\n w = self.args.w\n w2 = 2*w+1\n div = float(w2 * w2)\n new_positions = []\n for p in positions:\n (x, y) = p\n dist = util.Counter()\n for i in range(x - w, x + w + 1):\n for j in range(y - w, y + w + 1):\n dist[(i, j)] = 1.0 / div\n dist.normalize()\n new_positions.append(util.chooseFromDistribution(dist))\n return new_positions", "def empty_cells(self) -> List[Cell]:\n return list(ob.pos[0] for ob in self.new_obs())", "def _list_only_set_rel_pos(self, rel_pos):\n self._array_only_set_rel_pos(rel_pos)", "def reset(self):\n self.cur_pos = self._get_current_pos_in_1d()\n\n return self.cur_pos", "def fill_octree(self):\n if len(self.children) <= 0:\n self.generate_octants()\n for point in self.points:\n self.append_point(point)\n self.points = np.array([])", "def board_empty_positions(self, x, y):\n board = self.boards[x][y]\n coords = [(x, y, i, j) for (i, j) in board.empty_squares]\n return self.coords_to_positions(coords)", "def reset(self):\n super().reset()\n self.prev_obj3_position = None", "def get_pos_dtdt(self) -> WAVector:\n pass", "def test_slice_delslice_forbidden(self):\n global setVal\n class foo:\n def __delslice__(self, i, j, value):\n global setVal\n setVal = i, j, value\n def __delitem__(self, index):\n global setVal\n setVal = index\n\n del foo()[::]\n self.assertEqual(setVal, slice(None, None, None))\n del foo()[::None]\n self.assertEqual(setVal, slice(None, None, None))", "def clear(self):\n if self.flag == 0:\n for coord in INDICES:\n self.kill(coord)\n self.chart[coord] = DEAD", "def _set_None(self):\n for obj in self.axes:\n obj._set_None()\n 
self.normalizations = None\n self.FTparameters = None\n self.values = None\n # Set to None the properties inherited from Data\n super(DataND, self)._set_None()", "def find_free(self):\n\n free_position = np.where(self.block == 0)\n free_position = np.array(free_position).flatten()\n return free_position", "def nullcontext() -> Iterator[None]:\n yield", "def get_positions(self):\n return self.positions", "def init_constraints(self):\n for (row, col), curr_value in np.ndenumerate(self.final_values):\n self.possible_values[row][col] = [] # Initialize empty list\n if curr_value == 0: # If the final value is 0 then the position is vacant\n for value in range(1, 10): # Iterate through all possible values (1, 9) and check if they are possible\n if self.__is_valid_value(row, col, value):\n self.possible_values[row][col].append(value) # Append possible values to the corresponding list\n return", "def _notstaticneighs_get_corestored_by_inds_slice(self, inds):\n inds = [inds] if type(inds) == int else inds\n idxs = self.idxs\n if self.sp_relative_pos is not None:\n sp_relative_pos = []\n for k in range(len(self.sp_relative_pos)):\n sp_relative_pos += [[self.sp_relative_pos[k][i] for i in inds]]\n else:\n sp_relative_pos = None\n return idxs, sp_relative_pos", "def others_locations(state):\n others_ = others(state)\n locations = {i: e['pos'] for i, e in others_.items()}\n return locations", "def ghost_points(self):\n return self.central.loffset, self.central.roffset", "def complete_mapping(self):\r\n\r\n self._reset_map()\r\n #position_prey = self.prey.position\r\n #self.complete_map[position_prey[1], position_prey[0]] = 1.0\r\n position_body = [part.position for part in self.body]\r\n\r\n for position in position_body:\r\n self.complete_map[position[1], position[0]] = 1\r\n\r\n return self.complete_map", "def calc_positions(self) :\n\t\tx, y = self.x0, self.y0\n\n\t\twhile self.is_visible(x, y) :\n\t\t\tx = 0.5 * self.gx * self.t**2 + self.vx0 * self.t + self.x0\n\t\t\ty = 0.5 * self.gy * self.t**2 + self.vy0 * self.t + self.y0\n\t\t\t\n\t\t\tself.t += self.dt\n\t\t\tself.pos_x.append(x)\n\t\t\tself.pos_y.append(y)", "def nocoordinate(self):\n return self.__nocoordinate", "def emptyAt(self, position):\n\n #check for any sprites at the position\n for key in self.sprites:\n s = self.sprites[key]\n if s.position == position and s.visible: #not visible means it isn't taking up the tile\n return False\n\n #check whether the position is reserved \n for pos in self.reservedPositions:\n if pos == position:\n return False\n\n #if nothing found, it must be empty \n return True", "def get_position(self, position):", "def null_heuristic(pos, problem):\n return 0" ]
[ "0.6175911", "0.5931269", "0.5778021", "0.5594402", "0.54442143", "0.54219913", "0.54111654", "0.53869516", "0.53672886", "0.5318788", "0.53052735", "0.52933586", "0.52916795", "0.52916795", "0.52916795", "0.5270822", "0.5251649", "0.5251649", "0.52438754", "0.5243215", "0.5238593", "0.52187586", "0.52038866", "0.5201521", "0.51956916", "0.5187985", "0.51572955", "0.5154493", "0.51510775", "0.51444715", "0.5142323", "0.5131361", "0.51013106", "0.5096833", "0.5095814", "0.50899404", "0.5088324", "0.50821173", "0.5063274", "0.50585514", "0.5057916", "0.5050897", "0.5047992", "0.50467795", "0.50438863", "0.5039818", "0.5038571", "0.50247717", "0.5023342", "0.50119084", "0.5000214", "0.49977022", "0.4994637", "0.49875614", "0.49826965", "0.4981894", "0.4969749", "0.49669442", "0.49632657", "0.49526283", "0.49445787", "0.49433267", "0.49322933", "0.49267408", "0.49207702", "0.491519", "0.4909002", "0.49088377", "0.49078897", "0.48978296", "0.489665", "0.48836643", "0.48779374", "0.48707503", "0.48698545", "0.48635048", "0.48620293", "0.48620293", "0.48530245", "0.4851603", "0.485034", "0.4838928", "0.4835929", "0.4834003", "0.48326194", "0.48232624", "0.48231676", "0.48229015", "0.48215565", "0.48092857", "0.47993973", "0.47959608", "0.47947145", "0.4791957", "0.47915256", "0.47898003", "0.4781551", "0.47801954", "0.47793016", "0.47789133", "0.47701845" ]
0.0
-1
copes with positions with no values
def test_get_logo_alt_input_type(self): data = [ {"A": 0.1, "C": 0.3, "G": 0.5, "T": 0.1}, {"A": 0.05, "C": 0.8, "G": 0.05, "T": 0.1}, {"A": 0.0, "C": 0.0, "G": 0.0, "T": 0.0}, {"A": 0.7, "C": 0.1, "G": 0.1, "T": 0.1}, {"A": 0.6, "C": 0.15, "G": 0.05, "T": 0.2}, ] get_logo(data) data[-2] = {} get_logo(data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def filled_positions(self):\n return [x for x in assignable_positions if self.grid[x][0]]", "def no_empty_positions(self):\n clauses = []\n\n for position in range(0,self.graph.num_vertices):\n clause = []\n for vertex in range(0,self.graph.num_vertices):\n clause.append(ClauseVariable(False,vertex,position))\n clauses.append(clause)\n return clauses", "def _some_variables(use_posInd=False):\n\n parent = (\n np.array(\n [\n 0,\n 1,\n 2,\n 3,\n 4,\n 5,\n 1,\n 7,\n 8,\n 9,\n 10,\n 1,\n 12,\n 13,\n 14,\n 15,\n 13,\n 17,\n 18,\n 19,\n 20,\n 21,\n 20,\n 23,\n 13,\n 25,\n 26,\n 27,\n 28,\n 29,\n 28,\n 31,\n ]\n )\n - 1\n )\n\n offset = np.array(\n [\n 0.000000,\n 0.000000,\n 0.000000,\n -132.948591,\n 0.000000,\n 0.000000,\n 0.000000,\n -442.894612,\n 0.000000,\n 0.000000,\n -454.206447,\n 0.000000,\n 0.000000,\n 0.000000,\n 162.767078,\n 0.000000,\n 0.000000,\n 74.999437,\n 132.948826,\n 0.000000,\n 0.000000,\n 0.000000,\n -442.894413,\n 0.000000,\n 0.000000,\n -454.206590,\n 0.000000,\n 0.000000,\n 0.000000,\n 162.767426,\n 0.000000,\n 0.000000,\n 74.999948,\n 0.000000,\n 0.100000,\n 0.000000,\n 0.000000,\n 233.383263,\n 0.000000,\n 0.000000,\n 257.077681,\n 0.000000,\n 0.000000,\n 121.134938,\n 0.000000,\n 0.000000,\n 115.002227,\n 0.000000,\n 0.000000,\n 257.077681,\n 0.000000,\n 0.000000,\n 151.034226,\n 0.000000,\n 0.000000,\n 278.882773,\n 0.000000,\n 0.000000,\n 251.733451,\n 0.000000,\n 0.000000,\n 0.000000,\n 0.000000,\n 0.000000,\n 0.000000,\n 99.999627,\n 0.000000,\n 100.000188,\n 0.000000,\n 0.000000,\n 0.000000,\n 0.000000,\n 0.000000,\n 257.077681,\n 0.000000,\n 0.000000,\n 151.031437,\n 0.000000,\n 0.000000,\n 278.892924,\n 0.000000,\n 0.000000,\n 251.728680,\n 0.000000,\n 0.000000,\n 0.000000,\n 0.000000,\n 0.000000,\n 0.000000,\n 99.999888,\n 0.000000,\n 137.499922,\n 0.000000,\n 0.000000,\n 0.000000,\n 0.000000,\n ]\n )\n\n offset = offset.reshape(-1, 3)\n\n rotInd = [\n [5, 6, 4],\n [8, 9, 7],\n [11, 12, 10],\n [14, 15, 13],\n [17, 18, 16],\n [],\n [20, 21, 19],\n [23, 24, 22],\n [26, 27, 25],\n [29, 30, 28],\n [],\n [32, 33, 31],\n [35, 36, 34],\n [38, 39, 37],\n [41, 42, 40],\n [],\n [44, 45, 43],\n [47, 48, 46],\n [50, 51, 49],\n [53, 54, 52],\n [56, 57, 55],\n [],\n [59, 60, 58],\n [],\n [62, 63, 61],\n [65, 66, 64],\n [68, 69, 67],\n [71, 72, 70],\n [74, 75, 73],\n [],\n [77, 78, 76],\n [],\n ]\n\n # definitions are originating from matlab file --> bring them to zero based indexing\n rotInd = [[e - 1 for e in s if len(s) > 0] for s in rotInd]\n posInd = [0, 1, 2] if use_posInd else None\n\n expmapInd = np.split(np.arange(4, 100) - 1, 32)\n\n return parent, offset, rotInd, expmapInd, posInd", "def find_free_ortho_spaces(self):\r\n for pos, ortho in ORTHOGONAL_POSITIONS.items():\r\n if self.board[pos[0]][pos[1]].color != 0:\r\n for p in ortho:\r\n if self.board[p[0]][p[1]].color == 0:\r\n if (pos[0], pos[1]) in self.free_pos.keys():\r\n self.free_pos[(pos[0], pos[1])].add(p)\r\n else:\r\n self.free_pos[(pos[0], pos[1])] = {p}", "def _reset_stored(self):\n ## Main information\n self.idxs = None\n self.sp_relative_pos = None\n self._setted = False\n self.ks = None\n self.iss = [0]", "def null(cls):\n return cls(*[Point.origin() for i in range(2)])", "def empty_spots(self):\n\t\tret = []\n\t\tfor i in range(0, self.size):\n\t\t\tfor j in range(0, self.size):\n\t\t\t\tif(self.grid[i][j] == self.terminal):\n\t\t\t\t\tret.append((i,j))\n\t\treturn ret", "def get_objpositions(scope):\n positions = []\n print('Press enter after each position has been found; press control-c to 
end')\n while True:\n try:\n input()\n except KeyboardInterrupt:\n break\n positions.append(scope.stage.position)\n positions[-1].insert(0,scope.nosepiece.position)\n print('Position {}: {}'.format(len(positions), tuple(positions[-1])), end='')\n return positions", "def observation_space():", "def supress_atomPosition_singulrarities(self) -> None:\n\n if (\"POSITION\" in dir(self)):\n for ind, atom in enumerate(self.POSITION.content):\n atom.xp = atom.xp + 10 ** (-7) * ind\n atom.yp = atom.yp - 10 ** (-7) * ind\n atom.zp = atom.zp - 10 ** (-7) * ind", "def recheckPosition(self):\n self.start = self.bounds[0].pos\n self.end = self.bounds[1].pos", "def get_unhindered_positions(self, endposition):\n pass", "def allowed_positions(self, curr_state):\n return [i for i, val in enumerate(curr_state) if np.isnan(val)]", "def allowed_positions(self, curr_state):\n return [i for i, val in enumerate(curr_state) if np.isnan(val)]", "def allowed_positions(self, curr_state):\n return [i for i, val in enumerate(curr_state) if np.isnan(val)]", "def positions(self):\n return self.inorder() # make inorder the default", "def cleavagePos(self):\n raise NotImplementedError", "def cleavagePos(self):\n raise NotImplementedError", "def positions(self):\n return self.preorder() # return entire preorder iteration", "def reset_pos(self):\n\n return self.pos(1, 1)", "def max_positions(self):\n return None", "def fillPositions(self):\r\n if self.th is not None:\r\n self.df['POSITION'] = self.th.positions['Qty']\r\n self.df['REGS'] = self.th.positions['REGS']\r\n self.df['144A'] = self.th.positions['144A']\r\n self.df['POSITION'].fillna(0, inplace=True)\r\n self.df['REGS'].fillna(0, inplace=True)\r\n self.df['144A'].fillna(0, inplace=True)\r\n self.df['RISK'] = -self.df['RISK_MID'] * self.df['POSITION'] / 10000.", "def __init__(self):\n\n\t\tself.position = np.array([0, 0])", "def zeroWorkingCoordinates(self):\n self.workingZeroX = self.x\n self.workingZeroY = self.y\n self.workingZeroZ = self.z\n\n self.sendCommand(\"G10 L20 P1 X0 Y0 Z0\")", "def free_positions(self):\n positions = []\n for i in range(self.grid_size):\n for j in range(self.grid_size):\n if self.grid[i][j] == 0:\n positions.append((i, j))\n if positions == []:\n raise GameException('Game Over. 
No free position left.')\n return positions", "def global_coords(self) -> GlobalCoordsABC:", "def reset(self, position):\n size = self.stack.size\n if size < position: raise Exception(\"MultiDict.reset(), can't return to a point (%s) that is higher than the current size (%s)\" % (position, size))\n for i in range(size-1, position-1, -1):\n name, _, prev = self.stack[i]\n if prev is None: del self.lookup[name]\n else: self.lookup[name] = prev\n self.stack.size = position", "def remove_filled_positions(self, positions, board):\n\n new_positions = []\n for p in positions:\n if board.check_move(p[0], p[1]):\n new_positions.append(p)\n return new_positions", "def default_startpos(self) -> Dict[AtomKey, numpy.array]:\n ...", "def autoreveal_empty_spaces(self, position):\n revealed = []\n zero_spaces = []\n check_stack = [position]\n checked = []\n\n while len(check_stack) > 0:\n pos = x, y = check_stack.pop()\n if self.get_num_mines_around_position(x, y) == 0:\n zero_spaces.append(pos)\n \n # Add spaces around\n for ay in range(y-1, y+2):\n for ax in range(x-1, x+2):\n if ay >= 0 and ax >= 0 and ay < len(self.mine_map) and ax < len(self.mine_map[ay]): # Don't check spaces that are outside of the array\n apos = ax, ay\n if apos not in checked:\n check_stack.append(apos)\n revealed.append(apos)\n checked.append(pos)\n \n self.revealed.extend(revealed)", "def reset_position(self):\n self.set_position(copy.deepcopy(self.ab_pos))", "def ExecuteBeforeSolutionLoop(self):\n super().ExecuteBeforeSolutionLoop()\n num_of_vaviables = len(self.variables) + len(self.nonhistorical_variables)\n self.values = [[-1e6] * num_of_vaviables for _ in self.found_positions]", "def empty(self):\n return _osgAnimation.mapVertexInfluence_empty(self)", "def reset(self):\n super(PolygonTool, self).reset()\n # self.__nsides = None\n # self.__increment = None\n # self.__external = False # make this adjustable?\n self.__center = None\n for _i in range(self.__nsides):\n self.__xpts[_i] = 0.0\n self.__ypts[_i] = 0.0", "def delPosition(self):\n self.components = [0 for i in range(len(self.components))]", "def bounds(self, pos):", "def restorePositionsOfUnownedAntennas() :\n if ( s.getInitializationFlag() == True ): return\n unownedAnts = subarrayAntSetup( True )\n progress(\"Setting positions of unowned and uninitialized antennas %s\" % helpers.formatAsRanges( unownedAnts) )\n progress(\"....Pads\")\n restoreAntCommand(pad, unownedAnts, subarray=DEFAULT)\n progress( \"....Pad Offsets\" )\n restoreAntCommand( padOffset, unownedAnts, subarray=DEFAULT )\n progress(\"....Antenna positional offset and axis non-intersection\")\n restoreAntCommand( antennaOffset, unownedAnts, subarray=DEFAULT )\n restoreAntCommand( axisNonIntersection, unownedAnts, subarray=DEFAULT )", "def smart_tentacle_positions(self, bounds: np.ndarray, num_positions) -> np.ndarray:\n valid_memory = [(pos, cost) for pos, cost in self.memory if\n np.all(pos >= bounds[:, 0]) and np.all(pos <= bounds[:, 1])]\n if len(valid_memory) < 2 * len(bounds):\n return self.random_tentacle_positions(bounds, num_positions)\n if len(valid_memory) > self.max_training_mem:\n random.shuffle(valid_memory) # so the model can change\n valid_memory = valid_memory[:self.max_training_mem]\n # base_estimator = cook_estimator(\"GP\", space=bounds,noise=.005)\n opt = skopt.Optimizer(bounds, n_initial_points=0, n_jobs=-1,\n acq_optimizer_kwargs={\"n_restarts_optimizer\": 10, \"n_points\": 30_000}, acq_func=\"EI\")\n\n x = [list(pos) for pos, cost in valid_memory]\n y = [cost for pos, cost in 
valid_memory]\n opt.tell(x, y) # train model\n positions = np.array(opt.ask(num_positions))\n return positions", "def _null_get_rel_pos(self, k_is=[0]):\n return [[None]*len(self.iss)]*len(k_is)", "def _cte_postformat(self):\n# if type(self.idxs) == list:\n# self.idxs = np.array(self.idxs)\n if self.sp_relative_pos is not None:\n if type(self.sp_relative_pos) == list:\n self.sp_relative_pos = np.array(self.sp_relative_pos)", "def contract_position_notional(self):\r\n contracts = self.account_position.contracts\r\n crossed = Decimal(0)\r\n isolated = {}\r\n for c in contracts:\r\n if c.position.is_zero(): continue\r\n\r\n prc: FuturesPricingData = self.pricing_data_by_symbol.get(c.symbol)\r\n if prc is None: return self.__throw(\"missing pricing data for %s\" % c.symbol)\r\n\r\n if c.marginType == MarginType.crossed:\r\n crossed = crossed + abs(c.position * prc.markPrice)\r\n elif c.marginType == MarginType.isolated:\r\n isolated[c.symbol] = abs(c.position * prc.markPrice)\r\n else:\r\n return self.__throw(\"unknown marginType: \" + c.marginType)\r\n return ContractPositionNotional(crossed=crossed, isolated=isolated)", "def beginScope():", "def _null_set_rel_pos(self, rel_pos):\n self.get_sp_rel_pos = self._null_get_rel_pos", "def test_by_ref_non_contiguous(self):\n self.init()\n corners = self.ff64_2[::2,::2]\n assert not corners.flags['OWNDATA']\n set_to_zero_by_ref(corners)\n assert np.all(self.ff64_2 == np.array([[0,1,0],[3,4,5],[0,7,0]]))", "def zero_val(self):\r\n self.piDD = {\"[0]\": None}\r\n self.top_node = \"[0]\"\r\n self.dim = 0", "def pos(x):\r\n\r\n x[x < 0.] = 0.\r\n return x", "def resetPos(self):\n self.angle = self.startangle\n self.pos = []\n self.pos.extend(self.startpos)", "def positions(self):\n return self.preorder()", "def checked_positions():\n for base_position in chain([me.shipyard], me.get_dropoffs()):\n x_shipyard = base_position.position.x\n y_shipyard = base_position.position.y\n for x in range(-search_range, search_range):\n for y in range(-search_range, search_range):\n yield hlt.Position(\n x=x_shipyard + x,\n y=y_shipyard + y)", "def _free_indicies(self):\n return np.logical_not(self._fixed_indicies)", "def _notstaticneighs_get_corestored_by_inds_notslice(self, inds):\n inds = [inds] if type(inds) == int else inds\n idxs = []\n for k in range(len(self.idxs)):\n idxs.append([self.idxs[k][i] for i in inds])\n idxs = np.array(idxs) if type(self.idxs) == np.ndarray else idxs\n\n if self.sp_relative_pos is not None:\n sp_relative_pos = []\n for k in range(len(self.sp_relative_pos)):\n sp_relative_pos += [[self.sp_relative_pos[k][i] for i in inds]]\n else:\n sp_relative_pos = None\n return idxs, sp_relative_pos", "def find_empty_space(self, state):\r\n for i in range(3):\r\n for j in range(3):\r\n if state[i][j] == 0:\r\n return (i, j)", "def _empty(self, w_beg, w_end):\n\n tmp = np.arange(w_beg + self.pre_pad,\n w_end - self.post_pad + (1 / self.sampling_rate),\n 1 / self.sampling_rate)\n daten = [x.datetime for x in tmp]\n\n max_coa = max_coa_norm = np.full(len(daten), 0)\n\n coord = np.full((len(daten), 3), 0)\n\n return daten, max_coa, max_coa_norm, coord", "def generate_positions(self):\n raise NotImplementedError(\"Should implement generate_positions()!\")", "def updateEmptiesSet(self):\n self.emptiesSet = []\n for i in self.Range:\n if self.get_cell(i) == 0:\n self.emptiesSet.append(i)", "def pos(v=(0, 0)):\n return _check_two_scalars('pos', v)", "def full(self):\n for x in range(0,3):\n for y in range(0,3):\n if self[x,y] is None:\n return False\n 
return True", "def __pos__(self):\n ret = copy.deepcopy(self)\n for row in ret:\n if __debug__:\n assert hasattr(row, '__iter__'), repr(row) + \" | \" + repr(ret)\n assert len(row) <= len(ret.header), 'header needs to be larger or equal to all! ({},{})'.\\\n format(row, ret.header)\n for i in range(len(ret.header) - len(row)):\n row.append(None)\n return ret", "def reset(self):\n self.dynamic_predictions = {}\n self.position = 0\n self.references = []", "def check_position():\n if self.variables.table:\n pos = self.variables.table.get_current_position()\n position_update()", "def get_positions(self):\r\n null_pos, black_pos, white_pos = set(), set(), set()\r\n for pos in BOARD_POSITIONS:\r\n if self.state[pos[0]][pos[1]] == 0:\r\n null_pos.add(pos)\r\n elif self.state[pos[0]][pos[1]] == 1:\r\n black_pos.add(pos)\r\n else:\r\n white_pos.add(pos)\r\n return null_pos, black_pos, white_pos", "def position(self):\r\n pass", "def available_positions(self):\n if len([x for x in self.grid.values() if x[0] != None]) < 13:\n return [x for x in assignable_positions if self.grid[x][1] == \"---\"]\n else:\n return []", "def remove_players_wo_positions(df):\n df = df[pd.notnull(df['FantPos'])]\n return df", "def empty(self):", "def getSearchSpaceCoords(self):", "def assert_stored_sp_rel_pos(self):\n# ## Temporal\n# if self.sp_relative_pos is not None:\n# if self._constant_neighs:\n# if self.staticneighs:\n# assert(len(np.array(self.sp_relative_pos).shape) == 3)\n# else:\n# assert(len(np.array(self.sp_relative_pos).shape) == 4)\n# #################\n array_types = [list, np.ndarray]\n if self.sp_relative_pos is not None:\n assert(type(self.sp_relative_pos) in [list, np.ndarray])\n# if type(self.sp_relative_pos) in [float, int, np.int32, np.int64]:\n# ### Probably redundant\n# # it is needed or possible this situation?\n# pass\n assert(type(self.sp_relative_pos) in [list, np.ndarray])\n# if self.ks is None:\n# assert(self.staticneighs)\n# assert(len(self.sp_relative_pos) == len(self.iss))\n if self.staticneighs:\n assert(len(self.sp_relative_pos) == len(self.iss))\n ## Assert deep 3\n if len(self.iss):\n assert(type(self.sp_relative_pos[0]) in array_types)\n else:\n assert(self.ks is not None)\n assert(len(self.sp_relative_pos) == len(self.ks))\n if type(self.sp_relative_pos[0]) in array_types:\n if not self.staticneighs:\n assert(len(self.sp_relative_pos[0]) == len(self.iss))\n if len(self.sp_relative_pos[0]) > 0:\n assert(type(self.sp_relative_pos[0][0]) in array_types)", "def used_xvals(self):\n return [x for x in self.xvals() if any([len(self.get_plaquette(x, y)) > 0\n for y in self.yvals()])]", "def gather_missing_entities(data: List[list], n_ents: int, positions: List[int]) -> np.array:\n\n appeared = np.zeros(n_ents, dtype=np.int)\n for datum in data:\n for pos in positions:\n appeared[datum[pos]] = 1\n\n # Return this removed from range(n_ents)\n return np.arange(n_ents)[appeared == 0]", "def nonzero(self):\n\t\t_x = self.__seqvector.vec.nonzero()[1]\n\t\t_x = list(set(_x)) # uniquify them\n\t\t_x.sort() # sort positions\n\t\treturn _x", "def random_position():\n pos = np.random.randn(3)\n pos[2] = 0\n return pos", "def test_emtpy_conflict_places(conflict_places):\n assert conflict_places.named_place(\"Woodshop\") == None", "def refine_pos(self, posvalues):\n self._positions = []\n self._directions = []\n line_cur=posvalues[0]\n # print(line_cur)\n idx=0\n # print 'refine_pos'\n for values in posvalues[0:]:\n line_next = values\n unit_dir, grads=calc_dir(line_cur,line_next, self._eps/3)\n if 
unit_dir!=None or not self._use_direction:\n self._positions.append(line_cur)\n if unit_dir!=None:\n vec = unit_dir.tolist()\n self._directions.append(vec)\n line_cur=line_next\n idx +=1\n # add the last point and zero direction at the end\n # line_cur[0]=line_cur[0] + self._eps\n zero_dir = np.zeros(7)\n self._positions.append(line_cur)\n # print 'last'\n # print line_cur[0:4]\n vec=zero_dir.tolist()\n vec[0]=line_cur[0]\n # vec.insert(0,idx)\n self._directions.append(vec)\n return idx>1\n # with open('dir1.csv', 'wb') as csvfile:\n # writer = csv.writer(csvfile, delimiter=',',\n # quotechar='|', quoting=csv.QUOTE_MINIMAL)\n # # writer.writerow('path_pos, x, y, z, rot, rot, rot, rot')\n # [writer.writerow(r) for r in self._directions]\n # print 'end of refine_pos'", "def get_empty_pos(arr):\n\n\tpos = []\n\tfor i in range(len(arr)):\n\t\tif arr[i] == 0:\n\t\t\tpos.append(i)\n\n\treturn pos", "def _staticneighs_get_corestored_by_inds_notslice(self, inds):\n inds = [inds] if type(inds) == int else inds\n idxs = [self.idxs[i] for i in inds]\n idxs = np.array(idxs) if type(self.idxs) == np.ndarray else idxs\n if self.sp_relative_pos is not None:\n sp_relative_pos = [self.sp_relative_pos[i] for i in inds]\n else:\n sp_relative_pos = None\n return idxs, sp_relative_pos", "def simplified(self):\n return self.foreach(\n lambda k,v: (k,v) if numpy.count_nonzero(v)>0 else None,\n dimensions = self.dims,\n shape = self.shape,\n )", "def _computeNoisyPositions(self, state):\n positions = state.getGhostPositions()\n w = self.args.w\n w2 = 2*w+1\n div = float(w2 * w2)\n new_positions = []\n for p in positions:\n (x, y) = p\n dist = util.Counter()\n for i in range(x - w, x + w + 1):\n for j in range(y - w, y + w + 1):\n dist[(i, j)] = 1.0 / div\n dist.normalize()\n new_positions.append(util.chooseFromDistribution(dist))\n return new_positions", "def _computeNoisyPositions(self, state):\n positions = state.getGhostPositions()\n w = self.args.w\n w2 = 2*w+1\n div = float(w2 * w2)\n new_positions = []\n for p in positions:\n (x, y) = p\n dist = util.Counter()\n for i in range(x - w, x + w + 1):\n for j in range(y - w, y + w + 1):\n dist[(i, j)] = 1.0 / div\n dist.normalize()\n new_positions.append(util.chooseFromDistribution(dist))\n return new_positions", "def empty_cells(self) -> List[Cell]:\n return list(ob.pos[0] for ob in self.new_obs())", "def _list_only_set_rel_pos(self, rel_pos):\n self._array_only_set_rel_pos(rel_pos)", "def reset(self):\n self.cur_pos = self._get_current_pos_in_1d()\n\n return self.cur_pos", "def fill_octree(self):\n if len(self.children) <= 0:\n self.generate_octants()\n for point in self.points:\n self.append_point(point)\n self.points = np.array([])", "def board_empty_positions(self, x, y):\n board = self.boards[x][y]\n coords = [(x, y, i, j) for (i, j) in board.empty_squares]\n return self.coords_to_positions(coords)", "def reset(self):\n super().reset()\n self.prev_obj3_position = None", "def get_pos_dtdt(self) -> WAVector:\n pass", "def test_slice_delslice_forbidden(self):\n global setVal\n class foo:\n def __delslice__(self, i, j, value):\n global setVal\n setVal = i, j, value\n def __delitem__(self, index):\n global setVal\n setVal = index\n\n del foo()[::]\n self.assertEqual(setVal, slice(None, None, None))\n del foo()[::None]\n self.assertEqual(setVal, slice(None, None, None))", "def clear(self):\n if self.flag == 0:\n for coord in INDICES:\n self.kill(coord)\n self.chart[coord] = DEAD", "def _set_None(self):\n for obj in self.axes:\n obj._set_None()\n 
self.normalizations = None\n self.FTparameters = None\n self.values = None\n # Set to None the properties inherited from Data\n super(DataND, self)._set_None()", "def find_free(self):\n\n free_position = np.where(self.block == 0)\n free_position = np.array(free_position).flatten()\n return free_position", "def nullcontext() -> Iterator[None]:\n yield", "def get_positions(self):\n return self.positions", "def init_constraints(self):\n for (row, col), curr_value in np.ndenumerate(self.final_values):\n self.possible_values[row][col] = [] # Initialize empty list\n if curr_value == 0: # If the final value is 0 then the position is vacant\n for value in range(1, 10): # Iterate through all possible values (1, 9) and check if they are possible\n if self.__is_valid_value(row, col, value):\n self.possible_values[row][col].append(value) # Append possible values to the corresponding list\n return", "def _notstaticneighs_get_corestored_by_inds_slice(self, inds):\n inds = [inds] if type(inds) == int else inds\n idxs = self.idxs\n if self.sp_relative_pos is not None:\n sp_relative_pos = []\n for k in range(len(self.sp_relative_pos)):\n sp_relative_pos += [[self.sp_relative_pos[k][i] for i in inds]]\n else:\n sp_relative_pos = None\n return idxs, sp_relative_pos", "def others_locations(state):\n others_ = others(state)\n locations = {i: e['pos'] for i, e in others_.items()}\n return locations", "def ghost_points(self):\n return self.central.loffset, self.central.roffset", "def complete_mapping(self):\r\n\r\n self._reset_map()\r\n #position_prey = self.prey.position\r\n #self.complete_map[position_prey[1], position_prey[0]] = 1.0\r\n position_body = [part.position for part in self.body]\r\n\r\n for position in position_body:\r\n self.complete_map[position[1], position[0]] = 1\r\n\r\n return self.complete_map", "def calc_positions(self) :\n\t\tx, y = self.x0, self.y0\n\n\t\twhile self.is_visible(x, y) :\n\t\t\tx = 0.5 * self.gx * self.t**2 + self.vx0 * self.t + self.x0\n\t\t\ty = 0.5 * self.gy * self.t**2 + self.vy0 * self.t + self.y0\n\t\t\t\n\t\t\tself.t += self.dt\n\t\t\tself.pos_x.append(x)\n\t\t\tself.pos_y.append(y)", "def nocoordinate(self):\n return self.__nocoordinate", "def emptyAt(self, position):\n\n #check for any sprites at the position\n for key in self.sprites:\n s = self.sprites[key]\n if s.position == position and s.visible: #not visible means it isn't taking up the tile\n return False\n\n #check whether the position is reserved \n for pos in self.reservedPositions:\n if pos == position:\n return False\n\n #if nothing found, it must be empty \n return True", "def get_position(self, position):", "def null_heuristic(pos, problem):\n return 0" ]
[ "0.6175911", "0.5931269", "0.5778021", "0.5594402", "0.54442143", "0.54219913", "0.54111654", "0.53869516", "0.53672886", "0.5318788", "0.53052735", "0.52933586", "0.52916795", "0.52916795", "0.52916795", "0.5270822", "0.5251649", "0.5251649", "0.52438754", "0.5243215", "0.5238593", "0.52187586", "0.52038866", "0.5201521", "0.51956916", "0.5187985", "0.51572955", "0.5154493", "0.51510775", "0.51444715", "0.5142323", "0.5131361", "0.51013106", "0.5096833", "0.5095814", "0.50899404", "0.5088324", "0.50821173", "0.5063274", "0.50585514", "0.5057916", "0.5050897", "0.5047992", "0.50467795", "0.50438863", "0.5039818", "0.5038571", "0.50247717", "0.5023342", "0.50119084", "0.5000214", "0.49977022", "0.4994637", "0.49875614", "0.49826965", "0.4981894", "0.4969749", "0.49669442", "0.49632657", "0.49526283", "0.49445787", "0.49433267", "0.49322933", "0.49267408", "0.49207702", "0.491519", "0.4909002", "0.49088377", "0.49078897", "0.48978296", "0.489665", "0.48836643", "0.48779374", "0.48707503", "0.48698545", "0.48635048", "0.48620293", "0.48620293", "0.48530245", "0.4851603", "0.485034", "0.4838928", "0.4835929", "0.4834003", "0.48326194", "0.48232624", "0.48231676", "0.48229015", "0.48215565", "0.48092857", "0.47993973", "0.47959608", "0.47947145", "0.4791957", "0.47915256", "0.47898003", "0.4781551", "0.47801954", "0.47793016", "0.47789133", "0.47701845" ]
0.0
-1
exercising some Letter methods
def test_letter_methods(self): # shift l = get_character("G") self.assertEqual(l.x, 0) self.assertEqual(l.y, 0) l.shift(2, 2) self.assertEqual(l.x, 2) self.assertEqual(l.y, 2) # scale adjusts the scale attributes orig_width = l.scale_x orig_height = l.scale_y l.scale(x=0.5, y=2) self.assertEqual(l.scale_x, orig_width / 2) self.assertEqual(l.scale_y, orig_height * 2) # invert changes the degree attr l.rotate(180) self.assertEqual(l.degrees, 180)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def display_letters(word, guesses):\n pass", "def letter_for(label):\n return \"ABCDEFGHIJ\"[label]", "def init_letters():\n return ('a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i',\n 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r',\n 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',\n 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I',\n 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R',\n 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z')", "def getAlphabet(self):\n return self.alpha", "def getAlphabet(self):\n return self.alpha", "def getAlphabet(self):\n return self.alpha", "def letter(self) -> str:\n my_letter = None\n if self is LieType.A:\n my_letter = \"A\"\n elif self is LieType.B:\n my_letter = \"B\"\n elif self is LieType.C:\n my_letter = \"C\"\n elif self is LieType.D:\n my_letter = \"D\"\n else:\n raise ValueError(\n \"This is not in the enum of Lie types so this should be unreachable\")\n return my_letter", "def test_message_letter(Message, letter):\n assert get_message_letter(Message) == letter", "def letters(m) -> str:\n string = \"\".join(m.letter_list)\n if m[0] == \"sky\":\n string = string.upper()\n return string", "def getLetter(index):\n alphabet = string.ascii_lowercase + \" \"\n return alphabet[index]", "def next_letter(letter):\r\n\tcoded_text = ''\r\n\tstep = 1\r\n\tif letter in ascii_lowercase:\r\n\t\tcoded_text = coded_text + ascii_lowercase[ascii_lowercase.index(letter) + step % len(ascii_lowercase)]\r\n\r\n\tif letter in ascii_uppercase:\r\n\t\tcoded_text = coded_text + ascii_uppercase[ascii_uppercase.index(letter) + step % len(ascii_uppercase)]\r\n\r\n\telse:\r\n\t\tcoded_text += text\r\n\r\n\treturn coded_text", "def get_letter_dict():\n\treturn {\n\t\t'A': 0,\n\t\t'B': 0,\n\t\t'C': 0,\n\t\t'D': 0,\n\t\t'E': 0,\n\t\t'F': 0,\n\t\t'G': 0,\n\t\t'H': 0,\n\t\t'I': 0,\n\t\t'J': 0,\n\t\t'K': 0,\n\t\t'L': 0,\n\t\t'M': 0,\n\t\t'N': 0,\n\t\t'O': 0,\n\t\t'P': 0,\n\t\t'Q': 0,\n\t\t'R': 0,\n\t\t'S': 0,\n\t\t'T': 0,\n\t\t'U': 0,\n\t\t'V': 0,\n\t\t'W': 0,\n\t\t'X': 0,\n\t\t'Y': 0,\n\t\t'Z': 0\n\t}", "def next_letter(letter, step):\r\n\r\n\tif letter in ascii_uppercase:\r\n\t\tnew_letter = get_new_letter(ascii_uppercase, letter, step)\r\n\telif letter in ascii_lowercase:\r\n\t\tnew_letter = get_new_letter(ascii_lowercase, letter, step)\r\n\telse:\r\n\t\tnew_letter = letter\r\n\treturn new_letter", "def index_letter(self, index):\n\t\treturn ALPHABET[index]", "def alphabet_war(fight):", "def test_letter_delimiter(self):\n self.non_default_delimiter_template('a')", "def getletter(variable, letternumber):\n\n # Get the corresponding letter\n return str(variable)[letternumber - 1]", "def letters(string, user_input):\n\n string = upper(string) # pass the string to the upper function to capitalize it\n options = [] # The array where the options given by the user will be stored\n new_string = '' # Empty string to append to\n\n for char in user_input: # Itterate over words in the options passed by the user \n options.append(char) # Append them to the options list\n\n for char in string: # Itterate over each character in the original string\n if char == 'A' and 'A' in options: # If the character is \"A\" and given by the user\n char = '4' # Repalce it with \"4\"\n if char == 'E' and 'E' in options: # If the character is \"E\" and given by the user\n char = '3' # Repalce it with \"3\"\n if char == 'S' and 'S' in options: # If the character is \"A\" and given by the user\n char = '5' # Repalce it with \"5\"\n if char == 'B' and 'B' in options: # If the character is \"B\" and given by the user\n char = '13' # Repalce it 
with \"13\"\n if char == 'O' and 'O' in options: # If the character is \"O\" and given by the user\n char = '0' # Repalce it with \"0\"\n if char == 'I' and 'I' in options: # If the character is \"I\" and given by the user\n char = '1' # Repalce it with \"1\"\n if char == 'V' and 'V' in options: # If the character is \"V\" and given by the user\n char = '\\/' # Repalce it with \"\\/\"\n if char == 'W' and 'W' in options: # If the character is \"W\" and given by the user\n char = '\\/\\/' # Repalce it with \"\\/\\/\"\n new_string += char # Append all the characters to the empty string\n\n return new_string # Return the new string", "def affine_decipher_letter(letter, multiplier=1, adder=0, one_based=True):\n if letter in string.ascii_letters:\n cipher_number = pos(letter)\n if one_based: cipher_number += 1\n plaintext_number = ( \n modular_division_table[multiplier, (cipher_number - adder) % 26]\n )\n if one_based: plaintext_number -= 1\n if letter in string.ascii_uppercase:\n return unpos(plaintext_number).upper()\n else:\n return unpos(plaintext_number) \n else:\n return letter", "def letter(self):\n return self._letter", "def _get_letter_by_code(table: list, first_dig: str, second_dig: str) -> str:\n try:\n if first_dig == '1':\n return table[2][int(second_dig) - 1]\n elif first_dig == '2' or first_dig == '3':\n return table[1][int(second_dig) - 1]\n else:\n return table[0][int(second_dig) - 1]\n except IndexError:\n print(f'Неизвестный символ с кодом {first_dig}{second_dig}')\n return ''", "def get_available_letters():\n available = string.ascii_lowercase\n\n return available", "def getCode1Letter(self):\n dataDict = self.__dict__\n raise ApiError(\"\"\"%s.getCode1Letter:\n getCode1Letter should never be called - must be overridden in subclass\"\"\" % self.qualifiedName\n + \": %s\" % (self,)\n )", "def letter_num(num: int):\n if abs(num) > 26 or num == 0:\n let = ord('a') + 26 - 1\n else:\n let = ord('a') + abs(num) - 1\n return chr(let)", "def say_letter(self, keyboard, keycode, char, modifiers):\n\n if keycode[1] in ('shift', 'rshift'):\n return # ignore.. 
shifted keys will have their Shift modifier set\n elif keycode[1] == 'tab':\n self.play_sound('tab')\n elif keycode[1] == 'delete':\n self.play_sound('delete')\n elif keycode[1] == 'backspace':\n self.textbox.text = self.textbox.text[:-1]\n self.play_sound('backspace')\n elif keycode[1] == 'enter':\n self.textbox.text += '\\n'\n self.play_sound('enter')\n elif char == ' ':\n self.textbox.text += ' '\n self.play_sound('space') \n elif char is None:\n self.play_sound('error')\n else:\n if 'shift' in modifiers or 'rshift' in modifiers:\n self.textbox.text += char.upper()\n else:\n self.textbox.text += char\n if RENAMED_CHAR.get(char):\n self.play_sound(RENAMED_CHAR[char])\n else: \n self.play_sound(char)", "def get_letter_to_code_mappings():\n return {\n \"a\": \"Alfa\", \"b\": \"Bravo\", \"c\": \"Charlie\", \"d\": \"Delta\", \"e\": \"Echo\",\n \"f\": \"Foxtrot\", \"g\": \"Golf\", \"h\": \"Hotel\", \"i\": \"India\", \"j\":\n \"Juliett\", \"k\": \"Kilo\", \"l\": \"Lima\", \"m\": \"Mike\", \"n\": \"November\", \"o\":\n \"Oscar\", \"p\": \"Papa\", \"q\": \"Quebec\", \"r\": \"Romeo\", \"s\": \"Sierra\", \"t\":\n \"Tango\", \"u\": \"Uniform\", \"v\": \"Victor\", \"w\": \"Whiskey\", \"x\": \"Xray\",\n \"y\": \"Yankee\", \"z\": \"Zulu\", \"0\": \"Zero\", \"1\": \"One\", \"2\": \"Two\", \"3\":\n \"Three\", \"4\": \"Four\", \"5\": \"Five\", \"6\": \"Six\", \"7\": \"Seven\", \"8\":\n \"Eight\", \"9\": \"Niner\", \"=\": \"Equals\", \"?\": \"Query\", \"/\": \"Slash\", \",\":\n \"Comma\", \".\": \"Stop\", \":\": \"Colon\", \"'\": \"Apostrophe\", \"-\": \"Dash\",\n \"(\": \"Open\", \")\": \"Close\", \"@\": \"At\",\n }", "def _convert(self, message, get_leter_index):\r\n\t\tord_a = ord('a')\r\n\t\treturn \"\".join(\r\n\t\t\t_nth_letter(get_leter_index(ord(char) - ord_a, ord(key_char) - ord_a))\r\n\t\t\t\tfor char, key_char in zip(message, itertools.cycle(self.key))\r\n\t\t)", "def letter_prob(c):\n if c == ' ': return 0.1904\n if c == 'e' or c == 'E': return 0.1017\n if c == 't' or c == 'T': return 0.0737\n if c == 'a' or c == 'A': return 0.0661\n if c == 'o' or c == 'O': return 0.0610\n if c == 'i' or c == 'I': return 0.0562\n if c == 'n' or c == 'N': return 0.0557\n if c == 'h' or c == 'H': return 0.0542\n if c == 's' or c == 'S': return 0.0508\n if c == 'r' or c == 'R': return 0.0458\n if c == 'd' or c == 'D': return 0.0369\n if c == 'l' or c == 'L': return 0.0325\n if c == 'u' or c == 'U': return 0.0228\n if c == 'm' or c == 'M': return 0.0205\n if c == 'c' or c == 'C': return 0.0192\n if c == 'w' or c == 'W': return 0.0190\n if c == 'f' or c == 'F': return 0.0175\n if c == 'y' or c == 'Y': return 0.0165\n if c == 'g' or c == 'G': return 0.0161\n if c == 'p' or c == 'P': return 0.0131\n if c == 'b' or c == 'B': return 0.0115\n if c == 'v' or c == 'V': return 0.0088\n if c == 'k' or c == 'K': return 0.0066\n if c == 'x' or c == 'X': return 0.0014\n if c == 'j' or c == 'J': return 0.0008\n if c == 'q' or c == 'Q': return 0.0008\n if c == 'z' or c == 'Z': return 0.0005\n return 1.0", "def get_letter(self, vowel_need):\r\n\r\n return self.letters.get(vowel_need, self.vowels)", "def letters():\n letters = \"BINGO\"\n for letter in letters:\n yield letter", "def week2exercise2():\n indices = [12, 2, 26, 7, 0, 12, 12, 4, 17]\n wordArray = map(getLetter, indices)\n wordArray[0] = wordArray[0].upper()\n wordArray[1] = wordArray[1].upper()\n wordArray[3] = wordArray[3].upper()\n secret_word = \"\".join(wordArray)\n print(secret_word)\n return secret_word", "def letter_dot(letter):\r\n if letter == 'a' or letter 
== 'A':\r\n return [[0,1,1,0], [1,0,0,1], [1,0,0,1], [1,1,1,1], [1,0,0,1]]\r\n if letter == 'b' or letter == 'B':\t\r\n return [[1,1,1,0], [1,0,0,1], [1,1,1,0], [1,0,0,1], [1,1,1,0]]\r\n if letter == 'c' or letter == 'C':\r\n return [[0,1,1,1], [1,0,0,0], [1,0,0,0], [1,0,0,0], [0,1,1,1]]\r\n if letter == 'd' or letter == 'D':\r\n return [[1,1,1,0], [1,0,0,1], [1,0,0,1], [1,0,0,1], [1,1,1,0]]\r\n if letter == 'e' or letter == 'E':\r\n return [[1,1,1,1], [1,0,0,0], [1,1,1,0], [1,0,0,0], [1,1,1,1]]\r\n if letter == 'f' or letter == 'F':\r\n return [[1,1,1,1], [1,0,0,0], [1,1,1,0], [1,0,0,0], [1,0,0,0]]\r\n if letter == 'g' or letter == 'G':\r\n return [[0,1,1,1], [1,0,0,0], [1,0,1,1], [1,0,0,1], [0,1,1,1]]\r\n if letter == 'h' or letter == 'H':\r\n return [[1,0,0,1], [1,0,0,1], [1,1,1,1], [1,0,0,1], [1,0,0,1]]\r\n if letter == 'i' or letter == 'I':\r\n return [[1,1,1,0], [0,1,0,0], [0,1,0,0], [0,1,0,0], [1,1,1,0]]\r\n if letter == 'j' or letter == 'J':\r\n return [[1,1,1,1], [0,0,0,1], [0,0,0,1], [1,0,0,1], [0,1,1,0]]\r\n if letter == 'k' or letter == 'K':\r\n return [[1,0,0,1], [1,0,1,0], [1,1,0,0], [1,0,1,0], [1,0,0,1]]\r\n if letter == 'l' or letter == 'L':\r\n return [[1,0,0,0], [1,0,0,0], [1,0,0,0], [1,0,0,1], [1,1,1,1]]\r\n if letter == 'm' or letter == 'M':\r\n return [[1,0,0,0,1], [1,1,0,1,1], [1,0,1,0,1], [1,0,0,0,1], [1,0,0,0,1]]\r\n if letter == 'n' or letter == 'N':\r\n return [[1,0,0,0,1], [1,1,0,0,1], [1,0,1,0,1], [1,0,0,1,1], [1,0,0,0,1]]\r\n if letter == '0' or letter == 'O':\r\n return [[0,1,1,0], [1,0,0,1], [1,0,0,1], [1,0,0,1], [0,1,1,0]]\r\n if letter == 'p' or letter == 'P':\t\r\n return [[1,1,1,0], [1,0,0,1], [1,0,0,1], [1,1,1,0], [1,0,0,0]]\r\n if letter == 'q' or letter == 'Q':\r\n return [[0,1,1,0], [1,0,0,1], [1,0,0,1], [1,0,1,1], [0,1,1,1]]\r\n if letter == 'r' or letter == 'R':\t\r\n return [[1,1,1,0], [1,0,0,1], [1,0,0,1], [1,1,1,0], [1,0,0,1]]\r\n if letter == 's' or letter == 'S':\r\n return [[1,1,1,1], [1,0,0,0], [0,1,1,0], [0,0,0,1], [1,1,1,1]]\r\n if letter == 't' or letter == 'T':\r\n return [[1,1,1,1,1], [0,0,1,0,0], [0,0,1,0,0], [0,0,1,0,0], [0,0,0,0,0]]\r\n if letter == 'u' or letter == 'U':\r\n return [[1,0,0,1], [1,0,0,1], [1,0,0,1], [1,0,0,1], [0,1,1,0]]\r\n if letter == 'v' or letter == 'V':\r\n return [[1,0,0,0,1], [1,0,0,0,1], [1,0,0,0,1], [1,0,0,0,1], [0,0,1,0,0]]\r\n if letter == 'w' or letter == 'W':\r\n return [[1,0,1,0,1], [1,0,1,0,1], [1,0,1,0,1], [1,0,1,0,1], [0,1,0,1,0]]\r\n if letter == 'x' or letter == 'X':\r\n return [[1,0,0,0,1], [0,1,0,1,0], [0,0,1,0,0], [0,1,0,1,0], [1,0,0,0,1]]\r\n if letter == 'y' or letter == 'Y':\r\n return [[1,0,0,0,1], [0,1,0,1,0], [0,0,1,0,0], [0,0,1,0,0], [0,0,1,0,0]]\r\n if letter == 'z' or letter == 'Z':\r\n return [[1,1,1,1,1], [0,0,0,1,0], [0,0,1,0,0], [0,1,0,0,0], [1,1,1,1,1]]\r\n return []", "def letter_picker(self):\r\n \r\n self.letter_count = dict.fromkeys(string.ascii_lowercase, 0) # reset\r\n for word in self.valid_words:\r\n word = \"\".join(set(word)) # removes duplicate letters in valid words\r\n for letter in word:\r\n self.letter_count[letter] += 1\r\n for letter in self.guesses: # prevents repeating guesses\r\n self.letter_count[letter] = 0\r\n self.chosen_letter = max(self.letter_count, key=self.letter_count.get)\r\n self.guesses.append(self.chosen_letter)\r\n print(f\"{self.chosen_letter} is the letter I think is right\")", "def __getRandChar(self):\n return self.letterbag[random.randint(0,25)]", "def is_letter(c):\n return 'A' <= c <= 'Z' or 'a' <= c <= 'z'", "def 
find_letter_in_dics(self,letter):\r\n if str.isupper(letter)==True and letter not in self.special_letters_dic and letter not in self.special_characters_dic: #taken from above\r\n position=self.general_upper_word_list[letter]\r\n elif str.islower(letter)==True and letter not in self.special_letters_dic and letter not in self.special_characters_dic:\r\n position=self.general_lower_word_list[letter]\r\n elif self.special_characters_dic!=None and letter in self.special_characters_dic:\r\n position=self.special_characters_dic[letter]\r\n elif letter in self.special_letters_dic:\r\n position=self.special_letters_dic[letter]\r\n elif letter in self.general_numbers_dic:\r\n position=self.general_numbers_dic[letter]\r\n return position", "def get_word(letters):\r\n\r\n word = \"\"\r\n for letter in letters:\r\n word += letter \r\n \r\n return word", "def alpha_score(upper_letters):\r\n return sum(map(lambda l: 1 + ord(l) - ord('A'), upper_letters))", "def horizontal_char(self):\n ...", "def index_letter_string(self, index):\n\t\treturn \"(\" + ALPHABET[index] + \")\"", "def GetAlphabet(self):\n alphabet = list(self._charAlphabet) #Creates a list of the alphabet characters\n numbers = [i for i in range(0,26)] #Creates a list of numbers up to 25\n numberOff = dict( zip(alphabet, numbers)) #Pairs each character with a number in a chronological sequence to number the characters from 0 to 25\n \n return numberOff", "def ask_letter(self):\n letter = ' '\n while letter not in string.ascii_lowercase:\n letter = input('Write a letter:\\n')\n letter.lower()\n\n return letter", "def test_letters(self):\n all_letters = set()\n self.assertTrue(self._letters_proto.item)\n for i, item in enumerate(self._letters_proto.item):\n # Letter message should not be empty.\n self.assertTrue(item.letter)\n self.assertTrue(item.letter.uname)\n self.assertLen(item.letter.uname, 1)\n self.assertTrue(item.letter.raw)\n\n # Check that there are no duplicates and the letters are in the\n # Unicode codepoint order.\n u_char = item.letter.raw\n self.assertNotIn(item.letter.raw, all_letters,\n f'Letter {i}: Duplicate letter `{u_char}` found')\n all_letters.add(u_char)\n if i:\n this_codepoint = ord(u_char)\n prev_codepoint = ord(self._letters_proto.item[i - 1].letter.raw)\n self.assertLess(prev_codepoint, this_codepoint,\n f'Letter {i}: Should be in Unicode codepoint order. '\n f'This codepoint: {this_codepoint}, '\n f'previous: {prev_codepoint}')\n self.assertEqual(len(self._letters_proto.item), len(all_letters))", "def letter_code(letter):\n value = ord(letter.lower()) - ord('a') + 10\n return value + value // 11", "def guess_letter(self):\r\n letter = input(\"# Enter a Letter :\")\r\n if not letter:\r\n print(\"Please Enter a Valid Value\")\r\n else:\r\n result = game_instance.check_letter(letter)\r\n\r\n if result == \"NOT FOUND\":\r\n print(\"WRONG. No corresponding letters found in the word. Try Again!\")\r\n else:\r\n temp = list(self.current_word)\r\n count=0;\r\n for x in result:\r\n count+=1\r\n temp[x] = letter\r\n self.current_word = \"\".join(temp)\r\n print(\"Good Job. 
You Found \"+str(count)+\" Letters.\")", "def isalpha(self) -> bool:\n pass", "def letter_name(index):\n letters = string.ascii_lowercase\n count = len(letters)\n\n return letters[index % count] * ((index // count) + 1)", "def test_letters(self):\n self.assertFalse(validate_measure_input('a', self.measures))\n self.assertFalse(validate_measure_input('1a', self.measures))", "def alpha_score(upper_letters):\r\n return sum(map(lambda l: 1 + ord(l) - ord('A'), upper_letters))", "def prev_letter(letter, step):\r\n\r\n\tif letter in ascii_uppercase:\r\n\t\tnew_letter = get_new_letter(ascii_uppercase, letter, -step)\r\n\telif letter in ascii_lowercase:\r\n\t\tnew_letter = get_new_letter(ascii_lowercase, letter, -step)\r\n\telse:\r\n\t\tnew_letter = letter\r\n\treturn new_letter", "def __init__(self, a=5, b=8, m=26):\n\n self.a = a\n self.b = b\n self.m = m\n self.alpha = list(string.ascii_lowercase)", "def paa2letter(data, alphabetSize):\r\n alphabetizedX = ''\r\n beta = breakSymble(alphabetSize)\r\n aOffset = ord('a')\r\n # print(beta, aOffset)\r\n for i in range(0, len(data)):\r\n letterFound = False\r\n for j in range(0, len(beta)):\r\n if np.isnan(data[i]):\r\n alphabetizedX += '-'\r\n letterFound = True\r\n break\r\n if data[i] < beta[j]:\r\n alphabetizedX += chr(aOffset + j)\r\n letterFound = True\r\n break\r\n if not letterFound:\r\n alphabetizedX += chr(aOffset + len(beta))\r\n\r\n return alphabetizedX", "def letter_to_index(letter):\r\n return ord(letter.lower()) - CHAR_A", "def letter_to_index(letter):\r\n return ord(letter.lower()) - CHAR_A", "def getCode1Letter(self):\n dataDict = self.__dict__\n # NB must be done by direct access\n result = dataDict['code1Letter']\n return result", "def mapping_letter(letters):\n my_list = list(map(lambda x: x.upper(), letters))\n return dict(zip(letters, my_list))", "def res_1Letter(RES):\n RES_LetterCode = ['HOH', 'ALA','ARG','ASN','ASP','CYS','GLN','GLU','GLY','HIS','ILE','LEU','LYS','MET','PHE','PRO','SER','THR','TRP','TYR','VAL']\n R_LetterCode = ['O', 'A', 'R', 'N', 'D', 'C', 'Q', 'E', 'G', 'H', 'I', 'L', 'K', 'M', 'F', 'P', 'S', 'T', 'W', 'Y', 'V']\t\n return R_LetterCode[RES_LetterCode.index(RES)]", "def random_letter(letters):\n return random.choice(letters)", "def __str__(self):\n return str(self.__alphabet)", "def get_alphabet(choice) -> str:\n if choice == 'uppercase':\n return 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n elif choice == 'lowercase':\n return 'abcdefghijklmnopqrstuvwxyz'\n elif choice == 'alphabet':\n return get_alphabet('uppercase') + get_alphabet('lowercase')\n elif choice == 'number':\n return '0123456789'\n elif choice == 'alphanumeric':\n return get_alphabet('alphabet') + get_alphabet('number')\n elif choice == 'symbol':\n return r'~!@#$%^&*()-_=+[]{}\\|;:,<.>/?\"'\n elif choice == 'random':\n return get_alphabet('alphanumeric') + get_alphabet('symbol')", "def _find_letters(self):\r\n\r\n letter_boxes = find_letter_boxes(self.img, MAXIMUM_LETTER_LENGTH)\r\n letters = [self.img.crop((letter_box[0], 0, letter_box[1], self.img.height)) for letter_box in letter_boxes]\r\n\r\n if (len(letters) == 6 and letters[0].width < MINIMUM_LETTER_LENGTH) or (len(letters) != 6 and len(letters) != 7):\r\n letters = [Image.new('L', (200, 70)) for i in range(6)]\r\n\r\n if len(letters) == 7:\r\n letters[6] = merge_horizontally(letters[6], letters[0])\r\n del letters[0]\r\n\r\n letters = [cut_the_white(letter) for letter in letters]\r\n self.letters = {str(k): v for k, v in zip(range(1, 7), letters)}", "def calculate_ascii(col, col_count, use_third, 
first_letter, third_letter):\n if col <= 26:\n # if it's under 26 columns, just use a single letter\n ascii_col = chr(col + 64)\n elif use_third:\n if col_count > 26:\n # first_letter describes the coordinate of what the first letter should be -\n # every 26 iterations, it increases by one to switch the first letter up by one\n first_letter += 1\n # col_count keeps track of what column you're at in the current first_letter iteration\n col_count = 1\n if first_letter > 90:\n third_letter += 1\n first_letter = 65\n ascii_col = chr(third_letter) + chr(first_letter) + chr((col_count + 64))\n\n col_count += 1\n else:\n # if it's over 26 columns, you have to calculate two different letters\n if col_count > 26:\n # first_letter describes the coordinate of what the first letter should be -\n # every 26 iterations, it increases by one to switch the first letter up by one\n first_letter += 1\n # col_count keeps track of what column you're at in the current first_letter iteration\n col_count = 1\n\n ascii_col = chr(first_letter) + chr((col_count + 64))\n\n if ascii_col == 'ZZ':\n use_third = True\n\n col_count += 1\n return ascii_col, col_count, use_third, first_letter, third_letter", "def rep_to_alphabet(alph_chars: str):\n # perfect fits\n if np.sum([a in alph_chars for a in ['gw', 'iam', 'rimes']]) > 1:\n raise ValueError('multiple perfect fitting alphabets defeat purpose')\n if 'gw' in alph_chars:\n return [Alphabet.PERFECT_GW]\n if 'iam' in alph_chars:\n return [Alphabet.PERFECT_IAM]\n if 'rimes' in alph_chars:\n return [Alphabet.PERFECT_RIMES]\n # universal alphabets\n alph_chars = alph_chars.lower()\n mapping = {'l': Alphabet.ASCII_LOWER,\n 'u': Alphabet.ASCII_UPPER,\n 'p': Alphabet.ASCII_PUNCTUATION,\n 'd': Alphabet.ASCII_DIGITS}\n alphabet = set()\n for c in alph_chars:\n alphabet.add(mapping[c])\n alphabet = list(alphabet)\n return alphabet", "def letter_grid(self, assignment):\n letters = [\n [None for _ in range(self.crossword.width)]\n for _ in range(self.crossword.height)\n ]\n for variable, word in assignment.items():\n direction = variable.direction\n for k in range(len(word)):\n i = variable.i + (k if direction == Variable.DOWN else 0)\n j = variable.j + (k if direction == Variable.ACROSS else 0)\n letters[i][j] = word[k]\n return letters", "def letter_grid(self, assignment):\n letters = [\n [None for _ in range(self.crossword.width)]\n for _ in range(self.crossword.height)\n ]\n for variable, word in assignment.items():\n direction = variable.direction\n for k in range(len(word)):\n i = variable.i + (k if direction == Variable.DOWN else 0)\n j = variable.j + (k if direction == Variable.ACROSS else 0)\n letters[i][j] = word[k]\n return letters", "def letter_grid(self, assignment):\n letters = [\n [None for _ in range(self.crossword.width)]\n for _ in range(self.crossword.height)\n ]\n for variable, word in assignment.items():\n direction = variable.direction\n for k in range(len(word)):\n i = variable.i + (k if direction == Variable.DOWN else 0)\n j = variable.j + (k if direction == Variable.ACROSS else 0)\n letters[i][j] = word[k]\n return letters", "def step1ab(self):\n\t\tif self.b[self.k] == 's':\n\t\t\tif self.ends(\"sses\"):\n\t\t\t\tself.k = self.k - 2\n\t\t\telif self.ends(\"ies\"):\n\t\t\t\tself.setto(\"i\")\n\t\t\telif self.b[self.k - 1] != 's':\n\t\t\t\tself.k = self.k - 1\n\t\tif self.ends(\"eed\"):\n\t\t\tif self.m() > 0:\n\t\t\t\tself.k = self.k - 1\n\t\telif (self.ends(\"ed\") or self.ends(\"ing\")) and self.vowelinstem():\n\t\t\tself.k = self.j\n\t\t\tif 
self.ends(\"at\"): self.setto(\"ate\")\n\t\t\telif self.ends(\"bl\"): self.setto(\"ble\")\n\t\t\telif self.ends(\"iz\"): self.setto(\"ize\")\n\t\t\telif self.doublec(self.k):\n\t\t\t\tself.k = self.k - 1\n\t\t\t\tch = self.b[self.k]\n\t\t\t\tif ch == 'l' or ch == 's' or ch == 'z':\n\t\t\t\t\tself.k = self.k + 1\n\t\t\telif (self.m() == 1 and self.cvc(self.k)):\n\t\t\t\tself.setto(\"e\")", "def get_new_letter(letter, my_type):\n uppercase = [\"A\",\"B\",\"C\",\"D\",\"E\",\"F\",\"G\",\"H\",\"I\",\"J\",\"K\",\"L\",\"M\",\"N\",\"O\",\"P\",\"Q\",\"R\",\"S\",\"T\",\"U\",\"V\",\"W\",\"X\",\"Y\",\"Z\"]\n lowercase = [\"a\",\"b\",\"c\",\"d\",\"e\",\"f\",\"g\",\"h\",\"i\",\"j\",\"k\",\"l\",\"m\",\"n\",\"o\",\"p\",\"q\",\"r\",\"s\",\"t\",\"u\",\"v\",\"w\",\"x\",\"y\",\"z\"]\n if my_type == \"upper\":\n for i in range(0,len(lowercase)):\n if lowercase[i] == letter:\n return uppercase[i]\n else:\n for i in range(0,len(uppercase)):\n if uppercase[i] == letter:\n return lowercase[i]", "def place_in_alphabet(letters):\r\n\tfor l in letters:\r\n\t\tprint(l, ':', str(ALPHABET.index(l)+1))", "def get_alphabet():\n\n alphabet = {}\n # Organized by how final output will look. ...alternative org isn't much better\n # May want to look into an external font solution TBH\n # Beware, the \" \" char is also basically the padding\n alphabet[\" \"] = [o,\n o,\n o,\n o,\n o]\n alphabet[\"A\"] = [o + X + o,\n X + o + X,\n X + X + X,\n X + o + X,\n X + o + X]\n alphabet[\"B\"] = [X + X + o,\n X + o + X,\n X + X + o,\n X + o + X,\n X + X + o]\n alphabet[\"C\"] = [X + X + X,\n X + o + o,\n X + o + o,\n X + o + o,\n X + X + X]\n alphabet[\"D\"] = [X + X + o,\n X + o + X,\n X + o + X,\n X + o + X,\n X + X + o]\n alphabet[\"E\"] = [X + X + X,\n X + o + o,\n X + X + X,\n X + o + o,\n X + X + X]\n alphabet[\"F\"] = [X + X + X,\n X + o + o,\n X + X + o,\n X + o + o,\n X + o + o]\n alphabet[\"G\"] = [X + X + X + X,\n X + o + o + o,\n X + o + X + X,\n X + o + o + X,\n X + X + X + X]\n alphabet[\"H\"] = [X + o + X,\n X + o + X,\n X + X + X,\n X + o + X,\n X + o + X]\n alphabet[\"I\"] = [X + X + X,\n o + X + o,\n o + X + o,\n o + X + o,\n X + X + X]\n alphabet[\"J\"] = [o + o + X,\n o + o + X,\n o + o + X,\n X + o + X,\n o + X + o]\n alphabet[\"K\"] = [X + o + o + X,\n X + o + X + o,\n X + X + o + o,\n X + o + X + o,\n X + o + o + X]\n alphabet[\"L\"] = [X + o + o,\n X + o + o,\n X + o + o,\n X + o + o,\n X + X + X]\n alphabet[\"M\"] = [X + o + o + o + X,\n X + X + o + X + X,\n X + o + X + o + X,\n X + o + o + o + X,\n X + o + o + o + X]\n alphabet[\"N\"] = [X + o + o + X,\n X + o + o + X,\n X + X + o + X,\n X + o + X + X,\n X + o + o + X]\n alphabet[\"O\"] = [X + X + X,\n X + o + X,\n X + o + X,\n X + o + X,\n X + X + X]\n alphabet[\"P\"] = [X + X + X,\n X + o + X,\n X + X + X,\n X + o + o,\n X + o + o]\n alphabet[\"Q\"] = [X + X + X,\n X + o + X,\n X + o + X,\n X + X + X,\n o + o + X]\n alphabet[\"R\"] = [X + X + X,\n X + o + X,\n X + X + X,\n X + X + o,\n X + o + X]\n alphabet[\"S\"] = [X + X + X,\n X + o + o,\n X + X + X,\n o + o + X,\n X + X + X]\n alphabet[\"T\"] = [X + X + X,\n o + X + o,\n o + X + o,\n o + X + o,\n o + X + o]\n alphabet[\"U\"] = [X + o + X,\n X + o + X,\n X + o + X,\n X + o + X,\n X + X + X]\n alphabet[\"V\"] = [X + o + X,\n X + o + X,\n X + o + X,\n o + X + o,\n o + X + o]\n alphabet[\"W\"] = [X + o + o + o + X,\n X + o + X + o + X,\n X + o + X + o + X,\n X + o + X + o + X,\n o + X + o + X + o]\n alphabet[\"X\"] = [X + o + X,\n X + o + X,\n o + X + o,\n X + o + X,\n X + o + X]\n alphabet[\"Y\"] = [X + o + 
X,\n X + o + X,\n o + X + o,\n o + X + o,\n o + X + o]\n alphabet[\"Z\"] = [X + X + X,\n o + o + X,\n o + X + o,\n X + o + o,\n X + X + X]\n alphabet[\"1\"] = [X + X + o,\n o + X + o,\n o + X + o,\n o + X + o,\n X + X + X]\n alphabet[\"2\"] = [X + X + X,\n o + o + X,\n X + X + X,\n X + o + o,\n X + X + X]\n alphabet[\"3\"] = [X + X + X,\n o + o + X,\n o + X + X,\n o + o + X,\n X + X + X]\n alphabet[\"4\"] = [X + o + X,\n X + o + X,\n X + X + X,\n o + o + X,\n o + o + X]\n alphabet[\"5\"] = [X + X + X,\n X + o + o,\n X + X + X,\n o + o + X,\n X + X + X]\n alphabet[\"6\"] = [X + X + X,\n X + o + o,\n X + X + X,\n X + o + X,\n X + X + X]\n alphabet[\"7\"] = [X + X + X,\n o + o + X,\n o + o + X,\n o + X + o,\n o + X + o]\n alphabet[\"8\"] = [X + X + X,\n X + o + X,\n X + X + X,\n X + o + X,\n X + X + X]\n alphabet[\"9\"] = [X + X + X,\n X + o + X,\n X + X + X,\n o + o + X,\n o + o + X]\n alphabet[\"0\"] = [X + X + X + X + X,\n X + o + o + X + X,\n X + o + X + o + X,\n X + X + o + o + X,\n X + X + X + X + X]\n\n return alphabet", "def get_alphabet(number):\n return chr(number + 96)", "def get_alphabet(number):\n return chr(number + 96)", "def getFENtileLetter(fen,letter,number):\n l2i = lambda l: ord(l)-ord('A') # letter to index\n piece_letter = fen[(8-number)*8+(8-number) + l2i(letter)]\n return ' KQRBNPkqrbnp'.find(piece_letter)", "def c(k):\n if isinstance(k, str):\n return k.lower() if ord(k) % 2 == 0 else k.upper()\n return k", "def test_starts_letter(x):\n return x[0].isalpha()", "def index_to_letter(index):\r\n return chr(index + CHAR_A)", "def index_to_letter(index):\r\n return chr(index + CHAR_A)", "def letters(characters):\n letters = [\"क\", \"ख\", \"ग\", \"घ\", \"ड॒\", \"च\", \"छ\", \"ज\", \"झ\", \"ञ\", \"ट\", \"ठ\", \"ड\", \"ढ\", \"ण\", \"त\", \"थ\" ,\"द\", \"ध\", \"न\", \"प\", \"फ\", \"ब\", \"भ\", \"म\", \"य\", \"र\", \"ल\", \"व\", \"श\", \"ष\", \"स\", \"ह\", \"अ\", \"आ\", \"इ\", \"ई\", \"उ\", \"ऊ\", \"ए\", \"ऐ\", \"ओ\" ]\n shabda = []\n st = \"\"\n for ch in characters:\n if ch == \"space\":\n shabda.append(st)\n st = \"\"\n shabda.append(ch)\n else:\n if ch in letters:\n if st != \"\":\n shabda.append(st)\n st = ch\n else:\n st += ch\n\n \n shabda.append(st)\n for i,ch in enumerate(shabda):\n # if i == len(shabda)-1:\n # shabda[i] = ch+\"।\"\n if ch == \"\":\n shabda.remove(ch)\n print(shabda)\n return shabda", "def alphabet(self):\n if(self.seq_type.upper()==\"DNA\"):\n return \"ATCG\"\n elif(self.seq_type.upper()==\"RNA\"):\n return \"AUCG\"\n elif(self.seq_type.upper()==\"PROTEIN\"):\n return \"ACDEFGHIKLMNPQRSTVWY\"\n else:\n return None", "def test_casing(self):\n char = Character(type=['Fish', 'Great Ape'])\n assert char.type_key == 'fish'", "def char(self, aIndex, char):\n o = ord(char)\n c = -1\n # space\n if o == 32:\n c = 16\n # dash\n if o == 45:\n c = 17\n # uppercase A-F\n if 65 <= o <= 70:\n c = o - 55\n # lowercase a-f\n if 97 <= o <= 102:\n c = o - 87\n # 0-9\n if 48 <= o <= 57:\n c = o - 48\n\n self.digit(aIndex, c)", "def get_letter_image(self, letter): # pragma: no cover\n\t\traise NotImplementedError()", "def zip_letters(xl, yl, dxl, dyl, rl, word):\n return (\n ([pl.pop(0) if pl else None for pl in (xl, yl, dxl, dyl, rl)], char)\n for char in word)", "def convert_numtoletter(n):\r\n L = seats[0][n-1] #letter\r\n return L", "def letter_grade(grade):\n\t# define dictionary of grading scale\n\t# Check the fillna above was filled w/ -1.0\n\td = {18.0: 'A', 17.0: 'A', 16.0: 'A', 15.0: 'A', 14.0: 'A', 13.0: 'A',\n\t12.0: 'A', 11.0: 'A', 10.0: 'A', 9.0: 'A', 8.0: 
'B', \n\t7.0: 'C', 6.0: 'D', 5.0: 'F', 4.0: 'F', 3.0: 'F', 2.0: 'F', \n\t1.0: 'F', 0.0: 'F', -1.0: '-'}\n\t\n\t# get letter grade only if grade is not a string\n\tif type(grade) != str:\n\t\t# get the letter\n\t\tletter = d[grade]\n\t\treturn letter\n\telse:\n\t\treturn grade", "def test_single_letter_count(self):\n self.assertEqual(functions.single_letter_count(\"Hello World\", \"h\"), 1)\n self.assertEqual(functions.single_letter_count(\"Hello World\", \"z\"), 0)\n self.assertEqual(functions.single_letter_count(\"HelLo World\", \"l\"), 3)", "def alphabet_chars(alphabet: List[Alphabet]):\n # perfect fits\n if np.sum([a in alphabet for a in [Alphabet.PERFECT_GW, Alphabet.PERFECT_IAM, Alphabet.PERFECT_RIMES]]) > 1:\n raise ValueError('multiple perfect fitting alphabets defeat purpose')\n if Alphabet.PERFECT_GW in alphabet:\n return ALPHABET_PERFECT_GW\n if Alphabet.PERFECT_IAM in alphabet:\n return ALPHABET_PERFECT_IAM\n if Alphabet.PERFECT_RIMES in alphabet:\n return ALPHABET_PERFECT_RIMES\n # universal alphabets\n alph_str = ''\n if Alphabet.ASCII_LOWER in alphabet:\n alph_str += string.ascii_lowercase\n if Alphabet.ASCII_UPPER in alphabet:\n alph_str += string.ascii_uppercase\n if Alphabet.ASCII_DIGITS in alphabet:\n alph_str += string.digits\n if Alphabet.ASCII_PUNCTUATION in alphabet:\n alph_str += string.punctuation\n return alph_str", "def comp_alpha(self):\n pass", "def lower(self) -> str:", "def encode(self, letter):\n\n for plug in self.plugleads:\n if plug.pair[0] == letter or plug.pair[1] == letter:\n return plug.encode(letter)\n return letter", "def characters():\n\n letter = \"a b c d e f g h i j k l m n o p q r s t u v w x y z\".split()\n sc = \"! @ # $ % ^ & * ( ) _ - + = ? : ;\".split()\n\n\n chars = []\n chars.append(random.choice(letter))\n chars.append(random.choice(letter).upper())\n chars.append(str(random.randint(0,9)))\n chars.append(random.choice(sc))\n\n return chars", "def encrypt(word):\r\n if len(word) == 1:\r\n if word.islower() and word !='z':#only encode lower case letters\r\n return chr(ord(word) + 1)\r\n elif word.isupper and word != 'z':\r\n return word\r\n elif word == 'z': # special case: z\r\n return chr(ord(word) -25)\r\n else:\r\n myChar = word[0] #first get first chararacter in the word\r\n if myChar.islower() and myChar != 'z':\r\n myChar = chr(ord(word[0])+1)\r\n elif myChar == 'z': # special case: z\r\n myChar = chr(ord(word[0])-25)\r\n elif myChar.isupper:\r\n pass \r\n return myChar + encrypt(word[1:])", "def _nth_letter(n):\r\n\treturn string.ascii_lowercase[n % len(string.ascii_lowercase)]", "def pig_latin(word):\n \n first_letter = word[0]\n rest_of_word = word[1 : ]\n \n # Student should complete function on the next lines.\n \n if first_letter == 'a' or first_letter == 'e' or first_letter == 'i' or first_letter == 'o' or first_letter == 'u':\n return word + \"way\"\n else:\n return rest_of_word + first_letter + \"ay\"", "def character(x):\n if (x==\"a\"or x==\"A\"or x==\"e\"or x==\"E\"or x==\"i\"or x==\"I\"or x==\"o\"or x==\"O\"or x==\"u\"or x==\"U\"):\n return('True')\n else:\n return('False')", "def get_uppercase_and_lowercase():\n def get_uppercase():\n for c in range(26):\n yield chr(65 + c)\n\n def get_lowercase():\n for c in range(26):\n yield chr(97 + c)\n\n yield from get_uppercase()\n yield from get_lowercase()", "def alpha_chars (text):\n for letter in text:\n if letter.isalpha ():\n yield letter", "def __init__(self,alphabet=\"amino\"):\n\n super(self.__class__,self).__init__(alphabet,str)", "def column_to_letter(self, pos):\n 
column_dict = {}\n column_dict[0] = 'a'\n column_dict[1] = 'b'\n column_dict[2] = 'c'\n column_dict[3] = 'd'\n column_dict[4] = 'e'\n column_dict[5] = 'f'\n column_dict[6] = 'g'\n column_dict[7] = 'h'\n column_dict[8] = 'i'\n return column_dict[pos]", "def get_alphabet(self):\n return self.alphabet" ]
[ "0.68726075", "0.6728515", "0.6708404", "0.6602445", "0.6602445", "0.6602445", "0.6595359", "0.65413225", "0.6491447", "0.64886683", "0.6447457", "0.64450705", "0.64275444", "0.63569146", "0.63441706", "0.627932", "0.6252092", "0.62292844", "0.6165212", "0.6158538", "0.61365527", "0.61301607", "0.61140853", "0.60862553", "0.607768", "0.6074581", "0.6072664", "0.60617816", "0.60533845", "0.6041342", "0.60369176", "0.60323864", "0.60283875", "0.60245967", "0.60213214", "0.6020793", "0.6017445", "0.5995929", "0.5994138", "0.598973", "0.59894425", "0.59824777", "0.5971881", "0.59712344", "0.5961804", "0.59589964", "0.59521663", "0.59426624", "0.5940005", "0.5930485", "0.59210896", "0.5913881", "0.59050655", "0.59050655", "0.59011585", "0.58857536", "0.58840525", "0.58701503", "0.58686596", "0.58485126", "0.584108", "0.5831028", "0.5829197", "0.58246577", "0.58246577", "0.58246577", "0.58198404", "0.5819491", "0.5818819", "0.5818074", "0.58177286", "0.58177286", "0.581669", "0.5814917", "0.5807079", "0.58032316", "0.58032316", "0.5799924", "0.5788983", "0.5788699", "0.5778329", "0.57738125", "0.5768541", "0.5764992", "0.57642335", "0.57607657", "0.5758047", "0.5741425", "0.5738945", "0.5737418", "0.57317126", "0.5715233", "0.57136863", "0.57067126", "0.5700664", "0.5691055", "0.5687926", "0.56789106", "0.5676368", "0.56752366" ]
0.68069106
1
correctly convert a series of dicts or a DictArray to lists
def test_input_conversion(self):
    data = [dict(A=0.1, C=0.2), dict(A=0.1, C=0.2)]
    base = [("A", 0.1), ("C", 0.2)]
    expect = [base, base]
    got = _char_hts_as_lists(data)
    self.assertEqual(got, expect)
    #
    data = [dict(A=0.1, C=0.2), {}]
    base = [("A", 0.1), ("C", 0.2)]
    expect = [base, None]
    got = _char_hts_as_lists(data)
    self.assertEqual(got, expect)
    data = [dict(A=0.1, C=0.2), None]
    base = [("A", 0.1), ("C", 0.2)]
    expect = [base, None]
    got = _char_hts_as_lists(data)
    self.assertEqual(got, expect)
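The helper under test, _char_hts_as_lists, is not shown in this record. As a rough illustration of the behaviour the assertions imply, a minimal version could look like the sketch below; the key ordering and the treatment of empty/None entries are assumptions inferred from the expected values, not taken from the original code.

def _char_hts_as_lists(data):
    # Turn each {char: height} mapping into a list of (char, height) tuples;
    # empty dicts and None entries become None.
    result = []
    for entry in data:
        if not entry:  # covers both None and {}
            result.append(None)
        else:
            result.append([(char, entry[char]) for char in sorted(entry)])
    return result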
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dict_array_values_as_list(self, dict_object: dict()) -> []:\n new_list = []\n for val in dict_object.values():\n new_list = new_list + val\n return new_list", "def __dict_to_list(dict):\n\n max_val = 1\n for k,v in dict.items():\n if type(v) is list:\n if len(v) > 1 and (max_val > 1 and len(v) is not max_val):\n #FIXME: get a real error here\n sys.exit('All lists in cparams need to be of length 1 or %i.. key %s has this list: %s' %(max_val,k,v))\n max_val = max(max_val,len(v))\n\n ld = [{} for l in range(max_val)]\n for d in range(len(ld)):\n for k,v in dict.items():\n if type(v) is not list:\n ld[d][k] = v\n else:\n if len(v) == 1:\n ld[d][k] = v[0]\n else:\n ld[d][k] = v[d]\n return ld", "def dic2list(self, X: numpy.ndarray):\n raise NotImplementedError()", "def cast_to_list(value):\n if isinstance(value, str):\n value = cast_to_jdict(value)\n assert isinstance(value, list)\n return value\n elif hasattr(value, 'tolist'): # meant for numpy arrays\n # what other potential attributes to check for?\n return value.tolist()\n else:\n return list(\n value\n ) # will work with set, tuple, and other iterables (not recursively though: just level 0)", "def _get_list_from_dict(d, l):\n\n new_list = []\n\n for val in l:\n subdict = d[val]\n inner_list = []\n for subval in l:\n inner_list.append(subdict[subval])\n new_list.append(inner_list)\n\n return np.array(new_list)", "def _dict2lists(data):\n xvals = list(data.keys())\n xvals.sort()\n yvals = []\n for x in xvals:\n yvals.append(data[x])\n return xvals, yvals", "def tups2lists(obj):\n\n if isinstance(obj, dict):\n return {tups2lists(k): tups2lists(v) for k, v in obj.iteritems()}\n if isinstance(obj, tuple):\n return [tups2lists(v) for v in obj]\n return obj", "def dict_to_array(dic):\n # From dictionary of lists\n if type(list(dic.items())[0][1]) == list:\n list_items = list(dic.items())\n col_names = []\n col_data = []\n for i in range(0, len(list_items)):\n col_names.append(list_items[i][0])\n col_data.append(list_items[i][1])\n return col_data, col_names\n\n # From dictionary of numpy arrays\n elif type(list(dic.items())[0][1]) is np.ndarray:\n list_items = list(dic.items())\n col_names = []\n col_data = []\n for i in range(0, len(list_items)):\n col_names.append(list_items[i][0])\n col_data.append(list_items[i][1].tolist())\n return col_data, col_names", "def as_list(self) -> List[Dict[str, Union[float, str, datetime, None]]]:\n return [row.as_dict() for row in self.rows]", "def as_list(self) -> List[Dict[str, Union[float, str, datetime, None]]]:\n return [row.as_dict() for row in self.rows]", "def _to_list(series: Union[TimeSeries, Sequence[TimeSeries]]) -> Sequence[TimeSeries]:\n\n return [series] if not isinstance(series, Sequence) else series", "def _tolist(ndarray):\n elem_list = []\n for sub_elem in ndarray:\n if isinstance(sub_elem, sio.matlab.mio5_params.mat_struct):\n elem_list.append(_todict(sub_elem))\n elif isinstance(sub_elem, np.ndarray):\n elem_list.append(_tolist(sub_elem))\n else:\n elem_list.append(sub_elem)\n return elem_list", "def _flatten_lists(\n data: Union[Dict[str, Any], List[Any], Any]\n ) -> Union[Dict[str, Any], Any]:\n if not isinstance(data, dict):\n return data\n copy_data = cast(Dict[str, Any], data.copy())\n for attr, val in copy_data.items():\n if isinstance(val, list):\n if len(cast(List[Any], val)) == 1:\n # pull single values out of lists\n data[attr] = _flatten_lists(cast(Any, val[0]))\n else:\n data[attr] = [_flatten_lists(v) for v in cast(List[Any], val)]\n elif isinstance(val, dict):\n 
data[attr] = _flatten_lists(cast(Dict[str, Any], val))\n return data", "def clean_types(data: Iterable[dict]) -> List[dict]:\n data_copy = []\n for row in data:\n row_copy = {}\n for col, val in row.items():\n if type(val) == date:\n row_copy[col] = datetime.combine(val, time())\n elif type(val) == list:\n if not val:\n row_copy[col] = None\n else:\n row_copy[col] = ','.join(str(el) if el is not None else 'NULL' for el in val)\n elif type(val) == dict:\n row_copy[col] = str(val)\n else:\n row_copy[col] = val\n\n data_copy.append(row_copy)\n\n return data_copy", "def dictToLst(dictionary):\n keys = []\n values = []\n for key, value in dictionary.iteritems():\n keys.append(key)\n values.append(value)\n return [keys, values]", "def __dic2list(self,dic):\n\treturn map(lambda x:[x,dic[x]],dic)", "def object_to_lists(obj):\n\n li = []\n\n if isinstance(obj, types.DictType):\n keys = obj.keys()\n keys.sort()\n for k in keys:\n v = obj[k]\n if isinstance(v, types.DictType):\n li.append(object_to_lists(v))\n elif isinstance(v, (types.ListType, types.TupleType)):\n for el in v:\n li.append(object_to_lists(el))\n else:\n li.append((k, v))\n\n elif isinstance(obj, (types.ListType, types.TupleType)):\n for el in obj:\n li.append(object_to_lists(el))\n\n else:\n return obj\n\n return li", "def _cast_to_list(obj: JsonInput) -> JsonList:\n if isinstance(obj, list):\n return obj\n elif isinstance(obj, dict):\n return [obj]\n else:\n raise ValueError(\"Object is not a list of dict\")", "def dicts2ndarray(data_dicts):\n # NEVER make any assumption about the order of .keys() return\n aps = [ap for ap in data_dicts[0].keys() if ap != 'tag']\n aps.sort()\n data_num = len(data_dicts)\n data_len = len(data_dicts[0][aps[0]])\n\n ndary = np.zeros([data_num, len(aps), data_len], dtype=np.float32)\n for idx, d in enumerate(data_dicts):\n for aidx, ap in enumerate(aps):\n ndary[idx, aidx, :] = d[ap]\n\n return ndary", "def dict_to_list(dict, header):\n row = []\n for field in header:\n if field in dict:\n row.append(str(dict[field]))\n else:\n row.append(None)\n return row", "def change_sets_into_lists(json_like: Any) -> Any:\r\n return json.loads(json.dumps(json_like, cls=SetEncoder))", "def dict_to_array(dict_array):\n plottable_array = []\n for k in dict_array:\n for i in range(len(dict_array[k])):\n plottable_array.append(dict_array[k][i])\n\n return np.array(plottable_array)", "def _to_list(value: Union[Dict[str, Any], List, Tuple, int], name=None, list_length=None):\n if not isinstance(value, (list, tuple)):\n if list_length is not None:\n value = [value] * list_length\n else:\n value = [value]\n if list_length is not None and len(value) != list_length:\n name = '' if name is None else name\n raise ValueError(\"hparams '%s' must be a list of length %d\" % (name, list_length))\n return value", "def dict_to_list(mydict):\n\n result = [dict(**v, name=k) for k, v in mydict.items()]\n return result", "def ensure_list(value: Any) -> List[Any]:\n\n if isinstance(value, (Mapping, str)): # do not unpack dictionaries\n return [value]\n elif isinstance(value, Iterable):\n return list(value)\n else:\n return [value]", "def convert_dict_to_ndarray(*dictionaries):\n\n array_list = []\n\n # Loop all dicts\n for dictionary in dictionaries:\n # Loop all keys\n for key in dictionary.keys():\n # Skip non-ndarray types\n if not isinstance(dictionary[key], np.ndarray):\n continue\n # Append each item to a list\n array_list.append(dictionary[key])\n\n # Check non-uniform length between arrays\n for item in array_list:\n assert 
len(item) == len(array_list[0]), 'All arrays must have the same length'\n\n return np.vstack(array_list) # .swapaxes(0, 1)", "def transform_array(obj):\n # Check for astype failures (putative Numpy < 1.7)\n dt2001 = np.datetime64('2001')\n legacy_datetime64 = (dt2001.astype('int64') ==\n dt2001.astype('datetime64[ms]').astype('int64'))\n ## not quite correct, truncates to ms..\n if obj.dtype.kind == 'M':\n if legacy_datetime64:\n if obj.dtype == np.dtype('datetime64[ns]'):\n return (obj.astype('int64') / 10**6.0).tolist()\n else:\n return (obj.astype('datetime64[us]').astype('int64') / 1000.).tolist()\n elif obj.dtype.kind in ('u', 'i', 'f'):\n return transform_numerical_array(obj)\n return obj.tolist()", "def dict_to_list(dictionary):\n zippers = []\n for key, value in dictionary.items():\n zippers.append(zip(repeat(key), value))\n output = []\n for zipper in zip(*zippers):\n output.append(dict(zipper))\n return output", "def dlist(src):\n if isinstance(src, dict):\n for k in src:\n src[k] = dlist(src[k])\n if set(src) == set([str(k) for k in range(len(src))]):\n src = [src[str(k)] for k in range(len(src))]\n return src", "def convertToList(data):\n try:\n # Try to create a list from dictionary\n result = list(data.items())\n # Successfully created\n # Return the list\n return result\n except Exception as err:\n # Couldn't convert the dictionary to a list\n print(err)\n return None", "def dataframe_to_list(df: pandas.DataFrame) -> list:\n return json.loads(df.to_json(orient=\"records\"))", "def querySet_to_list(qs):\n return [dict(q) for q in qs]", "def _fit_result_to_list(r: Union[Mapping[str, float], None],\n no_offset: bool = False) -> List[float]:\n if r is None:\n return []\n ret = ([r[\"amplitude\"]] + list(r[\"center\"]) + list(r[\"sigma\"]) +\n [r[\"rotation\"]])\n if not no_offset:\n ret.append(r[\"offset\"])\n return ret", "def convert_from_numpy_to_list(data_as_np):\n evaluations = list(data_as_np['evaluations'])\n\n if data_as_np['var_noise'] is not None:\n var_noise = list(data_as_np['var_noise'])\n else:\n var_noise = []\n\n n_points = len(evaluations)\n points = [list(data_as_np['points'][i, :]) for i in xrange(n_points)]\n\n data = {}\n data['points'] = points\n data['var_noise'] = var_noise\n data['evaluations'] = evaluations\n return data", "def _ax_tolist(ax: object) -> list:\n if isinstance(ax, list):\n pass\n elif isinstance(ax, dict):\n axlist = ax.keys()\n ax = [ax for ax in ax[axlist]]\n elif isinstance(ax, np.ndarray):\n ax = ax.tolist()\n elif isinstance(ax, matplotlib.axes.Axes): # a single axes from a subplot\n ax = [ax]\n return [a for a in ax if a is not None]", "def dict_values(d):\n return list(d.values())", "def clean_arrays_dict(arrays_dict):\n for k in arrays_dict.keys():\n volumes_list = arrays_dict[k]\n arrays_dict[k] = [convert_Volume_to_slices(v) for v in volumes_list]", "def test_convert_dicts_to_teradata_rows_returns_empty_list():\n data = []\n output = row_handling.convert_dicts_to_teradata_rows(data)\n assert output == []", "def _as_list(arr):\n if not isinstance(arr, (list, tuple)):\n return [arr]\n return arr", "def listAxes(axd):\n if type(axd) is not dict:\n if type(axd) is list:\n return axd\n else:\n print(\"listAxes expects dictionary or list; type not known (fix the code)\")\n raise\n axl = [axd[x] for x in axd]\n return axl", "def _data_list(json: 'a json'):\n data = json['Time Series (Daily)']\n return list(data.items())", "def dict2list(d: dict) -> list:\n\n d = d if d else {}\n flattened_list = []\n for k, v in d.items():\n 
flattened_list.append(k)\n if isinstance(v, str):\n flattened_list.extend(v.split())\n else:\n flattened_list.append(str(v))\n\n return flattened_list", "def Mongodb_to_list(res):\n\n ep_list = []\n for i in res:\n ep = (str(i[\"obj1\"]), str(i[\"obj1_type\"]), str(i[\"obj2\"]), \\\n str(i[\"obj2_type\"]), str(i[\"spatial_relation\"]), \\\n int(i[\"start_frame\"]), int(i[\"end_frame\"]))\n ep_list.append(ep)\n return ep_list", "def deserialize_to_lists_of_list_of_immutable_maps(values):\n return tuple([deserialize_to_list_immutable_maps(value) for value in values])", "def _extract_data_points_from_series(series: dict) -> List[dict]:\n data_points = series[\"generic:Obs\"]\n if type(data_points) != list:\n data_points = [data_points]\n return data_points", "def __dic2list(self,dic):\n return [(x,dic[x]) for x in dic]", "def _sequences_to_new_records(sequences):\n if isinstance(sequences, dict):\n sequences = list(sequences.items())\n records = []\n for seq in sequences:\n if hasattr(seq, \"id\"):\n records.append(deepcopy(seq))\n else:\n name, seq = seq\n records.append(\n sequence_to_biopython_record(seq, id=name, name=name)\n )\n return records", "def dict_to_list(dict: Dict, name_key: str = \"name\", value_key: str = \"value\"):\n if not is_dict(dict):\n raise AttributeError(\n \"Argument must be a dictionary, invalid argument received '{}'.\".format(\n dict\n )\n )\n\n list = []\n\n for key, val in dict.items():\n list.append({name_key: key, value_key: val})\n\n return list", "def convert_data(df):\n print(\"Converting history...\")\n return [ dict(row) for i, row in df.iterrows() ]", "def _sdk_object_to_list(object):\n result_list = []\n for item in object:\n result_list.append(_get_sdk_object_dict(item))\n return result_list", "def _decode_list(\n data_type, obj, alias_validators, strict, old_style, for_msgpack):\n if not isinstance(obj, list):\n raise bv.ValidationError(\n 'expected list, got %s' % bv.generic_type_name(obj))\n return [\n _json_compat_obj_decode_helper(\n data_type.item_validator, item, alias_validators, strict,\n old_style, for_msgpack)\n for item in obj]", "def attrval_as_list(attrdict, key):\n if key not in attrdict:\n return []\n val = attrdict[key]\n if not isinstance(val, list):\n val = [val]\n return val", "def dict_of_lists_to_list_of_dicts(d):\n key_value_pairs = dict_to_lists(d)\n return map(dict, itertools.product(*key_value_pairs))", "def _deserialize_array(self, schema, annotated_datum):\n if not isinstance(annotated_datum, list):\n raise AvroTypeException(schema, annotated_datum)\n if annotated_datum is None:\n raise AvroTypeException(schema, annotated_datum)\n deserialize = functools.partial(self._deserialize_data, schema.items)\n return list(map(deserialize, annotated_datum))", "def key_value_list(d):\n if not isinstance(d, dict) and not isinstance(d, list):\n return []\n\n key_values = []\n\n if isinstance(d, list):\n for entry in d:\n if isinstance(entry, dict):\n key_values.extend(key_value_list(entry))\n else:\n for k, v in d.items():\n if k is None or v is None:\n continue\n\n key_values.append((k, v))\n key_values.extend(key_value_list(v))\n\n return key_values", "def key_value_list(d):\n if not isinstance(d, dict) and not isinstance(d, list):\n return []\n\n key_values = []\n\n if isinstance(d, list):\n for entry in d:\n if isinstance(entry, dict):\n key_values.extend(key_value_list(entry))\n else:\n for k, v in d.items():\n if k is None or v is None:\n continue\n\n key_values.append((k, v))\n key_values.extend(key_value_list(v))\n\n return 
key_values", "def __convert_data_to_list_of_dict__(self, data):\n jsons = list()\n for row in data:\n json_for_row = dict(zip(self.__fieldnames__, row))\n jsons += [json_for_row]\n return jsons", "def _convert_values_to_correct_datatypes(d: dict):\n for key, value in d.items():\n if isinstance(value, dict):\n __class__._convert_values_to_correct_datatypes(value)\n elif isinstance(value, list):\n d[key] = [__class__._convert_value_to_correct_datatype(item) for item in value]\n else:\n d[key] = __class__._convert_value_to_correct_datatype(value)", "def hydrate_list(self, values):\n assert isinstance(values, list)\n for i, value in enumerate(values):\n if isinstance(value, (list, dict, Structure)):\n values[i] = self.hydrate_object(value)\n return values", "def map_res(val):\n if isinstance(val, (pa.Table, pa.Array, pa.ChunkedArray)):\n return val.to_pylist()\n if isinstance(val, pa.Scalar):\n return val.as_py()\n return val", "def to_listlist(list_dict):\n\n # Get headers\n headers = [key for key in list_dict[0]]\n table = [headers] # append headers\n\n # Get data\n for dct in list_dict:\n # Store values in temporary list\n temp_lst = []\n\n # Fetch values inside dictionary\n for ikey in dct:\n temp_lst.append(dct[ikey])\n\n # Add tempt list to table container\n table.append(temp_lst)\n\n return table", "def dict_to_coord_lists(dictionary):\n\tkey_list = []\n\tvalue_list = []\n\n\tfor key in dictionary:\n\t\tkey_list.append(key)\n\t\tvalue_list.append(dictionary[key])\n\n\treturn [key_list, value_list]", "def convert(data):\n return {k: [d[k] for d in data] for k in data[0].keys()}", "def primitivas_np2list(primitivas):\n for primitiva in primitivas:\n for key, value in primitiva.items():\n if key[:3]==\"pos\" or key[:3]==\"mag\" or key[:3]==\"dif\":\n try:\n value_temp = value.tolist()\n except AttributeError:\n # pass\n continue\n else:\n primitiva[key] = value_temp \n\n return primitivas", "def array_of_dicts_from_map(map, remove_fields=None, sub_values=None):\n # It's faster to do this once ahead of the dataset instead of redoing it every call\n remove_fields = make_set_of_field_names(remove_fields)\n sub_values = make_set_of_field_names(sub_values)\n return [x if not isinstance(x, declarative_base())\n else dict_from_row(x, remove_fields, sub_values)\n for x in map.values()]", "def dictToList(dict):\n\treturn map(lambda x: x[0], dict.iterkeys())\n\t# [x[0] for x in dict.iterkeys()]", "def flatten(x: Any) -> List[Any]:\n l: List[Any] = []\n if isinstance(x, torch.Size):\n l.append(x)\n elif isinstance(x, dict):\n # sorted(x.items(), key=lambda t: t[0])\n for y in x.values():\n l.extend(flatten(y))\n elif isinstance(x, list) or isinstance(x, set) or isinstance(x, tuple):\n for y in x:\n l.extend(flatten(y))\n else:\n l.append(x)\n return l", "def validate_to_python(self, value):\n super(ListOfDictField, self).validate(value)\n if value == None:\n return []\n if not isinstance(value, (list, tuple)):\n raise ValidationError('Must be a list or tuple, got {0}'.format(type(value).__name__))\n cleaned = []\n for index, dct in enumerate(value):\n if not isinstance(dct, dict):\n raise ValidationError('Item {0}: Must be a list of dicts, got {1}'.format(index, type(value)))\n form = self.Form(dct)\n if form.is_valid():\n cleaned.append(form.cleaned_data)\n else:\n errors = form.errors.as_text()\n raise ValidationError('Item {0}: Invalid format:\\n{1}'.format(index, errors))\n return cleaned", "def _to_list(obj):\n if not isinstance(obj, list):\n return [obj]\n else:\n return obj", "def 
transform_series(obj):\n vals = obj.values\n return transform_array(vals)", "def _get_serieses(parsed_response: dict) -> list:\n serieses = parsed_response[\"message:GenericData\"][\"message:DataSet\"][\"generic:Series\"]\n if type(serieses) != list:\n serieses = [serieses]\n return serieses", "def dict_flatten(*args):\n hold = []\n for a in args:\n hold.append([i for s in a.values() for i in s])\n return hold", "def dict_to_lists(d):\n\n def get_generator(k, l):\n return ((k, li) for li in l)\n\n for k, l in d.items():\n yield get_generator(k, l)", "def deserialize_to_list_immutable_maps(value):\n return tuple([immutables.Map(i) for i in value])", "def array_of_dicts_from_array_of_keyed_tuples(keyed_tuples):\n return [x._asdict() for x in keyed_tuples]", "def _to_pylist(self):\r\n\t\tpylist = []\r\n\t\tdef record_values(i, list):\r\n\t\t\tpylist.append(list._value)\r\n\t\t\treturn True\r\n\t\tself._traverse(record_values)\r\n\t\treturn pylist", "def _separate_raw_data(self, raw_data):\n for key, value in raw_data.items():\n if type(value) == dict:\n self.data_dict[key] = value\n elif type(value) == list:\n self.data_list[key] = value", "def _dict_to_row(val_in):\n out = []\n\n # keep order\n keys = sorted(val_in.keys())\n for k in keys:\n v = val_in[k]\n if not isinstance(v, dict):\n out.append((k, v,))\n else:\n sub_out = _dict_to_row(v)\n for item in sub_out:\n out.append((f'{k}.{item[0]}', item[1],))\n return out", "def to_key_val_list(value):\n if value is None:\n return None\n\n if isinstance(value, (str, bytes, bool, int)):\n raise ValueError('cannot encode objects that are not 2-tuples')\n\n if isinstance(value, collections.Mapping):\n value = value.items()\n\n return list(value)", "def _listofdict_to_df(self, osw_dict=None): \n if type(osw_dict) is not dict:\n raise ValueError(\"The 'osw_dict' arg is invalid!\")\n \n frame = pd.DataFrame.from_dict(osw_dict, orient='columns')\n \n return frame", "def as_array(value):\n\tif not isinstance(value, list):\n\t\treturn [value]\n\treturn value", "def as_list(obj):\n return obj if isinstance(obj, list) else [obj]", "def serialize_ndarrays(d):\n def dict_handler(d):\n return d.items()\n\n handlers = {list: enumerate, tuple: enumerate,\n set: enumerate, frozenset: enumerate,\n dict: dict_handler}\n\n def serialize(o):\n for typ, handler in handlers.items():\n if isinstance(o, typ):\n for key, val in handler(o):\n if isinstance(val, np.ndarray):\n o[key] = val.tolist()\n else:\n o[key] = serialize_ndarrays(o[key])\n return o\n\n return serialize(d)", "def result2list(foo):\n if isinstance(foo, ParseResults):\n return [result2list(bar) for bar in foo]\n else:\n return foo", "def jsonify(data):\n\n for key in data:\n if type(data[key]) == numpy.ndarray:\n data[key] = data[key].tolist()\n\n if isinstance(data[key], list):\n data[key] = [0 if isinstance(x, float) and math.isnan(x) else x for x in data[key]]\n\n return data", "def toFieldValue(self, value):\n if value is self.field.missing_value:\n return value\n else:\n data = []\n for dict_ in value:\n new_dict = {}\n for key, value in dict_.items():\n if isinstance(value, list):\n new_dict[safe_unicode(key)] = \\\n [safe_unicode(x) for x in value]\n else:\n new_dict[safe_unicode(key)] = safe_unicode(value)\n data.append(new_dict)\n return data", "def _as_list(value):\n if not isinstance(value, list):\n value = [value]\n return value", "def _json_numpy_obj_hook(dct):\n if isinstance(dct, dict) and '__ndarray__' in dct:\n data = base64.b64decode(dct['__ndarray__'])\n return np.frombuffer(data, 
dct['dtype']).reshape(dct['shape'])\n return dct", "def flatten(x):\n result = []\n for el in x:\n if isinstance(x, collections.Iterable) and not (isinstance(el, str)|isinstance(el, dict)):\n result.extend(flatten(el))\n else:\n result.append(el)\n return result", "def serialize_list(data):\n new_data = []\n for value in data:\n if isinstance(value, list):\n new_data.append(serialize_list(value))\n elif isinstance(value, dict):\n new_data.append(serialize_dict(value))\n elif isinstance(value, int) or isinstance(value, float) or isinstance(value, str) or isinstance(value, bool):\n new_data.append(value)\n else:\n return []\n\n return new_data", "def dict_to_array(self, d):\n n_fit_p = len(self.fit_parameters)\n n_nui_p = len(self.nuisance_parameters)\n n_wc = len(self.fit_wc_names)\n arr = np.zeros(n_fit_p + n_nui_p + n_wc)\n arr[:n_fit_p] = [d['fit_parameters'][p] for p in self.fit_parameters]\n arr[n_fit_p:n_fit_p+n_nui_p] = [d['nuisance_parameters'][p] for p in self.nuisance_parameters]\n arr[n_fit_p+n_nui_p:] = [d['fit_wc'][c] for c in self.fit_wc_names]\n return arr", "def _deep_list(array_like):\n if isinstance(array_like, (list, tuple)):\n return list(map(_deep_list, array_like))\n return array_like", "def json_numpy_obj_hook(dct):\n if isinstance(dct, dict) and '__ndarray__' in dct:\n data = base64.b64decode(dct['__ndarray__'])\n return np.frombuffer(data, dct['dtype']).reshape(dct['shape'])\n return dct", "def json_numpy_obj_hook(dct):\n if isinstance(dct, dict) and '__ndarray__' in dct:\n data = base64.b64decode(dct['__ndarray__'])\n return np.frombuffer(data, dct['dtype']).reshape(dct['shape'])\n return dct", "def json_numpy_obj_hook(dct):\n if isinstance(dct, dict) and '__ndarray__' in dct:\n data = base64.b64decode(dct['__ndarray__'])\n return np.frombuffer(data, dct['dtype']).reshape(dct['shape'])\n return dct", "def list_cast(inputs, dst_type):\n return iter_cast(inputs, dst_type, return_type=list)", "def values_as_sorted_list(dict):\n return [dict[k] for k in keys_as_sorted_list(dict)]", "def _deserialize_list(data, boxed_type):\n return [_deserialize(sub_data, boxed_type)\n for sub_data in data]", "def test_from_to_json_stat_as_list(self):\n\n results = pyjstat.from_json_stat(self.oecd_datasets)\n json_data = json.loads(pyjstat.to_json_stat(results),\n object_pairs_hook=OrderedDict)\n data_df = pyjstat.from_json_stat(json.loads(json.dumps(json_data),\n object_pairs_hook=\n OrderedDict))\n line_thirty = ['Unemployment rate', 'Belgium', 2009, 7.891892855]\n dimensions = pyjstat.get_dimensions(self.oecd_datasets['oecd'],\n 'label')\n self.assertTrue(len(data_df) == 2)\n self.assertTrue(set(data_df[0].columns.values[:-1]) ==\n set(dimensions[1]))\n self.assertTrue(set(data_df[0].iloc[30].values) ==\n set(line_thirty))", "def aslist(self):\n try:\n return [x.aslist() for x in self]\n except Exception:\n pass\n return [x for x in self]", "def sanitize_values(values: dict):\n for (key, value) in values.items():\n if isinstance(value, list):\n values.update({key: value[0]})" ]
[ "0.65203464", "0.6443881", "0.64112335", "0.62288743", "0.62244505", "0.62078065", "0.61272603", "0.60739225", "0.60704356", "0.60704356", "0.6049034", "0.5973778", "0.59609526", "0.5909848", "0.5888783", "0.5881044", "0.58610386", "0.5849552", "0.5837635", "0.5829828", "0.58158547", "0.58051956", "0.5730339", "0.570623", "0.5706019", "0.5698686", "0.5695913", "0.5691202", "0.56593823", "0.56589985", "0.5654475", "0.56536615", "0.5648872", "0.5632567", "0.5620599", "0.56180406", "0.5592172", "0.5590944", "0.55679846", "0.55679727", "0.5567744", "0.5542828", "0.5524577", "0.54959214", "0.5467995", "0.5464163", "0.54574764", "0.5435215", "0.5429585", "0.54248977", "0.54229283", "0.5394646", "0.5386811", "0.53832066", "0.5377045", "0.5377045", "0.5374323", "0.53647757", "0.53626335", "0.5360961", "0.534594", "0.53391826", "0.53360146", "0.5335318", "0.5329112", "0.5323292", "0.53180176", "0.531452", "0.5312857", "0.53081226", "0.5305652", "0.53020674", "0.52911264", "0.52819836", "0.5264074", "0.5263426", "0.5250189", "0.524245", "0.52231", "0.5219348", "0.5215111", "0.5214874", "0.5209499", "0.520608", "0.5205978", "0.52032787", "0.52003324", "0.51963973", "0.51882124", "0.51857173", "0.5173219", "0.51572436", "0.5152071", "0.5152071", "0.5152071", "0.514631", "0.5143827", "0.51426715", "0.5137652", "0.51364326", "0.51331013" ]
0.0
-1
Load an image from a specific path.
def parse_img(image_path):
    image = tf.read_file(image_path)
    image = tf.image.decode_image(image)
    image = tf.reshape(image, [INITIAL_RES, INITIAL_RES, 3])
    image = tf.image.resize_images(image, [OUTPUT_RES, OUTPUT_RES])
    #image = image[:, :, ::-1]  # BGR -> RGB conversion if needed?
    #image = tf.image.rgb_to_grayscale(image)
    #image = tf.image.convert_image_dtype(image, tf.float32)  # In neuralNet.py
    image = image.eval()  # Convert from tensor to Numpy array for Keras
    return image
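Note that image.eval() only works with an active TensorFlow 1.x default session, and INITIAL_RES / OUTPUT_RES are module-level constants defined elsewhere in the original project. A minimal, hypothetical way to call the function (the constant values and file path below are illustrative assumptions, not from the source):

import tensorflow as tf  # TF 1.x API

INITIAL_RES = 256  # assumed; the real constants live elsewhere
OUTPUT_RES = 64

with tf.Session() as sess:  # provides the default session needed by image.eval()
    img = parse_img("../img/example.png")  # NumPy array of shape (OUTPUT_RES, OUTPUT_RES, 3)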
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load(path) -> Image:\n return Image.open(path)", "def load_image(self, path):\n if path:\n self.original_image = cv2.imread(path, 1)\n self.prepare_images()", "def load_image(file_path):\r\n return Image.open(file_path)", "def load_image(path_to_image, image_name):\n print(\"Loading: \", path_to_image + image_name, \" ...\")\n return Image.open(path_to_image + image_name)", "def load(cls, path):\n assert os.path.exists(path), \"No such file: %r\" % path\n\n (folder, filename) = os.path.split(path)\n (name, extension) = os.path.splitext(filename)\n\n image = Image(None)\n image._path = path\n image._format = Image.image_format(extension)\n\n return image", "def load(path):\n print(\"path\", path)\n print(Path(path).is_file())\n if Path(path).is_file():\n img = image.imread(path)\n print(f\"Loading image of dimensions {img.shape[0]} x \"\n f\"{img.shape[1]}\")\n return np.array(img)\n raise FileNotFoundError", "def load(path):\n img = plt.imread(path)\n dimensions = f\"{img.shape[0]} x {img.shape[1]}\"\n print(f\"Loaded image at {path} of dimensions {dimensions}\")\n return img", "def load_img(path):\n img = cv2.imread(path)\n return img", "def load_image(self):\n try:\n return Image.open(self._path, 'r')\n except IOError:\n messagebox.showerror(\"Error\", \"Wrong sprite file path!\")", "def loadImage(self, path: str) -> ndarray:\n try:\n self.img = np.asarray(Image.open(path))\n\n except FileNotFoundError:\n\n print(\"NO such File {}\".format(path))\n return None\n return self.img", "def load(image_path):\n out = None\n\n #####################################\n # START YOUR CODE HERE #\n #####################################\n # Use skimage io.imread\n out = io.imread(image_path)\n ######################################\n # END OF YOUR CODE #\n ######################################\n\n return out", "def read_image(path):\n img = misc.imread(path)\n return img", "def get_image_by_path(image_path, target_size=None):\n img = image.load_img(image_path, target_size=target_size)\n return img", "def load_image(image_path):\n # Case insenstive check of the image type.\n img_lower = image_path.lower()\n if (\n img_lower.endswith(\n \".jpg\",\n -4,\n )\n or img_lower.endswith(\n \".png\",\n -4,\n )\n or img_lower.endswith(\n \".jpeg\",\n -5,\n )\n ):\n try:\n image_data = cv2.imread(image_path)\n image_data = cv2.cvtColor(image_data, cv2.COLOR_BGR2RGB)\n config_utils.logger.info(\"img shape: '{}'.\".format(image_data.shape))\n except Exception as e:\n config_utils.logger.error(\n \"Unable to read the image at: {}. 
Error: {}\".format(image_path, e)\n )\n exit(1)\n elif img_lower.endswith(\n \".npy\",\n -4,\n ):\n image_data = load(image_path)\n else:\n config_utils.logger.error(\"Images of format jpg,jpeg,png and npy are only supported.\")\n exit(1)\n return image_data", "def read_image(path: str):\n return Image.open(path, mode=\"r\")", "def load_image(path: str):\n if path.endswith('.npy'):\n return np.load(path)\n if path.endswith(('.nii', '.nii.gz', '.hdr', '.img')):\n import nibabel as nib\n return nib.load(path).get_data()\n if path.endswith('.tif'):\n from PIL import Image\n with Image.open(path) as image:\n return np.asarray(image)\n\n raise ValueError(f\"Couldn't read image from path: {path}.\\n\"\n \"Unknown file extension.\")", "def read_img(path):\r\n if os.path.isfile(path):\r\n return cv2.imread(path)\r\n else:\r\n raise ValueError('hiiiiiiiiii')", "def load_image(image_path):\n image = io.imread(image_path)\n io.imshow(image)\n io.show()\n print(\"Size of the image is {} KB\".format(round(os.path.getsize(image_path)/1024,2)))\n return image", "def load_image(self, filename):\n return pygame.image.load(os.path.join('images', filename))", "def load(self):\n logger.debug(f\"Reading {self.path.name}\")\n self.label = int(Data.fromLabel(self.path.parent.name))\n self.image = skimg.data.imread(self.path)", "def imread(path):\n img = cv2.imread(path)\n return img", "def load_img(path: str) -> np.ndarray:\n \n return np.array(Image.open(path))", "def imgLoad(path, gray=False):\n\tif gray:\n\t\treturn to_tensor(Image.open(path).convert('L'))[None,...]\n\treturn to_tensor(Image.open(path))[None,...]", "def imgLoad(path, gray=False):\n\tif gray:\n\t\treturn to_tensor(Image.open(path).convert('L'))[None,...]\n\treturn to_tensor(Image.open(path))[None,...]", "def get_image_from_file(path):\n try:\n img = Image.open(path)\n return img\n except IOError as e:\n print e\n return None", "def pil_loader(path):\n with open(path, 'rb') as f:\n with Image.open(f) as img:\n sqrWidth = np.ceil(np.sqrt(img.size[0]*img.size[1])).astype(int)\n return img.convert('L').resize((sqrWidth, sqrWidth))", "def _load_image(path):\r\n image = Image.open(path)\r\n size = image.size\r\n \r\n image = image.resize((550,550), Image.ANTIALIAS)\r\n# image = image.thumbnail((200,200), Image.ANTIALIAS)\r\n return image", "def load_image(self, path):\n\n image = cv2.imread(path) / 255\n h, w, _ = image.shape\n image = cv2.resize(image, (self.input_size, self.input_size))\n nh, nw, _ = image.shape\n return image, (nh/h, nw/w)", "def _load_image(file: str) -> pyglet.image.AbstractImage:\n\n return pyglet.image.load(Config.RES_DIR + \"img\" + Config.FILE_SEPARATOR + file)", "def __load(self, node, path):\n\n self.firstgid = node['firstgid']\n self.margin = node['margin']\n self.spacing = node['spacing']\n\n # convierte la ruta de la imagen en una ruta relativa al proyecto\n directory = os.path.dirname(path)\n self.image_path = os.path.join(directory, *node['image'].split(r'\\/'))\n self.image_path = os.path.normpath(self.image_path)", "def test_load_jpg():\n parameters = {'path': 'green-dot.jpg'}\n\n images.load(parameters)", "def load_image(data_dir, image_file):\n image_path = os.path.join(data_dir, image_file)\n image = mpimg.imread(image_path)\n return image", "def _load_img(self, img_path):\n img = Image.open(img_path).convert('RGB')\n\n if self.use_landmarks:\n landmarks = np.array(self.landmarks[img_path[img_path.rfind('/')+1:]]).reshape(-1)\n img = FivePointsAligner.align(np.array(img), landmarks, show=False)\n img = 
Image.fromarray(img)\n\n if self.transform is None:\n return img\n\n return self.transform(img)", "def load(image_path):\n\tpil_image = Image.open(image_path).convert(\"RGB\")\n\t# convert to BGR format\n\timage = np.array(pil_image)[:, :, [2, 1, 0]]\n\treturn image", "def load(self, path):\n (folder, filename) = os.path.split(path)\n (name, extension) = os.path.splitext(filename)\n return Costume(name, Image.load(path))", "def load_image(self, path, target_size=None):\n img = self.pil_image.open(path)\n if img.mode != 'RGB':\n img = img.convert('RGB')\n if target_size is not None:\n width_height_tuple = (target_size[1], target_size[0])\n if img.size != width_height_tuple:\n img = img.resize(width_height_tuple, self.pil_interpolation)\n return img", "def load_image(self, path, max_size=400, shape=None):\n\n if 'http' in path:\n response = requests.get(path)\n image = Image.open(BytesIO(response.content)).convert('RGB')\n else:\n image = Image.open(path).convert('RGB')\n \"\"\" Check image size \"\"\"\n if max(image.size) > max_size:\n size = max_size\n else:\n size = image.size\n\n if shape is not None:\n size = shape\n \"\"\" Transform image \"\"\"\n input_transform = transforms.Compose([transforms.Resize(size),\n transforms.ToTensor(),\n transforms.Normalize((0.485, 0.456, 0.406),\n (0.229, 0.224, 0.225))])\n self = input_transform(image)[:3, :, :].unsqueeze(0)\n return self", "def imread(img_path):\n if not os.path.exists(img_path):\n raise ImageNotFoundError(f\"Image {img_path} could'nt be located\")\n\n img = cv2.imread(img_path)\n\n if img is None:\n raise InvalidImageError(f\"Image {img_path} could'nt be loaded\")\n\n return img", "def load_image(self, index):\n image_path = os.path.join(self.folder_path, self.image_ids[index] + '.jpg')\n img = Image.open(image_path).convert('RGB')\n if debug:\n print(\"Loaded image: \", image_path)\n return img", "def read_image(img_path):\n img = imageio.imread(uri=img_path)\n return img", "def load_image(self, image_id):\n # Load image\n path = self.image_info[image_id]['path']\n if path.endswith(\".png\" or \".jpg\"):\n image = skimage.io.imread(path)\n elif path.endswith(\".dcm\"):\n ds = pydicom.read_file(path)\n image = ds.pixel_array\n # If grayscale. Convert to RGB for consistency.\n if image.ndim != 3:\n image = skimage.color.gray2rgb(image)\n # If has an alpha channel, remove it for consistency\n if image.shape[-1] == 4:\n image = image[..., :3]\n return image", "def load(self, path, shape=(1024, 1024, 35), dtype='uint16'):\n valid_dtypes = ['uint8', 'uint16']\n if dtype not in valid_dtypes:\n raise ValueError('dtype should be either one of %s' % ', '.join(valid_dtypes))\n\n im = io.imread(path)\n im = numpy.rollaxis(im, 0, 3)\n\n if im.shape != shape and shape is not None:\n factors = tuple(map(lambda z: int(z[0] / z[1]), zip(im.shape, shape)))\n if any([f > 1 for f in factors]):\n # im = resize(im, shape, mode='constant')\n im = downscale_local_mean(im, factors=factors).astype(im.dtype)\n # if 'conf' in path.lower():\n else:\n warnings.warn('Target shape is not a multiple below initial shape')\n\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n if dtype == 'uint8' and im.dtype != numpy.uint8:\n im = img_as_ubyte(im)\n if dtype == 'uint16' and im.dtype != numpy.uint16:\n im = img_as_uint(im)\n\n self.image_raw = im\n self.name = path", "def load_image(self, image_path):\n # Load image\n image = cv2.imread(image_path)\n #TODO 如果是灰度图先转为RGB的\n # If grayscale. 
Convert to RGB for consistency.\n # if image.ndim != 3:\n # image = skimage.color.gray2rgb(image)\n # If has an alpha channel, remove it for consistency\n if image.shape[-1] == 4:\n image = image[..., :3]\n return image\n pass", "def read_image(image_path):\n if not os.path.exists(image_path):\n raise IOError('File does not exist: %s' % image_path)\n else:\n return Image.open(image_path)", "def load_img(path, imsize, device):\n transform = transforms.Compose([\n transforms.Resize(imsize), # resize image\n transforms.ToTensor() # PIL image to Tensor\n ])\n img = Image.open(path)\n # fake batch dimension required to fit network's input dimensions\n img = transform(img).unsqueeze(0)\n return img.to(device, torch.float)", "def hload_pil(filepath):\n img = Image.open(filepath)\n return img", "def get_image(path):\n\n # Check if the picture exists or not.\n if not os.path.isfile(path):\n print('Cannot open the image. Please try again!')\n exit(1)\n\n try:\n # Open the image.\n image = Image.open(path)\n\n # If everything is okay return it.\n return image\n # If an error occurred.\n except Exception as err:\n print('Error occurred while trying to open the image:', err, 'Please try again!')\n exit(1)", "def load_single_image(path: str) -> np.uint8:\n if not os.path.exists(path):\n print(f\"Warning, try to load non-exist image {path}\")\n return None\n if path.endswith(\".npy\"):\n img = np.load(path)\n elif path.endswith(\".png\") or path.endswith(\".jpeg\") or path.endswith(\".jpg\"):\n img = plt.imread(path)\n if img.dtype != \"uint8\":\n img = (255 * img).astype(np.uint8)\n return img", "def load_image(self, path, convert_alpha=False):\n if convert_alpha:\n return load(self.get_path(path)).convert_alpha()\n return load(self.get_path(path)).convert()", "def load_image(cls, fullname):\n\t\ttry:\n\t\t\timage_stream = open(fullname, 'rb')\n\t\t\timage = pyglet.image.load(fullname, file=image_stream)\n\t\texcept IOError, message:\n\t\t\tprint 'Cannot load image:', fullname\n\t\t\traise ImageLoadFileIOError, message\n\t\treturn image", "def get_image(image_path):\r\n\r\n return Image.open(image_path)", "def load_image(self, **kwargs):\n ...", "def readImage(self, path, tt=1):\n return cv2.imread( path, tt)", "def load_image(self, image_id):\n # Load image\n image = skimage.io.imread(self.image_info[image_id]['path'])\n # If grayscale. 
Convert to RGB for consistency.\n if image.ndim != 3:\n image = skimage.color.gray2rgb(image)\n return image", "def load_image(img_file_name):\n file_name = os.path.join('.', 'images', img_file_name)\n img = pygame.image.load(file_name)\n img.convert()\n return img", "def pil_loader(path, color=True):\n imgExt = os.path.splitext(path)[1]\n if imgExt == \".npy\":\n img = np.load(path)[0]\n return np.swapaxes(np.swapaxes(img, 0, 2), 0, 1)\n\n # open path as file to avoid ResourceWarning\n # (https://github.com/python-pillow/Pillow/issues/835)\n with open(path, 'rb') as f:\n img = Image.open(f)\n if color:\n return img.convert('RGB')\n else:\n return img.convert('L')", "def set_image(self, path):\r\n \r\n image = self._load_image(path)\r\n self.image_raw = image\r\n self.image = ImageTk.PhotoImage(image)\r\n self.image_panel.configure(image=self.image)", "def loadImage(self, imagePath, customScaleFactor=None):\n\t\tif customScaleFactor: scaleFactor = customScaleFactor\n\t\telse: scaleFactor = self.IMAGESCALEUP\n\n\t\timg = pygame.image.load(imagePath)\n\t\timg = pygame.transform.scale(img, (img.get_width() * scaleFactor, img.get_height() * scaleFactor))\n\t\timg.convert_alpha()\n\t\treturn img", "def load_from_file(self, filename):\n\n loader = ImageLoader()\n loader.load(self, filename)", "def _load_image(path, filename, bits, mode):\n if filename.rsplit('.')[1].lower() == 'dcm':\n ds = pydicom.dcmread(os.path.join(path, filename))\n m = ('I;16' if bits == 16 else 'L') if mode == 'L' else 'RGB'\n image = Image.frombuffer(\n m, (ds.Columns, ds.Rows), ds.PixelData, 'raw', m, 0, 1)\n else:\n image = Image.open(os.path.join(path, filename)).convert(mode)\n return image", "def load_image(self, image_id):\n # Load image\n image = skimage.io.imread(self.image_info[image_id]['path'])\n # If grayscale. 
Convert to RGB for consistency.\n if image.ndim != 3:\n image = skimage.color.gray2rgb(image)\n # If has an alpha channel, remove it for consistency\n if image.shape[-1] == 4:\n image = image[..., :3]\n return image", "def load_image_file(filename, mode='RGB'):\n return imread(filename, mode=mode)", "def load_single_image(image_path, dim=100):\n if not isinstance(image_path, str):\n img = Image.open(image_path)\n img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)\n img = preprocess_data(img, dim)\n else:\n img = cv2.imread(image_path, cv2.IMREAD_COLOR)\n img = preprocess_data(img, dim)\n\n img = np.array([img])\n\n return img", "def load_image(self, img_name):\n img_data = cv2.imread(img_name, 0)\n return img_data", "def loadImage(img_path):\n\n img = Image.open(img_path)\n np_img = np.array(img)\n return (np_img)", "def load(cls, path, name, **kwargs):\n path = Path(path)\n assert path.exists() and path.is_dir(), f\"Load location {path} doesnt exist.\"\n\n pickle_path = path / (name + \".pkl\")\n image_path = path / (name + \"_image.npy\")\n depths_path = path / (name + \"_depths.npy\")\n\n if pickle_path.is_file():\n with open(pickle_path, 'rb') as pickle_file:\n return dill.load(pickle_file)\n\n assert image_path.is_file(), \"_image.npy file must exist if pickle doesnt.\"\n img = np.load(image_path)\n\n if depths_path.is_file():\n kwargs[\"depths\"] = np.load(depths_path)\n else:\n assert (\n \"top\" in kwargs.keys() and \"base\" in kwargs.keys()\n ), \"Depth info needed.\"\n\n return cls(img, **kwargs)", "def load(path):\n pass", "def read_img(img_path:str) -> object:\n img = cv2.imread(img_path)\n return img", "def LoadImage(self, filename, mode):\n print(\"TODO: CHECK FOR >PNG?\")\n path = \"static/CVImages/\" + filename\n print(\" path \" + path)\n img = cv2.imread(path, mode) # 0 for black, 1 for rgb\n return img", "def load_image():\n return cv2.imread('test.png')\n pass", "def load(f, as_grey=False):\n use_plugin('pil')\n return imread(os.path.join(assets, f), as_grey=as_grey)", "def load_image(image_path):\n img_transforms = get_standard_img_transforms()\n image = Image.open(image_path)\n images = img_transforms(image).unsqueeze(0)\n return images", "def read_im(im_path):\n im = cv2.imread(im_path)\n return im", "def load_image(path, height, width, mode='RGB'):\n image = PIL.Image.open(path)\n image = image.convert(mode)\n image = np.array(image)\n # squash\n image = scipy.misc.imresize(image, (height, width), 'bilinear')\n return image", "def load_image(path, image_size, num_channels, interpolation,\n smart_resize=False):\n img = io_ops.read_file(path)\n img = image_ops.decode_image(\n img, channels=num_channels, expand_animations=False)\n if smart_resize:\n img = keras_image_ops.smart_resize(img, image_size,\n interpolation=interpolation)\n else:\n img = image_ops.resize_images_v2(img, image_size, method=interpolation)\n img.set_shape((image_size[0], image_size[1], num_channels))\n return img", "def imread(path):\n with open(path, 'rb') as f:\n with PIL.Image.open(f) as img:\n return img.convert('RGB')", "def load_image(path, feature=None, transform=None):\n\n img = mpimg.imread(path)\n\n if img.shape[0] != 50 or img.shape[1] != 50 or img.shape[2] != 3:\n return None\n\n if feature is not None:\n img = feature(img)\n else:\n img = img.reshape([-1])\n\n try:\n return img if transform is None else transform(img)\n except ValueError:\n return None", "def _load(f, as_gray=False):\n # importing io is quite slow since it scans all the backends\n # we lazy import it here\n from 
skimage.io import imread\n return imread(os.path.join(data_dir, f), plugin='pil', as_gray=as_gray)", "def loadImage(j, im, opts={}):\n displayMessage(j, \"j.Load(%s, ...)\" % im)\n j.Load(im, opts)\n waitStatus(j)", "def load(self, path):\n pass", "def load(self, path):\n pass", "def load_img(fname):\n img = cv2.imread(fname)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n return img", "def load_image(fname):\n return load_tiff(fname)", "def load(self, path: str):\n pass", "def _read_image(self, image_path:str, label:str):\n # Get the full path to the image\n image = \"\"\n if label == \"real\":\n image = os.path.join(self.root, \"real\", image_path)\n else:\n image = os.path.join(self.root, \"fake\", image_path)\n \n # Read the image\n image = cv2.imread(image)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n # Normalize the image\n image = image / 255.0\n\n # Convert the image to floating point to use it as\n # an input to the PyTorch model\n image = image.astype(np.float32)\n\n return image", "def imread(fname):\r\n return skimage.io.imread(fname)", "def test_load_fail():\n parameters = {'path': 'foo.bar'}\n\n images.load(parameters)", "def load(filename, flag=None):\n\tif flag is None:\n\t\timg = cv2.imread(filename)\n\telif flag is 1 or flag is 0 or flag is -1:\n\t\timg = cv2.imread(filename, flag)\n\telse:\n\t\tprint \"ERROR: Load: Incorrect flag parameter: \" + str(flag) + \"\\n\"\n\t\tsys.exit()\n\tif img is None:\n\t\tprint \"ERROR: Load: Image not found/supported at: \" + str(filename) + \"\\n\"\n\t\tsys.exit()\n\telse:\n\t\treturn img", "def load(path):\n if path.startswith('gs://'):\n return _gcs_load(path)\n if path.startswith('file://'):\n return _file_load(path)\n raise ValueError('Unknown URI: {}'.format(path))", "def load_image(self, image_id):\n # Load image\n# print(self.image_info[image_id]['path'])\n image = cv2.imread(self.image_info[image_id]['path'],cv2.IMREAD_GRAYSCALE) \n image = image[:,:, np.newaxis] #Add 1 dimension for grayscale images\n return image", "def read(path: Union[Path, str]) -> np.ndarray:\n return _reader.imread(str(path))", "def load_image(infilename):\n data = mpimg.imread(infilename)\n return data", "def load_image(path, preprocess=True):\n x = image.load_img(path, target_size=(H, W))\n if preprocess:\n x = image.img_to_array(x)\n x = np.expand_dims(x, axis=0)\n x = x / 255.0\n return x", "def read_image(image_path, *args, **kwargs):\n # TODO: Implement the method\n image2 = Image.open(image_path)\n image = num.asarray(image2)\n\n return image", "def load_img(path, grayscale=False, target_size=None):\n img = io.imread(path, grayscale)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n if target_size:\n img = cv2.resize(img, (target_size[1], target_size[0]))\n return img", "def image_load(path) -> numpy.ndarray:\n # file\n na = numpy.array(Image.open(path))\n # fix shape\n na = numpy.moveaxis(na, [2,0,1], [0,1,2])\n # shape is now (3,h,w), add 1\n na = na.reshape(1,3,na.shape[1],na.shape[2])\n # change type\n na = na.astype(\"float32\") / 255.0\n return na", "def load_image(self, image_id):\n info = self.image_info[image_id]\n # bg_color = np.array(info['bg_color']).reshape([1, 1, 3])\n # image = np.ones([info['height'], info['width'], 3], dtype=np.uint8)\n # image = image * bg_color.astype(np.uint8)\n # for shape, color, dims in info['shapes']:\n # image = self.draw_shape(image, shape, dims, color)\n\n width, height = info['width'], info['height']\n\n if info['real']:\n # load image from disk\n impath = os.path.join(self.real_image_dirpath, 
info['real_image_path'])\n image = cv2.imread(impath,1)\n image = cv2.resize(image, (width, height), cv2.INTER_CUBIC)\n else:\n # synthesize image\n background_path = info['background_image_path']\n card_template_path = info['card_template_path']\n cornerpoints = info['cornerpoints']\n image = self.synthesize_image(card_template_path, background_path, cornerpoints, (width, height))\n return image", "def LoadPicture(filename):\n return Bitmap(filename)", "def image(self, label, fname):\n if not os.path.exists(fname):\n raise OptionError(\"%s - no such file or directory\" % label)\n try:\n return Image(fname)\n except:\n raise OptionError(\"%s - invalid image file\" % label)", "def _open_image(self, path):\n return cv.imread(path, 1)\n # .astype(float)", "def load_image(filename):\n rgb = imread(filename)\n return UncertainImage(rgb)" ]
[ "0.82594067", "0.8017533", "0.7872266", "0.7861384", "0.7613008", "0.7607398", "0.7584714", "0.75568944", "0.7168932", "0.7154638", "0.70918506", "0.70681524", "0.6985307", "0.6985266", "0.69554704", "0.6927727", "0.6898096", "0.68906724", "0.6875575", "0.686947", "0.68326443", "0.6797013", "0.676368", "0.676368", "0.67557645", "0.6718003", "0.6703009", "0.6700429", "0.6689615", "0.6640667", "0.6631473", "0.66170883", "0.6603487", "0.65988415", "0.6586127", "0.65788954", "0.6556623", "0.6556352", "0.65405035", "0.6532397", "0.65269494", "0.6525676", "0.6509119", "0.650572", "0.6473177", "0.6455351", "0.6446769", "0.6444735", "0.64400506", "0.6436912", "0.6425188", "0.6413746", "0.6400422", "0.63983655", "0.6389206", "0.63834536", "0.63585436", "0.63492996", "0.63327533", "0.63320553", "0.63184315", "0.63129914", "0.630506", "0.6289355", "0.6279255", "0.6278955", "0.6268993", "0.6263604", "0.6242829", "0.6239042", "0.6235491", "0.62048", "0.6197211", "0.61832064", "0.61669135", "0.6165911", "0.6161506", "0.61585534", "0.6155442", "0.6150397", "0.6150397", "0.6146495", "0.6145273", "0.61047775", "0.6090749", "0.6089348", "0.6086355", "0.60502195", "0.60479796", "0.60465664", "0.60326076", "0.60325485", "0.60240763", "0.601112", "0.6009341", "0.5998713", "0.59976864", "0.5994883", "0.59894794", "0.59894127", "0.5988988" ]
0.0
-1
Loads all images from ../img folder, split into training and test data.
def load_data(train_test_ratio = 0.8, class_range = 8, randomised = True):
    # Get image filenames, labels, and the number of classification classes
    filenames = glob.glob("../img/*.png")
    if randomised:
        random.shuffle(filenames)
    img_labels = []
    for filename in filenames:
        label = int(filename.split("-d",1)[1].split('-',1)[0])
        label = max(0, (label - 1) // (class_range))
        img_labels.append(label)
    num_classes = max(img_labels) + 1  # E.g. max label 5 -> 0-5 inclusive
    num_total_samples = len(filenames)
    num_train_samples = int(num_total_samples * train_test_ratio)
    num_test_samples = num_total_samples - num_train_samples
    training_images = np.empty(
        (num_train_samples, OUTPUT_RES, OUTPUT_RES, 3),
        dtype='uint8'
    )
    training_labels = np.asarray(img_labels[:num_train_samples], dtype='uint8')
    for i in range(0, num_train_samples):
        training_images[i] = parse_img(filenames[i])
    test_images = np.empty(
        (num_test_samples, OUTPUT_RES, OUTPUT_RES, 3),
        dtype='uint8'
    )
    test_labels = np.asarray(img_labels[num_train_samples:], dtype='uint8')
    for i in range(0, num_test_samples):
        test_images[i] = parse_img(filenames[i + num_train_samples])
    return ((training_images, training_labels), (test_images, test_labels), num_classes)
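A small usage sketch for the function above (assumed, not from the original source; it relies on glob, random, numpy, parse_img and the resolution constants already being in scope):

(train_x, train_y), (test_x, test_y), n_classes = load_data(
    train_test_ratio=0.8, class_range=8, randomised=True)
print(train_x.shape, test_x.shape, n_classes)  # e.g. (N_train, 64, 64, 3) (N_test, 64, 64, 3) 8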
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_images(test_data_dir, image_size = (300, 300)):\n # loop over the input images\n images_data = []\n labels = []\n imagePaths = sorted(list(paths.list_images(test_data_dir)))\n for imagePath in imagePaths:\n # load the image, pre-process it, and store it in the data list\n image = cv2.imread(imagePath)\n image = cv2.resize(image, image_size)\n image = img_to_array(image)\n images_data.append(image)\n\n # extract the class label from the image path and update the\n # labels list\n label = imagePath.split(os.path.sep)[-2]\n labels.append(label)\n return images_data, sorted(labels)", "def load_scraped_food_images(ROOT):\n Xtr, Ytr = load_food_image_batch(os.path.join(ROOT, 'train'),50000)\n Xte, Yte = load_food_image_batch(os.path.join(ROOT, 'test'),10000)\n return Xtr, Ytr, Xte, Yte", "def load(self):\n\n # get files in folder\n files = [f for f in listdir(self.data_path)]\n print(\"loading images from folder: %s\" % self.data_path)\n\n images = []\n image_targets = []\n for f in files:\n filepath = path.join(self.data_path, f)\n images.append(io.imread(filepath, as_grey=True))\n image_targets.append(self.target)\n\n # define new size and resize images\n new_size = (2 ** self.size_exponent, 2 ** self.size_exponent)\n for i in range(0, len(images)):\n # images[i] = transform.resize(images[i], new_size)\n images[i] = misc.imresize(images[i], new_size) / 16\n\n self.images = images\n self.targets = image_targets", "def load_images(input_dir, batch_shape):\n images = np.zeros(batch_shape)\n filenames = []\n idx = 0\n batch_size = batch_shape[0]\n # all_files = tf.gfile.Glob(os.path.join(input_dir, '*.png'))\n # test_files = [all_files[idx] for x in np.random.choice(len(all_files), 200, replace=False)]\n # for filepath in test_files:\n for filepath in tf.gfile.Glob(os.path.join(input_dir, '*.png')):\n with tf.gfile.Open(filepath) as f:\n image = imread(f, mode='RGB').astype(np.float) / 255.0\n # Images for inception classifier are normalized to be in [-1, 1] interval.\n images[idx, :, :, :] = image * 2.0 - 1.0\n filenames.append(os.path.basename(filepath))\n idx += 1\n if idx == batch_size:\n yield filenames, images\n filenames = []\n images = np.zeros(batch_shape)\n idx = 0\n if idx > 0:\n yield filenames, images", "def load_images(input_dir, batch_shape):\n images = np.zeros(batch_shape)\n filenames = []\n existing_dirs = [os.path.basename(dir) for dir in os.listdir(FLAGS.output_dir)]\n idx = 0\n batch_size = batch_shape[0]\n for filepath in tf.gfile.Glob(os.path.join(input_dir, '*.JPEG')):\n with tf.gfile.Open(filepath, 'rb') as f:\n image = np.array(Image.open(f).resize([FLAGS.image_height, FLAGS.image_width]).convert('RGB')).astype(np.float) / 255.0\n # Images for inception classifier are normalized to be in [-1, 1] interval.\n images[idx, :, :, :] = image * 2.0 - 1.0\n if os.path.basename(os.path.normpath(input_dir))=='*':\n head, tail = os.path.split(filepath)\n dirname=os.path.basename(head)\n if dirname in existing_dirs:\n continue\n filename = os.path.join(dirname, tail)\n else:\n filename = os.path.basename(filepath)\n filenames.append(filename)\n idx += 1\n if idx == batch_size:\n yield filenames, images\n filenames = []\n images = np.zeros(batch_shape)\n idx = 0\n if idx > 0:\n yield filenames, images", "def load_images(self):\n for image in self.gltf.images:\n self.images.append(image.load(self.path.parent))", "def load_data_in_folder(self):\n print('loading files in data folder')\n n = len(self.filenames)\n idx_max = n // self.batch_size\n for idx in range(0, 
idx_max-1):\n data = []\n for f in self.filenames[idx:idx+64]:\n img = cv2.imread(f, int(self.color))\n if not self.color:\n img = np.expand_dims(img, axis=-1)\n data.append(img)\n data = np.array(data)\n data = data.astype('float32')\n data = (data - 127.5)/127.5\n np.save(op.join(self.data_path, str(idx)), data)\n # TODO last batch ?\n self.data_filenames = sorted(glob(op.join(self.data_path, '*.npy')))", "def load_images(input_dir, batch_shape):\n images = np.zeros(batch_shape)\n filenames = []\n idx = 0\n batch_size = batch_shape[0]\n for filepath in tf.gfile.Glob(os.path.join(input_dir, '*.png')):\n if(FLAGS.checkpoint_file_name==\"vgg_16.ckpt\")or(FLAGS.checkpoint_file_name==\"vgg_19.ckpt\")or(FLAGS.checkpoint_file_name==\"resnet_v1_50.ckpt\")or(FLAGS.checkpoint_file_name==\"resnet_v1_101.ckpt\")or(FLAGS.checkpoint_file_name==\"resnet_v1_152.ckpt\"):\n with tf.gfile.Open(filepath) as f:\n image = imread(f, mode='RGB').astype(np.float)\n images[idx, :, :, :] = image\n else:\n with tf.gfile.Open(filepath) as f:\n image = imread(f, mode='RGB').astype(np.float) / 255.0\n # Images for inception classifier are normalized to be in [-1, 1] interval.\n images[idx, :, :, :] = image * 2.0 - 1.0\n filenames.append(os.path.basename(filepath))\n idx += 1\n if idx == batch_size:\n yield filenames, images\n filenames = []\n images = np.zeros(batch_shape)\n idx = 0\n if idx > 0:\n yield filenames, images", "def load_images(self, target = \"standard\", path=OMNIGLOT_DATAPATH):\n X = []\n Y = []\n folderName = {}\n if target == \"standard\":\n trainFolders = [\"images_background\"]\n testFolders = [\"images_evaluation\"]\n elif target == \"minimal\":\n trainFolders = [\"images_background_small1\", \"images_background_small2\"]\n testFolders = [\"images_evaluation\"]\n \n if self.train:\n for trainFolder in trainFolders:\n folderPath = os.path.join(path, trainFolder)\n imgAllCount = 0 # this is counted for the whole images in all alphabet\n chaAllCount = 0 # this is counted for the whole characters in all alphabet\n\n for alphabet in sorted(os.listdir(folderPath)):\n alphabetPath = os.path.join(folderPath, alphabet)\n folderName[alphabet] = {'totalChar': 0, 'charIndex': [], 'totalImg': 0, 'imgIndex': []}\n \n imgAlphabetCount = 0 # this is counted for the number of images in this alphabet\n chaAlphabetCount = 0 # this is counted for the number of character in this alphabet\n\n folderName[alphabet]['charIndex'].append(chaAllCount)\n folderName[alphabet]['imgIndex'].append(imgAllCount)\n \n for letter in sorted(os.listdir(alphabetPath)):\n letterPath = os.path.join(alphabetPath, letter)\n \n for letterImage in os.listdir(letterPath):\n imagePath = os.path.join(letterPath, letterImage)\n image = mpimg.imread(imagePath)\n X.append(image)\n Y.append(chaAllCount)\n \n imgAlphabetCount += 1\n imgAllCount += 1\n\n chaAlphabetCount += 1\n chaAllCount += 1\n \n folderName[alphabet]['totalChar'] = chaAlphabetCount\n folderName[alphabet]['totalImg'] = imgAlphabetCount\n folderName[alphabet]['charIndex'].append(chaAllCount-1)\n folderName[alphabet]['imgIndex'].append(imgAllCount-1)\n \n X = np.stack(X) \n X = X.reshape(-1, IMAGES_PER_CHARACTER, X.shape[1], X.shape[2])\n return X, np.stack(Y), folderName", "def get_files(self):\n train_images = glob(os.path.join(self.images_dir, '*%s' % self.im_extension)) \n train_labels = [x.replace(self.im_extension, '.npy').replace('images', 'groundTruth') for x in train_images]\n val_images = glob(os.path.join(self.val_images_dir, '*%s' % self.im_extension))\n val_labels = 
[x.replace(self.im_extension, '.npy').replace('images', 'groundTruth') for x in val_images]\n train_images = np.array(train_images)\n train_labels = np.array(train_labels)\n val_images = np.array(val_images)\n val_labels = np.array(val_labels)\n test_images = np.array(\n glob('/media/data_cifs/pytorch_projects/datasets/BSDS500_crops/data/images/test_nocrop/*.jpg'))\n test_labels = np.array(\n [x.replace('images', 'groundTruth').replace('.jpg', '.npy') for x in test_images])\n test_labels = np.array(\n [np.load(x) for x in test_labels])\n keep_idx = np.array([True if x.shape[0] > x.shape[1] else False for x in test_labels])\n test_images = test_images[keep_idx]\n test_labels = test_labels[keep_idx]\n test_images = np.stack([misc.imread(x) for x in test_images], 0)\n test_labels = np.stack(test_labels, 0)\n test_labels = test_labels[..., None]\n\n # Add constant padding to bottom/right\n if self.pad:\n test_images = util.pad(test_images, ((0, 0), (self.pad // 2, self.pad - self.pad // 2), (self.pad // 2, self.pad - self.pad // 2), (0, 0)), mode='linear_ramp')\n test_labels = util.pad(test_labels, ((0, 0), (self.pad // 2, self.pad - self.pad // 2), (self.pad // 2, self.pad - self.pad // 2), (0, 0)), mode='constant', constant_values=0)\n\n # Select images for training\n sort_idx = np.argsort(train_images)\n train_images = train_images[sort_idx[:self.train_size]]\n train_labels = train_labels[sort_idx[:self.train_size]]\n\n # Build CV dict\n cv_files, cv_labels = {}, {}\n cv_files[self.folds['train']] = train_images\n cv_files[self.folds['val']] = val_images\n cv_files[self.folds['test']] = test_images\n cv_labels[self.folds['train']] = train_labels\n cv_labels[self.folds['val']] = val_labels\n cv_labels[self.folds['test']] = test_labels\n return cv_files, cv_labels", "def _preload_all_samples(self):\n if self.mode in ['train_noval', 'train_with_val']:\n\n self._images_train, self._labels_train = [], []\n desc = \"Loading train image pairs & flows\"\n with tqdm(total=len(self._img_trn_path), desc=desc, ascii=True, ncols=100) as pbar:\n for n, image_path in enumerate(self._img_trn_path):\n pbar.update(1)\n label_path = self._lbl_trn_path[n]\n image, label = self._load_sample(image_path, label_path)\n self._labels_train.append(label)\n self._images_train.append(image)\n\n if self.mode == 'train_with_val':\n self._images_val, self._labels_val = [], []\n desc = \"Loading val image pairs & flows\"\n with tqdm(total=len(self._img_val_path), desc=desc, ascii=True, ncols=100) as pbar:\n for n, image_path in enumerate(self._img_val_path):\n pbar.update(1)\n label_path = self._lbl_val_path[n]\n image, label = self._load_sample(image_path, label_path, preprocess=False)\n self._labels_val.append(label)\n self._images_val.append(image)\n\n if self.opts['tb_test_imgs'] is True:\n self._images_test = []\n desc = \"Loading test samples\"\n with tqdm(total=len(self._img_tst_path), desc=desc, ascii=True, ncols=100) as pbar:\n for image_path in self._img_tst_path:\n pbar.update(1)\n self._images_test.append(self._load_sample(image_path, preprocess=False))\n\n elif self.mode in ['val', 'val_notrain']:\n\n self._images_val, self._labels_val = [], []\n desc = \"Loading val image pairs & flows\"\n with tqdm(total=len(self._img_val_path), desc=desc, ascii=True, ncols=100) as pbar:\n for n, image_path in enumerate(self._img_val_path):\n pbar.update(1)\n label_path = self._lbl_val_path[n]\n image, label = self._load_sample(image_path, label_path, preprocess=False)\n self._labels_val.append(label)\n 
self._images_val.append(image)\n\n elif self.mode == 'test':\n self._images_test = []\n desc = \"Loading test samples\"\n with tqdm(total=len(self._img_tst_path), desc=desc, ascii=True, ncols=100) as pbar:\n for image_path in self._img_tst_path:\n pbar.update(1)\n self._images_test.append(self._load_sample(image_path, preprocess=False))", "def load_images(input_dir, batch_shape):\n images = np.zeros(batch_shape)\n filenames = []\n idx = 0\n batch_size = batch_shape[0]\n for filepath in tf.gfile.Glob(os.path.join(input_dir, '*.png')):\n with tf.gfile.Open(filepath) as f:\n image = np.array(Image.open(f).convert('RGB')).astype(np.float) / 1.0\n # Images for inception classifier are normalized to be in [-1, 1] interval.\n images[idx, :, :, :] = image\n filenames.append(os.path.basename(filepath))\n idx += 1\n if idx == batch_size:\n yield filenames, images\n filenames = []\n images = np.zeros(batch_shape)\n idx = 0\n if idx > 0:\n yield filenames, images", "def load_images(input_dir, batch_shape):\n images = np.zeros(batch_shape)\n filenames = []\n idx = 0\n batch_size = batch_shape[0]\n for filepath in tf.gfile.Glob(os.path.join(input_dir, '*.png')):\n with tf.gfile.Open(filepath) as f:\n image = imread(f, mode='RGB').astype(np.float) / 255.0\n # Images for inception classifier are normalized to be in [-1, 1] interval.\n images[idx, :, :, :] = image * 2.0 - 1.0\n filenames.append(os.path.basename(filepath))\n idx += 1\n if idx == batch_size:\n yield filenames, images\n filenames = []\n images = np.zeros(batch_shape)\n idx = 0\n if idx > 0:\n yield filenames, images", "def load_images(input_dir, batch_shape):\n images = np.zeros(batch_shape)\n filenames = []\n idx = 0\n batch_size = batch_shape[0]\n for filepath in tf.gfile.Glob(os.path.join(input_dir, '*.png')):\n with tf.gfile.Open(filepath) as f:\n image = imread(f, mode='RGB').astype(np.float) / 255.0\n # Images for inception classifier are normalized to be in [-1, 1] interval.\n images[idx] = image * 2.0 - 1.0\n filenames.append(os.path.basename(filepath))\n idx += 1\n if idx == batch_size:\n yield filenames, images\n filenames = []\n images = np.zeros(batch_shape)\n idx = 0\n if idx > 0:\n yield filenames, images", "def load_data(self):\n sets = ['train', 'val']\n images = []\n labels = []\n self.labels_dic = {}\n file = open(self.path + 'wnids.txt')\n train_labels = file.read().split()\n if self.train:\n for fn in range(self.num_classes):\n f = train_labels[fn]\n for i in os.listdir(self.path + 'train/' + f + '/images/'):\n images.append(Image.open(self.path + 'train/' + f + '/images/' + i))\n labels.append(f)\n #image label n link to folder names of TinyImageNet\n self.labels_dic[f] = fn\n\n else:\n for fn in range(self.num_classes):\n f = train_labels[fn]\n self.labels_dic[f] = fn\n file_val = open(self.path + 'val/val_annotations.txt')\n val_labels = file_val.read().split('\\n')\n for im in val_labels:\n im_data = im.split(\"\t\")[:2]\n if len(im_data) < 2:\n continue\n if im_data[1] in self.labels_dic:\n images.append(Image.open(self.path + 'val/images/' + im_data[0]))\n labels.append(im_data[1])\n\n self.images = images\n self.labels = labels", "def load_data_in_folder(self):\n if self.data_filenames:\n print('removing existing data files')\n for f in tqdm(self.data_filenames):\n os.remove(f)\n print('loading files in data folder')\n n = len(self.filenames)\n idx_max = n // self.batch_size\n for idx in tqdm(range(0, idx_max-1)):\n data = []\n for f in self.filenames[idx:idx+self.batch_size]:\n img = cv2.imread(f, 
int(self.color))\n if not self.color:\n img = np.expand_dims(img, axis=-1)\n data.append(img)\n data = np.array(data)\n data = data.astype('float32')\n data = (data - 127.5)/127.5\n np.save(op.join(self.data_path, str(idx)), data)\n # TODO last batch ?\n self.data_filenames = sorted(glob(op.join(self.data_path, '*.npy')))", "def load_images(self, image_path):\n X_train = []\n\n # Load all files from the image path using Image.open.\n for i in recursive_list(image_path):\n # Open images as ???\n img = Image.open(i)\n # Convert to NP array.\n img = np.asarray(img)\n # Append them into higher order array.\n if img.shape == (128, 128, 3):\n X_train.append(img)\n\n # return all the images concatenated as a 4D array\n return np.asarray(X_train)", "def load_images(input_dir, batch_shape=[2000,299,299,3]):\n \n filenames = []\n idx = 0\n filepaths=tf.gfile.Glob(os.path.join('./', '*.png'))\n print(len(filepaths))\n print(filepaths)\n batch_shape[0]=len(filepaths)\n batch_size = batch_shape[0]\n print(batch_shape)\n print(\"ZZZ\")\n images = np.zeros(batch_shape, dtype=np.float32)\n \n for filepath in filepaths:\n# with tf.gfile.Open(filepath) as f:\n# image = np.array(Image.open(f).convert('RGB')).astype(np.float) / 255.0\n image = np.array(scipy.misc.imresize(scipy.misc.imread(filepath),(299,299)),dtype=np.float32)/255\n \n # Images for inception classifier are normalized to be in [-1, 1] interval.\n images[idx, :, :, :] = image -0.5 #* 2.0 - 1.0\n filenames.append(os.path.basename(filepath))\n idx += 1\n if idx == batch_size:\n return filenames, images\n filenames = []\n images = np.zeros(batch_shape)\n idx = 0\n if idx > 0:\n return filenames, images", "def load_images(input_dir, batch_shape):\n images = np.zeros(batch_shape)\n filenames = []\n idx = 0\n batch_size = batch_shape[0]\n for filepath in tf.gfile.Glob(os.path.join(input_dir, '*.png')):\n with tf.gfile.Open(filepath) as f:\n image = np.array(Image.open(f).convert('RGB')).astype(np.float) / 255.0\n # Images for inception classifier are normalized to be in [-1, 1] interval.\n images[idx] = image * 2.0 - 1.0\n filenames.append(os.path.basename(filepath))\n idx += 1\n if idx == batch_size:\n yield filenames, images\n filenames = []\n images = np.zeros(batch_shape)\n idx = 0\n if idx > 0:\n yield filenames, images", "def load_dataset(data_dir, img_size):\n global input_set\n global test_set\n\n imgs = []\n img_files = os.listdir(data_dir)\n for img in img_files:\n # try:\n tmp = scipy.misc.imread(data_dir + \"/\" + img)\n x, y, z = tmp.shape # shape : width * length * chanel\n coords_x = int(x / img_size) # 坐标\n coords_y = int(y / img_size) #\n coords = [(q, r) for q in range(coords_x) for r in range(coords_y)] # 列表 x * y\n for coord in coords:\n imgs.append((data_dir + \"/\" + img, coord)) # 为列表添加文件目录\n # except BaseException:\n # print(\"oops\")\n test_size = min(10, int(len(imgs) * 0.2))\n random.shuffle(imgs)\n test_set = imgs[:test_size]\n train_set_X = imgs[test_size:][:200]\n train_set = imgs[test_size:][200:400]\n return", "def load_images_train():\n\n global pool\n x_train = []\n x_train_id = []\n y_train = []\n x_shape = []\n start_time = time.time()\n\n print(\"Reading train images\")\n folders = ['ALB', 'BET', 'DOL', 'LAG', 'NoF', 'OTHER', 'SHARK', 'YFT']\n #folders = ['new']\n for fld in folders:\n index = folders.index(fld)\n print('Loading folder {} (Index: {})'.format(fld, index))\n path = os.path.join('./train1', fld, '*.jpg')\n files = glob.glob(path)\n pool = multiprocessing.Pool(processes=8)\n for fl in files:\n flbase 
= os.path.basename(fl)\n img = cv2.imread(fl,cv2.IMREAD_COLOR)\n result_list = pool.map(process_image, [fl])\n x_train.append(result_list[0])\n x_train_id.append(flbase)\n y_train.append(index)\n #x_shape.append(shape)\n\n print('Read train data time: {} seconds'.format(round(time.time() - start_time, 2)))\n pool.close()\n return x_train, y_train, x_train_id", "def load_images(input_dir, batch_shape, vgg_batch_shape):\n ens_images = np.zeros(batch_shape)\n inc_images = np.zeros(batch_shape)\n tcd_images = np.zeros(batch_shape)\n vgg_images = np.zeros(vgg_batch_shape)\n filenames = []\n idx = 0\n batch_size = batch_shape[0]\n for filepath in tf.gfile.Glob(os.path.join(input_dir, '*.png')):\n with tf.gfile.Open(filepath) as f:\n image = imread(f, mode='RGB')\n\n tcd_image = transcode(image).astype(np.float)\n image = image.astype(np.float)\n vgg_image = vgg_distort(tcd_image, vgg_batch_shape[1:3])\n # Images for inception classifier are normalized to be in [-1, 1] interval.\n image = (image / 255.0) * 2.0 - 1.0\n ens_images[idx] = ens_distort(image)\n # Resize and mean subtract for VGG\n vgg_image -= np.array((_R_MEAN, _G_MEAN, _B_MEAN)).reshape((1, 1, 3))\n vgg_images[idx] = vgg_image\n inc_images[idx] = inc_distort(image)\n tcd_images[idx] = (tcd_image / 255.0) * 2.0 - 1.0\n filenames.append(os.path.basename(filepath))\n idx += 1\n if idx == batch_size:\n yield filenames, ens_images, vgg_images, inc_images, tcd_images\n filenames = []\n idx = 0\n if idx > 0:\n yield filenames, ens_images, vgg_images, inc_images, tcd_images", "def prepare_data(self, *args, **kwargs):\n # get paths to train and test splits\n _split_paths = [os.path.join(self.path_to_data, split)\n for split in os.listdir(self.path_to_data)]\n\n # for each split [train, test]\n for _path in _split_paths:\n _img_classes = os.listdir(_path) # get subfolders representing each class\n self.splits[os.path.basename(_path)] = []\n\n # get the images in pairs with its corresponding class\n for _class in _img_classes:\n _data = self.get_img_text_pair(os.path.join(_path, _class))\n\n if os.path.basename(_path) == 'train':\n self.weights[self.encode_label(_class)] = len(_data)\n self.splits[os.path.basename(_path)].extend(_data)", "def get_train_test_loaders(self, num_workers=2):\n print('Loading the image data...')\n \n train_path_info, test_path_info = self.get_train_test_info()\n\n train_transform = transforms.Compose([transforms.Resize((self.image_width, self.image_height)),\n transforms.RandomAffine(10,translate=(0.1,0.1)),\n transforms.ToTensor()])\n\n test_transform = transforms.Compose([transforms.Resize((self.image_width, self.image_height)),\n transforms.ToTensor()])\n\n trainset = PocovidDataset(train_path_info, transform = train_transform)\n testset = PocovidDataset(test_path_info, transform = test_transform)\n \n self.class_map = trainset.get_class_map()\n self.classes = [self.class_map[key] for key in sorted(self.class_map)]\n\n train_loader = torch.utils.data.DataLoader(trainset, num_workers=num_workers, shuffle=True,\n batch_size=self.batch_size, drop_last=True)\n\n test_loader = torch.utils.data.DataLoader(testset, num_workers=num_workers, shuffle=True,\n batch_size=self.batch_size)\n \n print('Image data is loaded with fold {} as the test data'.format(self.fold))\n print('Number of training images:', len(trainset))\n print('Number of testing images:', len(testset))\n print('*'*100)\n print('The classes are:', self.classes)\n print('*'*100)\n \n return train_loader, test_loader", "def load_images(file):\n\timage_list = 
[] # List for storing all the images\n\ttargets = []\n\t\n\tfor filename in glob.glob(file + '/*.png'):\n\t\t# ==================\n\t\t# Reading the image\n\t\t# ==================\n\t\timage = scipy.misc.imread(filename).astype(np.float32)\n\t\t\n\t\t# ================================\n\t\t# Converting the image to a vector\n\t\t# ================================\n\t\timage = image.flatten() # (784, )\n\t\t\n\t\t# ==============================\n\t\t# Normalizing the image to numpy\n\t\t# ==============================\n\t\timage = image / 255.0\n\t\timage = image - 0.5\n\t\timage = image * 2.0\n\t\t\n\t\t# ===============================\n\t\t# Appending the image to the list\n\t\t# ===============================\n\t\timage_list.append(image)\n\t\t\n\t\t_, value = filename.split('\\\\')\n\t\t# print(value[0])\n\t\ttargets.append(int(value[0]))\n\t\n\timage_list = np.array(image_list)\n\ttargets = np.array(targets)\n\t\n\t# ================================================\n\t# \t\t\tShuffling the data\n\t# ================================================\n\timage_list, targets = shuffle(image_list, targets)\n\t\n\ttrain_images, test_images, train_targets, test_targets = split(image_list, targets)\n\treturn train_images, test_images, train_targets, test_targets", "def load_data(self):\n # make sure preprocessing is same as preprocessing as the network\n # reduce mean, and divide by a value to do scaling\n self.train_datagen = ImageDataGenerator(\n rescale=1./ 255,\n shear_range=0.05,\n rotation_range=20, # randomly rotate images in the range (degrees, 0 to 180)\n zoom_range=[0.9, 1.1], # Randomly zoom image\n width_shift_range=0.1, # randomly shift images horizontally (fraction of total width)\n height_shift_range=0.1, # randomly shift images vertically (fraction of total height)\n horizontal_flip=True, # randomly flip images\n brightness_range=[0.8, 1.2],\n fill_mode='reflect',\n validation_split=0.2)\n\n self.test_datagen = ImageDataGenerator(rescale=1. 
/ 255)\n\n self.train_generator = self.train_datagen.flow_from_directory(\n self.train_dir,\n target_size=(224, 224),\n shuffle=True,\n batch_size=self.batchsize,\n class_mode='categorical',\n subset=\"training\")\n\n self.validation_generator = self.train_datagen.flow_from_directory(\n self.train_dir,\n target_size=(224, 224),\n shuffle=True,\n batch_size=self.batchsize,\n class_mode='categorical',\n subset=\"validation\")\n\n self.test_generator = self.test_datagen.flow_from_directory(\n self.test_dir,\n target_size=(224, 224),\n shuffle=False,\n batch_size=1,\n class_mode='categorical')", "def load_imagenet(directory):\n path_train, path_val = directory + '/ILSVRC2012_img_train', directory + '/ILSVRC2012_img_val'\n train_labels = os.listdir(path_train)\n train_data = []\n for label in train_labels:\n imgs_path = os.path.join(path_train, label)\n imgs = os.listdir(imgs_path)\n for img_name in imgs:\n img_path = os.path.join(imgs_path, img_name)\n img = cv2.imread(img_path)\n b, g, r = cv2.split(img)\n img = cv2.merge([r,g,b]).reshape(-1, 64, 64, 3)\n train_data.append(img)\n train_labels.append(label)\n train_data = np.concatenate(train_data)\n train_labels = np.array(train_labels, dtype='str')\n \n test_labels = os.listdir(path_val)\n test_data = []\n for label in test_labels:\n imgs_path = os.path.join(path_val, label)\n for img_name in imgs:\n img_path = os.path.join(imgs_path, img_name)\n img = cv2.imread(img_path)\n b, g, r = cv2.split(img)\n img = cv2.merge([r,g,b]).reshape(-1, 64, 64, 3)\n test_data.append(img)\n test_labels.append(label)\n test_data = np.concatenate(test_data)\n test_labels = np.array(test_labels, dtype='str')\n \n _, train_labels = np.unique(train_labels, return_inverse=True)\n _, test_labels = np.unique(test_labels, return_inverse=True)\n \n del r, g, b, imgs_path, img_name, img, imgs\n \n return train_data, train_labels, test_data, test_labels", "def load_data(path):\n # Training Images Details\n IMG_SIZE = 224 # Size of images used for training\n IMG_MEAN = [0.485, 0.456, 0.406] # image normalization mean\n IMG_SDEV = [0.229, 0.224, 0.225] # image normalization standard deviation\n\n # Training phases\n phases = ['train', 'valid', 'test']\n\n # Define data locations\n data_dir = {n: path + n for n in phases}\n\n # Define transforms for the training, validation, and testing sets\n data_transforms = {\n 'train':\n transforms.Compose([\n transforms.RandomRotation(30),\n transforms.RandomResizedCrop(IMG_SIZE),\n transforms.RandomHorizontalFlip(p=0.5),\n transforms.ToTensor(),\n transforms.Normalize(IMG_MEAN, IMG_SDEV)]),\n 'valid':\n transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(IMG_SIZE),\n transforms.ToTensor(),\n transforms.Normalize(IMG_MEAN, IMG_SDEV)]),\n 'test':\n transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(IMG_SIZE),\n transforms.ToTensor(),\n transforms.Normalize(IMG_MEAN, IMG_SDEV)])\n }\n\n # Load the datasets\n image_datasets = {n: datasets.ImageFolder(\n data_dir[n], transform=data_transforms[n])\n for n in phases}\n\n # Create the PyTorch dataloaders\n dataloaders = {n: torch.utils.data.DataLoader(\n image_datasets[n], batch_size=64, shuffle=True)\n for n in phases}\n\n # mapping of classes to training indices\n class_to_idx = image_datasets['train'].class_to_idx\n\n return dataloaders, class_to_idx", "def load_train_batch(self):\n def _parse_train_img(img_path):\n with tf.device('/cpu:0'):\n img_buffer = tf.read_file(img_path)\n image_decoded = tf.image.decode_jpeg(img_buffer)\n tgt_image, 
src_image_stack = \\\n self.unpack_image_sequence(\n image_decoded, self.img_height, self.img_width, self.num_source)\n return tgt_image, src_image_stack\n\n def _batch_preprocessing(stack_images, intrinsics, optional_data):\n intrinsics = tf.cast(intrinsics, tf.float32)\n image_all = tf.concat([stack_images[0], stack_images[1]], axis=3)\n\n if self.match_num == 0: # otherwise matches coords are wrong\n image_all, intrinsics = self.data_augmentation(\n image_all, intrinsics, self.img_height, self.img_width)\n tgt_image = image_all[:, :, :, :3]\n src_image_stack = image_all[:, :, :, 3:]\n intrinsics = self.get_multi_scale_intrinsics(intrinsics, self.num_scales)\n return tgt_image, src_image_stack, intrinsics, optional_data\n\n file_list = self.format_file_list(self.dataset_dir, 'train')\n self.steps_per_epoch = int(len(file_list['image_file_list'])//self.batch_size)\n\n input_image_names_ph = tf.placeholder(tf.string, shape=[None], name='input_image_names_ph')\n image_dataset = tf.data.Dataset.from_tensor_slices(\n input_image_names_ph).map(_parse_train_img)\n\n cam_intrinsics_ph = tf.placeholder(tf.float32, [None, 3, 3], name='cam_intrinsics_ph')\n intrinsics_dataset = tf.data.Dataset.from_tensor_slices(cam_intrinsics_ph)\n\n datasets = (image_dataset, intrinsics_dataset, intrinsics_dataset)\n if self.read_pose:\n poses_ph = tf.placeholder(tf.float32, [None, self.num_source+1, 6], name='poses_ph')\n pose_dataset = tf.data.Dataset.from_tensor_slices(poses_ph)\n datasets = (image_dataset, intrinsics_dataset, pose_dataset)\n if self.match_num > 0:\n matches_ph = tf.placeholder(tf.float32, [None, self.num_source, self.match_num, 4], name='matches_ph')\n match_dataset = tf.data.Dataset.from_tensor_slices(matches_ph)\n datasets = (image_dataset, intrinsics_dataset, match_dataset)\n\n all_dataset = tf.data.Dataset.zip(datasets)\n all_dataset = all_dataset.batch(self.batch_size).repeat().prefetch(self.batch_size*4)\n all_dataset = all_dataset.map(_batch_preprocessing)\n iterator = all_dataset.make_initializable_iterator()\n return iterator", "def load_image_data():\n print(\"Loading image data...\")\n label_dict = get_label_vectors()\n categories = [c for c in os.listdir('images/') if c[0] != '.'] # ignore\n labels = [] # instantiate list for image labels\n data = [] # instantiate list for image data\n for i in categories:\n path = 'images/{}/'.format(i) # define path to category folder\n for j in os.listdir(path): # get images from category folder\n labels.append(label_dict[i]) # append label vector\n data.append(cv2.imread(path + j).flatten()) # append flattened image data\n\n labels = np.array(labels) # convert lists to array\n data = np.array(data)\n print(\"Done.\")\n\n return labels, data", "def load_data(data_dir):\n\n # Initiate lists\n images = []\n labels = []\n\n main_dir = os.path.abspath(os.curdir)\n\n for i in range(NUM_CATEGORIES):\n os.chdir(os.path.join(data_dir, str(i))) # Open directory i\n dir_images = os.listdir() # Create a list of all images in directory\n\n for j in range(len(dir_images)):\n image = cv2.imread(dir_images[j]) # Read image from file\n image = tf.keras.preprocessing.image.img_to_array(image) # Transform image to numpy array\n image = tf.image.resize(image, (IMG_WIDTH, IMG_HEIGHT)) # Reshape image to 30 x 30 px\n image = image/255 # Normalize image RGB values\n images.append(image) \n labels.append(i)\n\n os.chdir(main_dir)\n \n return (images, labels)", "def load_images_from_directory(input_dir, batch_shape):\n def input_filenames(input_dir):\n all_files = 
tf.gfile.Glob(os.path.join(input_dir, '*.png'))\n all_files.sort()\n return all_files\n\n\n images = np.zeros(batch_shape)\n filenames = []\n idx = 0\n batch_size = batch_shape[0]\n\n for filepath in input_filenames(input_dir):\n with tf.gfile.Open(filepath, mode='rb') as f:\n image = imread(f, mode='RGB').astype(np.float) / 255.0\n\n # Images for inception classifier are normalized to be in [-1, 1] interval.\n images[idx, :, :, :] = image * 2.0 - 1.0\n filenames.append(os.path.basename(filepath))\n\n idx += 1\n if idx == batch_size:\n yield filenames, images\n filenames = []\n images = np.zeros(batch_shape)\n idx = 0\n\n # This is a partial batch left over at end.\n # Note that images will still have the proper size.\n if idx > 0:\n yield filenames, images", "def import_data(self, img_size):\n path = self._path\n images = []\n labels = []\n\n categs_name = [filename for filename in os.listdir(path)]\n for categ in categs_name:\n if isdir(join(path, categ)):\n\n for img_name in os.listdir(join(path, categ)):\n\n if \".jpg\" in img_name:\n\n img_name = self.correct_filename(img_name, categ)\n img_path = join(path, categ, img_name)\n img = cv2.imread(img_path)\n\n if img_size:\n dim = (img_size, img_size)\n try:\n img = cv2.resize(img, dim)\n except:\n print(img_name, \"has not been loaded.\")\n continue\n\n images.append(img)\n labels.append(categ)\n\n X = np.array(images)\n y = self.transform_labels(labels)\n\n return X, y", "def load_dataset(path_test, width, height):\n tot_images = 0\n for label in listdir(path_test):\n label_full = join(path_test, label)\n for img_name in listdir(label_full):\n tot_images += 1\n\n # allocate the memory\n # THE DTYPE is float, should be the right one\n all_images = np.zeros((tot_images, width, height, 3))\n\n true_labels = []\n num_images = 0\n for label in listdir(path_test):\n label_full = join(path_test, label)\n for img_name in listdir(label_full):\n # for img_name in listdir(label_full)[:10]:\n img_name_full = join(label_full, img_name)\n print(f\"Opening {img_name_full} {width}\")\n\n image = cv2.imread(img_name_full)\n\n image = cv2.resize(image, (width, height))\n\n # scale the pixel values to [0, 1]\n image = image.astype(\"float\") / 255.0\n\n all_images[num_images, :, :, :] = image\n\n num_images += 1\n true_labels.append(label)\n\n print(f\"All_images.shape {all_images.shape}\")\n\n # cv2.imshow('Resized all_images[0]', all_images[0])\n # cv2.waitKey(0)\n\n return all_images, true_labels", "def load_test_dataset(self):\n test_data_path = \"testdata\"\n root = Path(test_data_path)\n classes = sorted([j.name.split('/')[-1] for j in root.iterdir()])\n print(classes)\n\n transform = transforms.Compose([\n transforms.Resize(300),\n transforms.RandomHorizontalFlip(),\n transforms.CenterCrop(250),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.6071, 0.4828, 0.3934], std=[0.2845, 0.3187, 0.3240])\n ])\n\n dataset = datasets.ImageFolder(test_data_path, transform=transform)\n testloader = DataLoader(dataset, batch_size=4, shuffle=True)\n print(\"Loaded data\")\n return testloader", "def load_test_data():\n X = []\n y = []\n for fname in os.listdir(test_dir):\n label = int(fname.split(\"_\")[0])\n img = plt.imread(os.path.join(test_dir, fname))\n X.append(img)\n y.append(label)\n X = np.stack(X)\n y = np.stack(y)\n return X, y", "def _load_images_and_labels(image_dir):\n\n print('Extracting images from: ', image_dir)\n\n image_paths = _load_image_paths(image_dir)\n images = _extract_images(image_paths)\n num_images = len(image_paths)\n labels = 
np.ones(num_images, dtype=np.int64)\n\n return images, labels", "def read_from_folder(args, n_values=50):\n images = []\n img_id = 0\n basedir = str(args['input_train'])\n class_dirs = os.listdir(basedir)\n # load images from base directory\n for class_dir in class_dirs:\n image_files = glob.glob(os.path.join(basedir, class_dir, \"*\"))\n\n # test case\n if args['test']:\n image_files = image_files[0:n_values]\n\n for image_file in image_files:\n img = image.OCRImage(pil_image=Image.open(image_file),\n img_id=img_id,\n img_class=class_dir,\n img_hex=image_file[:-4][-4:])\n images.append(img)\n img_id += 1\n\n return images", "def __initDataFromImages(self):\n #Check if the local_db exist\n initial_dirs = os.listdir(os.getcwd())\n is_db_empty = False\n if len(os.listdir(self.base_dir)) == 1: #Empty here means no person data\n [images_dir] = os.listdir(self.base_dir)\n is_db_empty = images_dir == cfg.local[\"IMG_DIR\"]\n if cfg.local[\"DEFAULT_IMGS_DIR\"] in initial_dirs and is_db_empty:\n default_path = os.path.join(os.getcwd(), cfg.local[\"DEFAULT_IMGS_DIR\"])\n self.X, self.y = loadDataFromImagesPath(self.detector, default_path)\n self.le = LabelEncoder()\n #Nothing relate to mapping name to dir here, we don't care about\n #This data because of the user doesn't exist in the database\n self.__savePreProcessedData()", "def load_images(self, folder):\n cwd = os.getcwd()\n dir = cwd + '/' + folder\n files = os.listdir(dir)\n for file in files:\n img = pygame.image.load(dir + '/' + file)\n self.images.append(img)", "def load_data(data_dir):\n \n #Define training, validation, and testing directories, structured for use with ImageFolder Class\n train_dir = data_dir + '/train'\n valid_dir = data_dir + '/valid'\n test_dir = data_dir + '/test'\n \n #Define image transforms for training, validation, and testing\n training_transforms = transforms.Compose([transforms.RandomRotation(30),\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])])\n\n validation_transforms = transforms.Compose([transforms.Resize(224),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])])\n\n testing_transforms = validation_transforms\n\n\n #Load the datasets with ImageFolder\n training_data = datasets.ImageFolder(train_dir, transform = training_transforms)\n validation_data = datasets.ImageFolder(valid_dir, transform = validation_transforms)\n testing_data = datasets.ImageFolder(test_dir, transform = testing_transforms)\n\n #Using the image datasets and the trainforms, define the dataloaders\n training_loader = torch.utils.data.DataLoader(training_data, batch_size = 64, shuffle = True)\n validation_loader = torch.utils.data.DataLoader(validation_data, batch_size = 64, shuffle = False)\n testing_loader = torch.utils.data.DataLoader(testing_data, batch_size = 64, shuffle = False)\n \n return training_loader, validation_loader, testing_loader", "def load_dataset(self):\n\n train_path = os.path.join(self.dataset_path, 'images_background')\n validation_path = os.path.join(self.dataset_path, 'images_evaluation')\n\n # First let's take care of the train alphabets\n for alphabet in os.listdir(train_path):\n if alphabet[0] == '.':\n continue\n alphabet_path = os.path.join(train_path, alphabet)\n\n current_alphabet_dictionary = {}\n\n for character in os.listdir(alphabet_path):\n if character[0] == '.':\n continue\n character_path = 
os.path.join(alphabet_path, character)\n\n current_alphabet_dictionary[character] = os.listdir(\n character_path)\n\n self.train_dictionary[alphabet] = current_alphabet_dictionary\n\n # Now it's time for the validation alphabets\n for alphabet in os.listdir(validation_path):\n alphabet_path = os.path.join(validation_path, alphabet)\n if alphabet[0] == '.':\n continue\n\n current_alphabet_dictionary = {}\n\n for character in os.listdir(alphabet_path):\n if character[0] == '.':\n continue\n character_path = os.path.join(alphabet_path, character)\n\n current_alphabet_dictionary[character] = os.listdir(\n character_path)\n\n self.evaluation_dictionary[alphabet] = current_alphabet_dictionary", "def load_training_images(path_train_images: str, path_train_labels: str, max_projection: bool):\n X = sorted(glob(path_train_images))\n Y = sorted(glob(path_train_labels))\n assert len(X) > 0 and len(Y) > 0, \"Error: No images found in either X or Y.\"\n assert all(Path(x).name == Path(y).name for x, y in zip(X, Y)), \"Error: Filenames in X and Y do not match.\"\n X = list(map(imread, X))\n Y = list(map(imread, Y))\n n_channel = 1 if X[0].ndim == 3 else X[0].shape[-1]\n axis_norm = (0, 1, 2) # normalize channels independently\n # axis_norm = (0,1,2,3) # normalize channels jointly\n if n_channel > 1:\n print(\n \"Normalizing image channels %s.\" % ('jointly' if axis_norm is None or 3 in axis_norm else 'independently'))\n sys.stdout.flush()\n\n X = [normalize(x, 1, 99.8, axis=axis_norm) for x in tqdm(X)]\n Y = [fill_label_holes(y) for y in tqdm(Y)]\n if len(X) == 1:\n print(\n \"Warning: only one training data was provided! It will be used for both training and validation purposes!\")\n X = [X[0], X[0]]\n Y = [Y[0], Y[0]]\n rng = np.random.RandomState(42)\n ind = rng.permutation(len(X))\n n_val = max(1, int(round(0.15 * len(ind))))\n ind_train, ind_val = ind[:-n_val], ind[-n_val:]\n X_val, Y_val = [X[i] for i in ind_val], [Y[i] for i in ind_val]\n X_trn, Y_trn = [X[i] for i in ind_train], [Y[i] for i in ind_train]\n print('number of images: %3d' % len(X))\n print('- training: %3d' % len(X_trn))\n print('- validation: %3d' % len(X_val))\n print(f\"{X[0].shape=}\")\n i = 0\n img, lbl = X[i], Y[i]\n assert img.ndim in (3, 4)\n img = img if img.ndim == 3 else img[..., :3]\n if max_projection:\n plot_img_label_max_projection(img, lbl)\n else:\n plot_img_label_center_slice(img, lbl)\n\n return X, Y, X_trn, Y_trn, X_val, Y_val, n_channel", "def load_data(self):\n for set_name in self.image_dir_path:\n if self.verbose:\n print('\\n> Loading data files for the set: ' + set_name)\n\n # image dir\n image_dir = os.path.join(self.data_path, self.image_dir_path[set_name])\n\n # annotation file path\n annot_filepath = os.path.join(self.data_path, self.annotation_path[set_name])\n\n if 'test' in set_name:\n yield load_data_test(set_name, image_dir, annot_filepath, self.verbose)\n else:\n yield self.load_data_trainval(set_name, image_dir, annot_filepath)", "def load_labeled_data():\n\n images = []\n labels = []\n\n for i in range(1, 10):\n path = (\"selflabeled\", str(i), \"*.jpg\")\n filenames = glob.glob(\"/\".join(path))\n images_one_type = [cv2.imread(img) for img in filenames]\n labels_one_type = [i] * len(images_one_type)\n images += images_one_type\n labels += labels_one_type\n\n return images, labels", "def _process_dataset(all_train_img, all_train_label, all_test_img, all_test_label):\n # Read all training and test images and set the correct path\n train_files = tf.io.gfile.listdir(all_train_img)\n test_files = 
tf.io.gfile.listdir(all_test_img)\n all_train_class_path = [os.path.join(all_train_img, f) for f in train_files]\n all_test_img_path = [os.path.join(all_test_img, f) for f in test_files]\n # Since Labels start at 1, substract -1 for correct indices with starting '0'\n label_np_test = read_labels_txt(all_test_label) - 1\n synsets_np_train = read_labels_mat(all_train_label)\n\n all_train_img_path = []\n label_np_train = []\n for folder in all_train_class_path:\n img_class_files = tf.io.gfile.listdir(folder)\n synset = os.path.basename(os.path.normpath(folder))\n label_train = synsets_np_train.index(synset)\n for f in img_class_files:\n all_train_img_path.append(os.path.join(folder, f))\n label_np_train.append(label_train)\n\n # Create the Datasets for training and test images with corresponding labels\n path_ds_train = tf.data.Dataset.from_tensor_slices((all_train_img_path, label_np_train))\n img_label_ds_train = path_ds_train.map(_process_image)\n path_ds_test = tf.data.Dataset.from_tensor_slices((all_test_img_path, label_np_test))\n img_label_ds_test = path_ds_test.map(_process_image)\n\n print(img_label_ds_train)\n print(img_label_ds_test)\n\n # Check an example image if necessary\n # example, = img_label_ds_test.take(1)\n for i in range(5):\n example, = img_label_ds_train.take(1)\n image, label = example[0], example[1]\n plt.figure(i)\n if image.shape[2] == 1:\n plt.imshow(tf.squeeze(image), cmap='gray')\n else:\n plt.imshow(image/255)\n print(\"Label: {}\".format(label.numpy()))\n plt.show()\n\n return img_label_ds_train, img_label_ds_test", "def load_images(self):\n self.img_paths = sorted(glob(self.img_pattern))\n self.imgs = []\n for idx, this_path in enumerate(self.img_paths):\n try:\n this_img = cv2.imread(this_path)\n if self.downscale > 1:\n this_img = cv2.resize(this_img, (0, 0),\n fx=1/float(self.downscale),\n fy=1/float(self.downscale),\n interpolation=cv2.INTER_LINEAR)\n except Exception as e:\n print(\"error loading img: %s\" % (this_path))\n if this_img is not None:\n self.imgs.append(this_img)\n print(\"loaded img %d size=(%d,%d): %s\" %\n (idx, this_img.shape[0], this_img.shape[1], this_path))\n print(\"loaded %d images\" % (len(self.imgs)))", "def load_images(self, tmx):\n for image_data in tmx.images:\n if image_data:\n image, _, _ = image_data\n self.load_image(image)", "def load_images(folder_path, num_images):\n imgs = np.zeros(shape=[num_images, 400, 400, 3])\n for i in range(1, num_images + 1):\n image_name = \"satImage_%.3d\" % i\n image_path = folder_path + image_name + \".png\"\n if os.path.isfile(image_path):\n print('Loading ' + image_path)\n img = mpimg.imread(image_path)\n\n #imgs[i - 1] = np.asarray(img).reshape(400, 400, 3)\n imgs[i - 1] = img.reshape(400, 400, 3)\n else:\n print('File ' + image_path + ' does not exist')\n return imgs", "def _split_data(self):\n\n # Set training data\n self.train_data = torchvision.datasets.ImageFolder(\n os.path.join(self.path, 'train'),\n transform=self._transform()\n )\n self.classes = self.train_data.classes\n\n # Set validation data\n self.val_data = torchvision.datasets.ImageFolder(\n os.path.join(self.path, 'test'),\n transform=self._transform(train=False)\n )", "def load_images(self):\n images_list = [os.path.join(self.root, image['file_name'])\n for image in self.data['images']]\n\n if self.shuffle:\n random.shuffle(images_list)\n images_list = images_list[:self.max_samples] if self.max_samples is not None and self.max_samples <= len(\n images_list) else images_list\n\n return images_list", "def 
get_data_loaders(img_dir, img_height, img_width, batch_size=8):\n total_count = sum([len(files) for r, d, files in os.walk(img_dir)])\n\n data_transform = torchvision.transforms.Compose(\n [\n transforms.Resize((img_height, img_width)),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n ]\n )\n \n # build a dataset of images from the img_dir directory\n im_folder = torchvision.datasets.ImageFolder(img_dir, transform=data_transform)\n model_dataset = td.datasets.WrapDataset(im_folder)\n\n dataset_loader = torch.utils.data.DataLoader(model_dataset, batch_size=batch_size)\n\n return dataset_loader, total_count", "def preprocessfolder(self):\n imgs, _ = getFilesAndHdf(str(self.in_directory.text()))\n self.img_list = sorted(imgs)\n self.updateImageGroups()", "def load_data(data_dir):\n train_dir = data_dir + '/train'\n valid_dir = data_dir + '/valid'\n test_dir = data_dir + '/test'\n \n for directory in [train_dir, valid_dir, test_dir]:\n if not os.path.isdir(directory):\n raise IOError(\"Directory \" + directory + \" does not exist\")\n \n # Define transforms for the training, validation, and testing sets\n train_transforms = transforms.Compose([transforms.RandomRotation(30),\n transforms.RandomHorizontalFlip(),\n transforms.Resize(255),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])\n data_transforms = transforms.Compose([transforms.Resize(255),\n transforms.CenterCrop(224),\n transforms.ToTensor()])\n \n # Load the datasets with ImageFolder\n train_datasets = datasets.ImageFolder(train_dir, transform=train_transforms)\n valid_datasets = datasets.ImageFolder(valid_dir, transform=data_transforms)\n test_datasets = datasets.ImageFolder(test_dir, transform=data_transforms)\n \n # Using the image datasets and the trainforms, define the dataloaders\n trainloader = torch.utils.data.DataLoader(train_datasets, batch_size=64, shuffle=True)\n validloader = torch.utils.data.DataLoader(valid_datasets, batch_size=32, shuffle=True)\n testloader = torch.utils.data.DataLoader(test_datasets, batch_size=32, shuffle=True)\n \n return {\n 'datasets': {\n 'train': train_datasets,\n 'valid': valid_datasets,\n 'test': test_datasets\n },\n 'loader': {\n 'train': trainloader,\n 'valid': validloader,\n 'test': testloader\n }\n }", "def load_data(self):\n print('Loading {} dataset'.format(self.split))\n data_split_path = os.path.join(self.root_dir, 'splits', '{}.csv'.format(self.split))\n with open(data_split_path,'r') as f:\n reader = csv.reader(f, delimiter=',')\n data_classes = {}\n for i,row in enumerate(reader):\n if i==0:\n continue\n data_classes[row[1]] = 1\n data_classes = data_classes.keys()\n print(data_classes)\n\n n_classes = len(data_classes)\n print('n_classes:{}, n_label:{}, n_unlabel:{}'.format(n_classes,self.n_label,self.n_unlabel))\n dataset_l = np.zeros([n_classes, self.n_label, self.im_height, self.im_width, self.channels], dtype=np.float32)\n if self.n_unlabel>0:\n dataset_u = np.zeros([n_classes, self.n_unlabel, self.im_height, self.im_width, self.channels], dtype=np.float32)\n else:\n dataset_u = []\n\n for i, cls in enumerate(data_classes):\n im_dir = os.path.join(self.root_dir, 'data/{}/'.format(self.split), cls)\n im_files = sorted(glob.glob(os.path.join(im_dir, '*.jpg')))\n np.random.RandomState(self.seed).shuffle(im_files) # fix the seed to keep label,unlabel fixed\n for j, im_file in enumerate(im_files):\n im = np.array(Image.open(im_file).resize((self.im_width, 
self.im_height)), \n np.float32, copy=False)\n if j<self.n_label:\n dataset_l[i, j] = im\n else:\n dataset_u[i,j-self.n_label] = im\n print('labeled data:', np.shape(dataset_l))\n print('unlabeled data:', np.shape(dataset_u))\n \n self.dataset_l = dataset_l\n self.dataset_u = dataset_u\n self.n_classes = n_classes", "def load_svhn_images(folder_path):\n images = []\n for file in os.listdir(folder_path):\n if file.endswith(\".png\"):\n image = Image.open(file)\n image.load()\n # Load image data as 1 dimensional array\n # We're using float32 to save on memory space\n feature = np.array(image, dtype=np.float32)\n images.append(feature)\n\n return images", "def load_data_and_labels(self):\n gen = image.ImageDataGenerator()\n target_size = (224,224)\n if self.preprocess:\n print('Preprocessing data...')\n if not os.path.isdir(self.pproc_dir()):\n os.mkdir(self.pproc_dir())\n \n batch_arr = []\n for ld,segment in [(self.train_dir(), 'train'),\n (self.valid_dir(), 'valid')]:\n # TODO(ness): segment = os.basename(ld)\n flowgen = gen.flow_from_directory(\n ld,\n target_size=target_size,\n shuffle=False,\n class_mode=None,\n batch_size=1)\n # Save the batches using method defined in utils.py\n data = np.concatenate([flowgen.next() for i in range(flowgen.n)])\n batches_dir = self.pproc_dir() + segment + '-bc'\n save_array(batches_dir, data)\n \n # Save the classes.\n cls_dir = self.pproc_dir() + segment + '-cl'\n save_array(cls_dir, flowgen.classes)\n \n batch_arr.append((data, flowgen.classes, flowgen.class_indices))\n \n # Set the data.\n self.training_data = batch_arr[0][0]\n self.validation_data = batch_arr[1][0]\n \n # Classes are zero-indexed and represent a category in\n # numerical form. So if the classes are 'dog' and 'cat',\n # the possible class values will be 0 and 1.\n self.trn_classes = batch_arr[0][1]\n self.val_classes = batch_arr[1][1]\n \n # Labels are the one-hot encoded (i.e. categorical)\n # version of the classes. In other words, if there are\n # 5 classes and an element belongs to class 2,\n # its label will be [0,0,1,0,0] (index 1).\n self.training_labels = to_categorical(batch_arr[0][1])\n self.validation_labels = to_categorical(batch_arr[1][1])\n \n # Class indices are dictionaries of the form\n # {'category_name': 0, 'category_name_2: 1}. They\n # make the mapping between numerical class indices and\n # a human-readable category name. They are (should be...)\n # the same for validation and training, so only load them\n # once, after sanity checking.\n self.cindices = batch_arr[0][2]\n print('Done preprocessing.')\n else:\n print('Loading data...')\n # Load the pre-saved data using methods defined in utils.py. See\n # preprocessing branch for the meaning of the data.\n self.training_data = load_array(self.pproc_dir() + 'train-bc')\n self.validation_data = load_array(self.pproc_dir() + 'valid-bc')\n self.trn_classes = load_array(self.pproc_dir() + 'train-cl')\n self.val_classes = load_array(self.pproc_dir() + 'valid-cl')\n self.training_labels = to_categorical(self.trn_classes)\n self.validation_labels = to_categorical(self.val_classes)\n \n # To get the class indices, we create the generator. 
It's cheap to\n # run since it doesn't actually load all the data.\n flowgen = gen.flow_from_directory(\n self.train_dir(),\n target_size=target_size,\n shuffle=False,\n class_mode=None,\n batch_size=1) \n self.cindices = flowgen.class_indices\n print('Done loading.')", "def build_dataset(self):\n print(\"reading data of images currently , please wait......\")\n x_train, y_train, _ = get_images(self.train_directory)\n x_test, y_test, _ = get_images(self.test_directory)\n x_train, y_train = image_subset(self.num_classes, x_train, y_train)\n x_test, y_test = image_subset(self.num_classes, x_test, y_test)\n x_train = x_train.astype('float32')\n x_test = x_test.astype('float32')\n self.x_train = x_train / 255\n self.x_test = x_test / 255\n self.y_train = utils.to_categorical(y_train, self.num_classes)\n self.y_test = utils.to_categorical(y_test, self.num_classes)", "def _load_data_worker(self,img_dir,lbl_dir):\n data = []\n\n for img,lbl in zip(glob(img_dir+\"/*.jpg\"),glob(lbl_dir+\"/*.txt\")):\n im = np.array(Image.open(img))\n im = make_square_image_with_padding(im, self.core_config.num_colors)\n lbl_fh = open(lbl,encoding='utf-8')\n\n objects = self._get_objects(lbl_fh)\n sorted_objects = sort_object_list(objects)\n object_class = self._get_object_classes(sorted_objects)\n \n image_with_objects = {\n 'img':im,\n 'objects':sorted_objects,\n 'object_class': object_class\n }\n\n image_with_mask = convert_to_mask(image_with_objects, self.core_config)\n\n data.append(image_with_mask)\n lbl_fh.close()\n\n return data", "def populate_train_test_val_dirs_nonrandomly(root_dir=(os.getcwd()), val_ratio=0.15, test_ratio=0.05):\n\n ''' Creating partitions of the data after shuffling '''\n # Folder to copy images from\n src = join(root_dir, 'CoregisteredImages')\n\n all_file_names = [f for f in os.listdir(src) if isfile(join(src, f))]\n\n # Select the number of images to skip between validation images\n val_skip_number = len(all_file_names) / (val_ratio * len(all_file_names))\n\n # Select the number of images to skip between test images\n test_skip_number = len(all_file_names) / (test_ratio * len(all_file_names))\n\n # Get the list of validation file names, test file names, and train file names\n val_file_names = all_file_names[::int(val_skip_number)]\n test_file_names = [filename for filename in all_file_names[::int(test_skip_number + 1)] if filename not in val_file_names]\n train_file_names = [filename for filename in all_file_names if filename not in val_file_names and filename not in test_file_names]\n\n ''' Print the file distribution amongst the folders '''\n print_file_distribution(len(all_file_names), len(train_file_names), len(val_file_names), len(test_file_names))\n\n print(train_file_names)\n\n ''' Copy-Pasting Images '''\n for name in train_file_names:\n shutil.copy(join(root_dir, 'CoregisteredImages', name), root_dir + '/train/CoregisteredImages')\n shutil.copy(join(root_dir, 'BlurryImages', name), root_dir + '/train/BlurryImages')\n for name in val_file_names:\n shutil.copy(join(root_dir, 'CoregisteredImages', name), root_dir + '/val/CoregisteredImages')\n shutil.copy(join(root_dir, 'BlurryImages', name), root_dir + '/val/BlurryImages')\n for name in test_file_names:\n shutil.copy(join(root_dir, 'CoregisteredImages', name), root_dir + '/test/CoregisteredImages')\n shutil.copy(join(root_dir, 'BlurryImages', name), root_dir + '/test/BlurryImages')", "def init_train(self):\n data = self.loader.load_labelled_data(self.conf.split, 'training')\n\n # Initialise unlabelled data iterator\n num_ul = 
0\n if self.conf.ul_mix > 0:\n ul_data = self.loader.load_unlabelled_data(self.conf.split, 'all')\n\n # calculate number of unlabelled images as a proportion of the labelled images\n num_ul = int(data.size() * self.conf.ul_mix)\n num_ul = num_ul if num_ul <= ul_data.size() else ul_data.size()\n log.info('Sampling %d unlabelled images out of total %d.' % (num_ul, ul_data.size()))\n ul_data.sample(num_ul)\n self.gen_X_U = data_utils.generator(self.conf.batch_size, 'overflow', ul_data.images)\n\n # Initialise labelled data iterator\n assert self.conf.l_mix >= 0\n\n # calculate number of labelled images\n num_l = int(data.size() * self.conf.l_mix)\n num_l = num_l if num_l <= data.size() else data.size()\n log.info('Using %d labelled images out of total %d.' % (num_l, data.size()))\n train_images = data.images[:num_l]\n train_masks = data.masks[:num_l]\n\n self.conf.unlabelled_image_num = num_ul\n self.conf.labelled_image_num = num_l\n self.conf.data_len = num_ul if num_ul > num_l else num_l\n self.conf.batches = int(np.ceil(self.conf.data_len / self.conf.batch_size))\n self.conf.save()\n\n self.gen_X_L = data_utils.generator(self.conf.batch_size, 'overflow', train_images, train_masks)\n\n # Initialise real masks iterator for discriminator training, using the real masks from the data CV split.\n self.other_masks = data_utils.generator(self.conf.batch_size, 'overflow', data.masks + 0)", "def load_dataset(data_dir='flowers'):\n train_dir = data_dir + '/train'\n valid_dir = data_dir + '/valid'\n test_dir = data_dir + '/test'\n \n # Apply transformations on training set, leave alone validation and testing sets:\n data_transforms = {\n \"training\" : transforms.Compose([transforms.RandomRotation(30),\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.RandomVerticalFlip(),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])]),\n # For validation and tesing sets, since they are the \"unseen\" data that used to measure the model performance, so they should not be applied by any transformations, however, resizing is stil needed.\n \"validation\" : transforms.Compose([transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])]),\n \"testing\" : transforms.Compose([transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])])\n }\n \n # Load datasets with ImageFolder:\n image_datasets = {\n \"training\" : datasets.ImageFolder(train_dir, transform = data_transforms[\"training\"]),\n \"validation\" : datasets.ImageFolder(valid_dir, transform = data_transforms[\"validation\"]),\n \"testing\" : datasets.ImageFolder(test_dir, transform = data_transforms[\"testing\"])\n }\n \n # Using the image datasets and the trainforms, define the dataloaders: \n dataloaders = {\n \"training\" : torch.utils.data.DataLoader(image_datasets[\"training\"], batch_size = 64, shuffle = True),\n \"validation\" : torch.utils.data.DataLoader(image_datasets[\"validation\"], batch_size = 64),\n \"testing\" : torch.utils.data.DataLoader(image_datasets[\"testing\"], batch_size = 64)\n }\n \n return (dataloaders['training'],\n dataloaders['validation'],\n dataloaders['testing'],\n image_datasets['training'],\n image_datasets['validation'],\n image_datasets['testing'])", "def load_sample_images():\n # Try to import imread from scipy. 
We do this lazily here to prevent\n # this module from depending on PIL.\n try:\n try:\n from scipy.misc import imread\n except ImportError:\n from scipy.misc.pilutil import imread\n except ImportError:\n raise ImportError(\"The Python Imaging Library (PIL) \"\n \"is required to load data from jpeg files\")\n ROOT_Dir = os.getcwd()\n module_path = os.path.join(ROOT_Dir, \"images\")\n with open(os.path.join(module_path, 'README.txt')) as f:\n descr = f.read()\n filenames = [os.path.join(module_path, filename)\n for filename in os.listdir(module_path)\n if filename.endswith(\".jpg\")]\n # Load image data for each image in the source folder.\n images = [imread(filename) for filename in filenames]\n\n return Bunch(images=images,\n filenames=filenames,\n DESCR=descr)", "def load_images(input_dir=\"/tmp/mapswipe/project-1\", n_images=2000, seed=1):\n class_map = {1: \"1\", 0: \"5\"}\n output_dir = \"/Users/thead/git/dreamview/data/\"\n\n X_ = []\n y_ = []\n for new_klass in class_map:\n images = []\n for klass in class_map[new_klass]:\n for img in glob.glob(input_dir + \"/%s/*/*/*/aerial.jpeg\" % klass):\n if os.stat(img).st_size > 0:\n images.append(img)\n\n images = shuffle(images, random_state=seed+42+new_klass)\n images = images[:n_images]\n X_ += images\n y_ += [new_klass] * len(images)\n\n # XXX deduce array size from an actual image\n X = np.zeros((2*n_images, 256*256), dtype=np.ubyte)\n y = np.zeros(2*n_images, dtype=np.int)\n\n for n, (img_path, klass) in enumerate(zip(X_, y_)):\n # the order of these OPs has been chosen on purpose, don't mess\n # without checking what happens\n img = imread(img_path)\n img = equalize_adapthist(img)\n img = rgb2grey(img)\n img = img_as_ubyte(img)\n\n if not n % 10:\n fname = os.path.split(img_path)[:-1]\n fname = os.path.join(*fname, \"aerial-processed.jpeg\")\n imsave(fname, img)\n\n X[n,:] = img.ravel()\n y[n] = klass\n\n return X, y", "def InitDataset(self):\n train_txt = 'ImageSets/Main/train.txt'\n val_txt = 'ImageSets/Main/val.txt'\n annotations = \"Annotations\"\n jpegimages = \"JPEGImages\"\n images_path = train_txt if (self.is_train) else val_txt \n images_path = readTxt(os.path.join(self.path, images_path))\n images_path.pop(-1)\n # rawdata format: [path_2_image, path_2_xml]\n rawData = list()\n for each in images_path:\n xml = os.path.join(self.path, annotations, each + '.xml')\n jpeg = os.path.join(self.path, jpegimages, each + '.jpg')\n rawData.append([jpeg, xml])\n return rawData", "def load_images(self, files, sub_dir):\n\n for f in files:\n self.images.append(Image(f, sub_dir))", "def dataloaders():\n # train data path\n data_train = '../dataset/train/'\n # set transformations\n train_transforms = transforms.Compose([\n transforms.Resize((224, 224)),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])])\n \n train_data = datasets.ImageFolder(data_train, transform = train_transforms)\n trainloader = torch.utils.data.DataLoader(train_data, batch_size = 16, shuffle = True)\n \n return trainloader", "def load_data(data_dir):\n train_dir = data_dir + '/train'\n valid_dir = data_dir + '/valid'\n test_dir = data_dir + '/test'\n\n # define your transforms for the training, validation, and testing sets\n data_transforms_training = transforms.Compose([\n transforms.RandomRotation(30),\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ])\n\n data_transforms_validation = 
transforms.Compose([\n transforms.Resize(224),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ])\n\n data_transforms_test = transforms.Compose([\n transforms.Resize(224),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ])\n\n # Load the datasets with ImageFolder\n image_datasets_training = datasets.ImageFolder(train_dir, transform=data_transforms_training)\n image_datasets_validation = datasets.ImageFolder(valid_dir, transform=data_transforms_validation)\n image_datasets_test = datasets.ImageFolder(test_dir, transform=data_transforms_test)\n\n # Using the image datasets and the trainforms, define the dataloaders\n dataloaders_training = torch.utils.data.DataLoader(image_datasets_training, shuffle=True, batch_size=128)\n dataloaders_validation = torch.utils.data.DataLoader(image_datasets_validation, shuffle=True, batch_size=128)\n dataloaders_test = torch.utils.data.DataLoader(image_datasets_test, shuffle=True, batch_size=128)\n\n return {\"training_dataloader\": dataloaders_training,\n \"validation_dataloader\": dataloaders_validation,\n \"testing_dataloader\": dataloaders_test,\n \"class_to_idx\": image_datasets_training.class_to_idx}", "def load_data():\n dirname = os.path.join('datasets', 'fashion-mnist')\n base = 'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/'\n files = [\n 'train-labels-idx1-ubyte.gz', 'train-images-idx3-ubyte.gz',\n 't10k-labels-idx1-ubyte.gz', 't10k-images-idx3-ubyte.gz'\n ]\n\n paths = []\n for fname in files:\n paths.append(get_file(fname, origin=base + fname, cache_subdir=dirname))\n\n with gzip.open(paths[0], 'rb') as lbpath:\n y_train = np.frombuffer(lbpath.read(), np.uint8, offset=8)\n\n with gzip.open(paths[1], 'rb') as imgpath:\n x_train = np.frombuffer(\n imgpath.read(), np.uint8, offset=16).reshape(len(y_train), 28, 28)\n\n with gzip.open(paths[2], 'rb') as lbpath:\n y_test = np.frombuffer(lbpath.read(), np.uint8, offset=8)\n\n with gzip.open(paths[3], 'rb') as imgpath:\n x_test = np.frombuffer(\n imgpath.read(), np.uint8, offset=16).reshape(len(y_test), 28, 28)\n\n return (x_train, y_train), (x_test, y_test)", "def load_images_from_folder(folder):\n images = []\n for filename in os.listdir(folder):\n img = Image.open(os.path.join(folder,filename))\n images.append(img)\n return images", "def _load_images_labels(self):\n path_dataset_file = self.path_model_id.joinpath(f'{self.set_name}_set.csv')\n \n with path_dataset_file.open(mode='r', newline='') as f:\n csv_reader = reader(f, delimiter=',')\n rows = list(csv_reader)\n\n if self.shuffle:\n rng = default_rng(self.seed)\n rng.shuffle(rows)\n \n self.n_examples = len(rows)\n\n ds_files = tf.data.Dataset.from_tensor_slices(\n [path.join(str(self.path_data), f'label_{row[1]}', row[0])\n for row in rows])\n \n ds_images = ds_files.map(self._load_preprocess_image)\n\n class_labels_enc = self.class_le.fit_transform(\n [row[1] for row in rows])\n\n ds_labels = tf.data.Dataset.from_tensor_slices(\n class_labels_enc)\n\n return ds_images, ds_labels", "def get_training_data(data_dir):\n data = []\n for label in labels:\n path = os.path.join(data_dir, label)\n class_num = labels.index(label)\n img_set = os.listdir(path)\n n = len(img_set)\n for i in range(n):\n try:\n img = img_set[i]\n img_arr = cv2.imread(os.path.join(path, img))\n resized_arr = cv2.resize(img_arr, (img_size, img_size)) # Reshaping images to preferred size\n 
data.append([resized_arr, class_num])\n if i % 100 == 0:\n print(\"Processing images: {}/{}\".format(i + 1, n))\n except Exception as e:\n print(e)\n return np.array(data)", "def read_dataset(image_dir: str = IMAGE_DIR, dump: bool = True, **kwargs):\n global TRAIN_X, TRAIN_Y\n logdir = \"logs/scalars/\" + datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n tensorboard_callback = TensorBoard(log_dir=logdir)\n\n base_model = InceptionV3(include_top=False,\n weights='imagenet',\n input_shape=(WIDHT, HEIGHT, 3))\n for layer in base_model.layers:\n layer.trainable = False\n\n model = Sequential()\n model.add(base_model)\n model.add(GlobalAveragePooling2D())\n # model.add(Dense(512, activation='relu'))\n model.add(Dense(LABEL_SIZE, activation='softmax'))\n model.compile(\n loss='categorical_crossentropy',\n optimizer='adam',\n metrics=['accuracy'],\n )\n\n def define_label(parent_name):\n return \"-\".join(parent_name.split('-')[1:])\n\n for subdir, dirs, files in os.walk(image_dir):\n for file in files:\n path = pathlib.Path(subdir).absolute() / file\n image_label = define_label(path.parent.name)\n TRAIN_Y.append(image_label)\n\n label_encoder = LabelEncoder()\n TRAIN_Y = label_encoder.fit_transform(TRAIN_Y)\n TRAIN_Y = np.array(to_categorical(TRAIN_Y, num_classes=LABEL_SIZE))\n\n count = 0\n current_length_train_x = 0\n\n for subdir, dirs, files in os.walk(image_dir):\n print(f'PATH: {subdir} is processing')\n count += 1\n for file in files:\n path = pathlib.Path(subdir).absolute() / file\n image = load_img(str(path), target_size=WH)\n TRAIN_X.append(np.array(image))\n\n if count % 40 == 0:\n slice_left = current_length_train_x\n slice_right = slice_left + len(TRAIN_X)\n current_length_train_x = slice_right\n # convert to binary matrix (120 labels at all) 2^10 = 128\n # normalize image\n # split image\n\n # TODO: make active on resume iterations\n # if count == 40:\n # # make empty\n # TRAIN_X = []\n # model = load_model(f'{model_name}_iter_40.dump')\n # continue\n\n x_train, x_test, y_train, y_test = train_test_split(\n np.array(TRAIN_X),\n TRAIN_Y[slice_left:slice_right],\n test_size=0.2,\n random_state=69,\n )\n\n # make empty\n TRAIN_X = []\n\n augs_gen.fit(x_train)\n model.fit_generator(\n augs_gen.flow(x_train, y_train, batch_size=25),\n validation_data=(x_test, y_test),\n validation_steps=1000,\n steps_per_epoch=1000,\n epochs=20,\n verbose=1,\n callbacks=[tensorboard_callback],\n )\n del x_train, x_test, y_train, y_test\n model.save(f'{model_name}_iter_{count}.dump')\n\n print(f'Executed {count} / 121')\n print('Prepare to write data on the disk')\n # if dump:\n # with open(DATA_DIR / 'xes.dump', 'wb') as file_x:\n # pickle.dump(TRAIN_X, file_x)\n # with open(DATA_DIR / 'ykes.dump', 'wb') as file_y:\n # pickle.dump(TRAIN_Y, file_y)\n\n # print('Dumped on the disk')\n # time.sleep(5)", "def get_dataset(image_folder: str, img_size: str, self_training: bool = False, no_augmentation: bool = False, valid_dir: str = None):\n \n primary_img_paths = glob.glob(image_folder + os.sep + \"*/*.jpg\")\n primary_img_paths += glob.glob(image_folder + os.sep + \"*/*.png\")\n if valid_dir is None:\n \n y = [os.path.basename(os.path.dirname(path)) for path in primary_img_paths]\n\n train_img_paths, test_img_paths, _, _ = train_test_split(primary_img_paths, y, \n stratify = y, \n test_size = 1 - TRAIN_RATIO)\n #primary_img_paths = undersample(primary_img_paths)\n \n SIZE = len(primary_img_paths)\n shuffle(primary_img_paths)\n \n TRAIN = int(SIZE*TRAIN_RATIO)\n TEST = SIZE - TRAIN\n \n if self_training:\n 
print(\"Using predictions on unlabelled data in train set!\".rjust(70, \"#\").ljust(90, \"#\"))\n secondary_img_path = glob.glob(\"data/secondary_dataset\" + os.sep + \"*/*.jpg\")\n shuffle(secondary_img_path)\n\n #train_img_paths = primary_img_paths[:TRAIN] + secondary_img_path\n train_img_paths += secondary_img_path\n #else:\n # train_img_paths = primary_img_paths[:TRAIN]\n \n #test_img_paths = primary_img_paths[TRAIN:]\n TRAIN = len(train_img_paths) # For display purpose\n \n if self_training:\n TRAIN += len(secondary_img_path) # For display purpose\n else:\n train_img_paths = glob.glob(image_folder + os.sep + \"*/*.jpg\") + glob.glob(image_folder + os.sep + \"*/*.png\")\n test_img_paths = glob.glob(valid_dir + os.sep + \"*/*.jpg\") + glob.glob(valid_dir + os.sep + \"*/*.png\")\n TRAIN = len(train_img_paths)\n TEST = len(test_img_paths)\n\n label_names = os.listdir(image_folder)\n if no_augmentation:\n train_dataset = CustomImageDataset(train_img_paths, get_test_transforms(img_size), label_names)\n else:\n train_dataset = CustomImageDataset(train_img_paths, get_train_transforms(img_size), label_names)\n test_dataset = CustomImageDataset(test_img_paths, get_test_transforms(img_size), label_names)\n class_to_idx = train_dataset.class_to_idx\n \n # Create DataLoader for training\n train_dataloader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True)\n test_dataloader = DataLoader(test_dataset, batch_size=BATCH_SIZE)\n \n \n \n weights = get_class_weights(train_img_paths, class_to_idx, label_names) # For balancing dataset using inverse-frequency\n \n\n print(f\"Number of classes {NUM_CLASSES}, Train size: {TRAIN} images, Test size: {TEST} images, Batch size: {BATCH_SIZE}, Image size: {img_size}x{img_size}\")\n return train_dataloader, test_dataloader, class_to_idx, weights", "def populate_train_test_val_dirs_randomly(root_dir=(os.getcwd()), val_ratio=0.15, test_ratio=0.05):\n\n ''' Creating partitions of the data after shuffling '''\n # Folder to copy images from\n src = root_dir # The folder to copy images from\n\n all_file_names = [f for f in os.listdir(src) if isfile(join(src, f))]\n\n np.random.shuffle(all_file_names)\n\n train_file_names, val_file_names, test_file_names = np.split(np.array(all_file_names),\n [int(len(all_file_names) * (\n 1 - val_ratio + test_ratio)),\n int(len(all_file_names) * (1 - test_ratio))])\n ''' Print the file distribution amongst the folders '''\n print_file_distribution(len(all_file_names), len(train_file_names), len(val_file_names), len(test_file_names))\n\n print(train_file_names)\n\n ''' Copy-Pasting Images '''\n for name in train_file_names:\n shutil.copy(join(root_dir, 'CoregisteredImages', name), root_dir + '/train/CoregisteredImages')\n shutil.copy(join(root_dir, 'BlurryImages', name), root_dir + '/train/BlurryImages')\n for name in val_file_names:\n shutil.copy(join(root_dir, 'CoregisteredImages', name), root_dir + '/val/CoregisteredImages')\n shutil.copy(join(root_dir, 'BlurryImages', name), root_dir + '/val/BlurryImages')\n for name in test_file_names:\n shutil.copy(join(root_dir, 'CoregisteredImages', name), root_dir + '/test/CoregisteredImages')\n shutil.copy(join(root_dir, 'BlurryImages', name), root_dir + '/test/BlurryImages')", "def _load_images(paths):\n assert isinstance(paths, list)\n _R_MEAN = 123.68\n _G_MEAN = 116.78\n _B_MEAN = 103.94\n\n # allocate memory\n images = np.zeros([len(paths), FLAGS.target_height, FLAGS.target_width, 3],\n dtype=np.float32)\n\n # load all images\n pbar = ProgressBar(max_value=len(paths))\n for i 
in range(len(paths)):\n img = sio.imread(paths[i])\n\n # resize images\n img = sresize(img, (FLAGS.target_height, FLAGS.target_width, 3),\n mode='constant', preserve_range=True)\n\n # store images\n images[i] = img.astype(np.float32)\n pbar.update(i)\n\n # mean removal\n images -= [_R_MEAN, _G_MEAN, _B_MEAN]\n return images", "def loading_data(source_path_name, dataset_path, attentive, not_attentive, image_count, train_rate, dimension,\n next_instance, root):\n\n # dictionary to store the four destination path\n dest_path = {}\n for s in SETS:\n for d in SUB_DIRS:\n dest_path[f\"{s}_{d}\"] = os.path.join(os.path.join(dataset_path, s), d)\n\n train_img_count = math.ceil(int(image_count) * float(train_rate[0]) * 0.1)\n test_img_count = image_count - train_img_count\n\n def loading_faces(source_image_set_path, dest_image_set_path, source_image_set):\n \"\"\"\n This is function write data into destination directory.\n\n :param source_image_set_path: directory from where images are coming\n :param dest_image_set_path: directory we created to insert the valid images\n :param source_image_set: list of valid images\n \"\"\"\n dimensions_of_img = find_dimensions_not_attentive_imgs\n if 'attentive' in dest_image_set_path:\n dimensions_of_img = find_dimensions_attentive_imgs\n for image_name in source_image_set:\n\n # loading gray image\n gray_image = cv2.imread(source_image_set_path + \"/\" + image_name, 0)\n\n # find co-ordinates of faces in images\n y1, x2, y2, x1 = dimensions_of_img(*face_recognition.face_locations(gray_image)[0], np.shape(gray_image))\n\n # crop image and resize to particular dimension\n crop_img = gray_image[y1:y2, x1:x2]\n resize_crop_img = cv2.resize(crop_img, (int(dimension[0:3]), int(dimension[0:3])))\n\n # load images from source to destination directory\n cv2.imwrite(dest_image_set_path + \"/\" + image_name, resize_crop_img)\n\n # building progress bar\n next_instance.destroy()\n progress = ThemedTk(theme=\"aqua\")\n progress.title(\"Progress\")\n\n info_label = Label(progress, text=\"Building of Training set is on progress\", font=(\"Times New Roman\", 12, \"bold\"))\n info_label.pack(pady=10)\n progress_bar = Progressbar(progress, orient=HORIZONTAL, length=220, mode='determinate')\n progress_bar.pack(pady=20)\n\n progress_bar['value'] = 0\n progress.update()\n\n # create the dataset structure contain the training and testing set\n create_structure(dataset_path)\n\n # training of attentive images\n loading_faces(source_path_name[\"attentive\"], dest_path[\"train_set_attentive\"], attentive[:train_img_count])\n\n progress_bar['value'] = 25\n progress.update()\n\n # training of not attentive images\n loading_faces(source_path_name[\"not_attentive\"], dest_path[\"train_set_not_attentive\"],\n not_attentive[:train_img_count])\n\n progress_bar['value'] = 50\n info_label['text'] = 'Building of Testing set is on progress'\n progress.update()\n\n # testing of attentive images\n loading_faces(source_path_name[\"attentive\"], dest_path[\"test_set_attentive\"], attentive[-test_img_count:])\n\n progress_bar['value'] = 75\n progress.update()\n\n # testing of not attentive images\n loading_faces(source_path_name[\"not_attentive\"], dest_path[\"test_set_not_attentive\"],\n not_attentive[-test_img_count:])\n\n progress_bar['value'] = 100\n progress.update()\n info_label['text'] = 'Data Processing is completed'\n progress.destroy()\n root.deiconify()\n\n info = open(f\"{dataset_path}/dataset_info.txt\", \"a\")\n info.write(f\"source directory path - 
{source_path_name['attentive'].rsplit('//')[0]}\")\n info.write('\\n\\n######### dataset parameter ##########')\n info.write(f\"\\ndataset name - {dataset_path}\")\n info.write(f\"\\nimage count - {image_count}\")\n info.write(f\"\\ntrain rate - {train_rate}\")\n info.write(f\"\\ndimension - {dimension}\")\n\n info.close()\n\n messagebox.showinfo(\"info\", \"Data Processing is Completed\")", "def _load_ID_files(self):\n if self.mode in ['train_noval', 'train_with_val']:\n if not os.path.exists(self._trn_IDs_file) or not os.path.exists(self._val_IDs_file):\n return False\n\n with open(self._trn_IDs_file, 'r') as f:\n self._trn_IDs = f.readlines()\n self._trn_IDs = [tuple(ID.rstrip().split(\"###\")) for ID in self._trn_IDs]\n\n with open(self._val_IDs_file, 'r') as f:\n self._val_IDs = f.readlines()\n self._val_IDs = [tuple(ID.rstrip().split(\"###\")) for ID in self._val_IDs]\n\n self._img_trn_path = [(self._trn_dir + '/' + ID[0], self._trn_dir + '/' + ID[1]) for ID in self._trn_IDs]\n self._lbl_trn_path = [self._trn_lbl_dir + '/' + ID[2] for ID in self._trn_IDs]\n\n if self.mode == 'train_noval':\n # Train over the original training set (no validation split)\n self._trn_IDs += self._val_IDs\n for ID in self._val_IDs:\n self._img_trn_path.append((self._val_dir + '/' + ID[0], self._val_dir + '/' + ID[1]))\n self._lbl_trn_path.append(self._val_lbl_dir + '/' + ID[2])\n else:\n # Train over the training split, validate over the validation split\n self._img_val_path, self._lbl_val_path, self._pred_lbl_val_path = [], [], []\n for ID in self._val_IDs:\n self._img_val_path.append((self._val_dir + '/' + ID[0], self._val_dir + '/' + ID[1]))\n self._lbl_val_path.append(self._val_lbl_dir + '/' + ID[2])\n lbl_id = ID[2].replace('.pfm', '.flo').replace('.png', '.flo')\n self._pred_lbl_val_path.append(self._val_pred_lbl_dir + '/' + lbl_id)\n\n if self.opts['tb_test_imgs'] is True:\n # Make test images available to model in training mode\n if not os.path.exists(self._tst_IDs_file):\n return False\n\n with open(self._tst_IDs_file, 'r') as f:\n self._tst_IDs = f.readlines()\n self._tst_IDs = [tuple(ID.rstrip().split(\"###\")) for ID in self._tst_IDs]\n\n self._img_tst_path, self._pred_lbl_tst_path = [], []\n for ID in self._tst_IDs:\n self._img_tst_path.append((self._tst_dir + '/' + ID[0], self._tst_dir + '/' + ID[1]))\n self._pred_lbl_tst_path.append(self._tst_pred_lbl_dir + '/' + ID[2])\n\n elif self.mode in ['val', 'val_notrain']:\n # Validate over the validation split\n if not os.path.exists(self._val_IDs_file):\n return False\n\n with open(self._val_IDs_file, 'r') as f:\n self._val_IDs = f.readlines()\n self._val_IDs = [tuple(ID.rstrip().split(\"###\")) for ID in self._val_IDs]\n\n if self.mode == 'val_notrain':\n with open(self._trn_IDs_file, 'r') as f:\n self._trn_IDs = f.readlines()\n self._trn_IDs = [tuple(ID.rstrip().split(\"###\")) for ID in self._trn_IDs]\n self._val_IDs += self._trn_IDs\n\n self._img_val_path, self._lbl_val_path, self._pred_lbl_val_path = [], [], []\n for ID in self._val_IDs:\n self._img_val_path.append((self._val_dir + '/' + ID[0], self._val_dir + '/' + ID[1]))\n self._lbl_val_path.append(self._val_lbl_dir + '/' + ID[2])\n lbl_id = ID[2].replace('.pfm', '.flo').replace('.png', '.flo')\n self._pred_lbl_val_path.append(self._val_pred_lbl_dir + '/' + lbl_id)\n\n else:\n # Test over the entire testing set\n if not os.path.exists(self._tst_IDs_file):\n return False\n\n with open(self._tst_IDs_file, 'r') as f:\n self._tst_IDs = f.readlines()\n self._tst_IDs = 
[tuple(ID.rstrip().split(\"###\")) for ID in self._tst_IDs]\n\n self._img_tst_path, self._pred_lbl_tst_path = [], []\n for ID in self._tst_IDs:\n self._img_tst_path.append((self._tst_dir + '/' + ID[0], self._tst_dir + '/' + ID[1]))\n self._pred_lbl_tst_path.append(self._tst_pred_lbl_dir + '/' + ID[2])\n\n # Build a list of simplified IDs for Tensorboard logging\n if self._trn_IDs is not None:\n self._trn_IDs_simpl = self.simplify_IDs(self._trn_IDs)\n if self._val_IDs is not None:\n self._val_IDs_simpl = self.simplify_IDs(self._val_IDs)\n if self._tst_IDs is not None:\n self._tst_IDs_simpl = self.simplify_IDs(self._tst_IDs)\n\n if _DBG_TRAIN_VAL_TEST_SETS != -1: # Debug mode only\n if self._trn_IDs is not None:\n self._trn_IDs = self._trn_IDs[0:_DBG_TRAIN_VAL_TEST_SETS]\n if self._img_trn_path is not None:\n self._img_trn_path = self._img_trn_path[0:_DBG_TRAIN_VAL_TEST_SETS]\n if self._lbl_trn_path is not None:\n self._lbl_trn_path = self._lbl_trn_path[0:_DBG_TRAIN_VAL_TEST_SETS]\n if self._val_IDs is not None:\n self._val_IDs = self._val_IDs[0:_DBG_TRAIN_VAL_TEST_SETS]\n if self._img_val_path is not None:\n self._img_val_path = self._img_val_path[0:_DBG_TRAIN_VAL_TEST_SETS]\n if self._lbl_val_path is not None:\n self._lbl_val_path = self._lbl_val_path[0:_DBG_TRAIN_VAL_TEST_SETS]\n if self._pred_lbl_val_path is not None:\n self._pred_lbl_val_path = self._pred_lbl_val_path[0:_DBG_TRAIN_VAL_TEST_SETS]\n if self._tst_IDs is not None:\n self._tst_IDs = self._tst_IDs[0:_DBG_TRAIN_VAL_TEST_SETS]\n if self._img_tst_path is not None:\n self._img_tst_path = self._img_tst_path[0:_DBG_TRAIN_VAL_TEST_SETS]\n if self._pred_lbl_tst_path is not None:\n self._pred_lbl_tst_path = self._pred_lbl_tst_path[0:_DBG_TRAIN_VAL_TEST_SETS]\n\n return True", "def load_dataset(self):\n # Get all the files in the directory\n file_list = self.get_file_list()\n\n # Concatenate the data corresponding to a list of files\n data = self.concatenate_file_data(file_list)\n\n # Shuffle the data and create the training and the validation datasets\n data = self.shuffle_data_dictionary(data)\n self.training_dataset, self.validation_dataset = self.split_data_into_training_and_validation(data)", "def load_data(model, set='train', img_rows=128, img_cols=128):\n print('#' * 30)\n print('Loading {} data from file.'.format(set))\n\n # read in the .npy file containing the images\n images_train = np.load('output/processed_data/images_{}.npy'.format(set))\n\n # read in the .npy file containing the target features\n targets_train = np.load('output/processed_data/targets_{}.npy'.format(set))\n\n # scale image pixel values to [0, 1]\n images_train = images_train.astype(np.float32)\n images_train /= 255.\n\n # scale target center coordinates to [-1, 1] (from 0 to 95 initially)\n targets_train = targets_train.astype(np.float32)\n targets_train[:, 0] = (targets_train[:, 0] - (img_rows / 2)) / (img_rows / 2)\n targets_train[:, 1] = (targets_train[:, 1] - (img_rows / 2)) / (img_cols / 2)\n\n # reshape images according to the neural network model intended to be used\n if model == 'cnn':\n print('Indicated model is a CNN, reshaping images with channels first.')\n images_train = images_train.reshape(-1, 1, img_rows, img_cols)\n elif model == 'dnn':\n print('Indicated model is a DNN, flattening out images.')\n images_train = images_train.reshape(images_train.shape[0], img_rows * img_rows)\n\n print('Loading done. 
Pixel values have been scaled to [0, 1] and target center coordinates to [-1, 1].')\n print('#' * 30)\n\n return images_train, targets_train", "def get_test_files(self):\n test_images = np.array(\n glob('/media/data_cifs/pytorch_projects/datasets/BSDS500_crops/data/images/test_nocrop/*.jpg'))\n test_labels = np.array(\n [x.replace('images', 'groundTruth').replace('.jpg', '.npy') for x in test_images])\n test_labels = np.array(\n [np.load(x) for x in test_labels])\n keep_idx = np.array([True if x.shape[0] > x.shape[1] else False for x in test_labels])\n test_images = test_images[keep_idx]\n return test_images", "def _preprocess(self):\n print(\"Note: if root path is changed, the previously generated json files need to be re-generated (delete them first)\")\n if osp.exists(self.imgs_labeled_dir) and \\\n osp.exists(self.imgs_detected_dir) and \\\n osp.exists(self.split_classic_det_json_path) and \\\n osp.exists(self.split_classic_lab_json_path) and \\\n osp.exists(self.split_new_det_json_path) and \\\n osp.exists(self.split_new_lab_json_path):\n return\n\n mkdir_if_missing(self.imgs_detected_dir)\n mkdir_if_missing(self.imgs_labeled_dir)\n\n print(\"Extract image data from {} and save as png\".format(self.raw_mat_path))\n mat = h5py.File(self.raw_mat_path, 'r')\n\n def _deref(ref):\n return mat[ref][:].T\n\n def _process_images(img_refs, campid, pid, save_dir):\n img_paths = [] # Note: some persons only have images for one view\n for imgid, img_ref in enumerate(img_refs):\n img = _deref(img_ref)\n # skip empty cell\n if img.size == 0 or img.ndim < 3: continue\n # images are saved with the following format, index-1 (ensure uniqueness)\n # campid: index of camera pair (1-5)\n # pid: index of person in 'campid'-th camera pair\n # viewid: index of view, {1, 2}\n # imgid: index of image, (1-10)\n viewid = 1 if imgid < 5 else 2\n img_name = '{:01d}_{:03d}_{:01d}_{:02d}.png'.format(campid+1, pid+1, viewid, imgid+1)\n img_path = osp.join(save_dir, img_name)\n imageio.imwrite(img_path, img)\n img_paths.append(img_path)\n return img_paths\n\n def _extract_img(name):\n print(\"Processing {} images (extract and save) ...\".format(name))\n meta_data = []\n imgs_dir = self.imgs_detected_dir if name == 'detected' else self.imgs_labeled_dir\n for campid, camp_ref in enumerate(mat[name][0]):\n camp = _deref(camp_ref)\n num_pids = camp.shape[0]\n for pid in range(num_pids):\n img_paths = _process_images(camp[pid,:], campid, pid, imgs_dir)\n assert len(img_paths) > 0, \"campid{}-pid{} has no images\".format(campid, pid)\n meta_data.append((campid+1, pid+1, img_paths))\n print(\"done camera pair {} with {} identities\".format(campid+1, num_pids))\n return meta_data\n\n meta_detected = _extract_img('detected')\n meta_labeled = _extract_img('labeled')\n\n def _extract_classic_split(meta_data, test_split):\n train, test = [], []\n num_train_pids, num_test_pids = 0, 0\n num_train_imgs, num_test_imgs = 0, 0\n for i, (campid, pid, img_paths) in enumerate(meta_data):\n \n if [campid, pid] in test_split:\n for img_path in img_paths:\n camid = int(osp.basename(img_path).split('_')[2])\n test.append((img_path, num_test_pids, camid))\n num_test_pids += 1\n num_test_imgs += len(img_paths)\n else:\n for img_path in img_paths:\n camid = int(osp.basename(img_path).split('_')[2])\n train.append((img_path, num_train_pids, camid))\n num_train_pids += 1\n num_train_imgs += len(img_paths)\n return train, num_train_pids, num_train_imgs, test, num_test_pids, num_test_imgs\n\n print(\"Creating classic splits (# = 20) ...\")\n 
splits_classic_det, splits_classic_lab = [], []\n for split_ref in mat['testsets'][0]:\n test_split = _deref(split_ref).tolist()\n\n # create split for detected images\n train, num_train_pids, num_train_imgs, test, num_test_pids, num_test_imgs = \\\n _extract_classic_split(meta_detected, test_split)\n splits_classic_det.append({\n 'train': train, 'query': test, 'gallery': test,\n 'num_train_pids': num_train_pids, 'num_train_imgs': num_train_imgs,\n 'num_query_pids': num_test_pids, 'num_query_imgs': num_test_imgs,\n 'num_gallery_pids': num_test_pids, 'num_gallery_imgs': num_test_imgs,\n })\n\n # create split for labeled images\n train, num_train_pids, num_train_imgs, test, num_test_pids, num_test_imgs = \\\n _extract_classic_split(meta_labeled, test_split)\n splits_classic_lab.append({\n 'train': train, 'query': test, 'gallery': test,\n 'num_train_pids': num_train_pids, 'num_train_imgs': num_train_imgs,\n 'num_query_pids': num_test_pids, 'num_query_imgs': num_test_imgs,\n 'num_gallery_pids': num_test_pids, 'num_gallery_imgs': num_test_imgs,\n })\n \n write_json(splits_classic_det, self.split_classic_det_json_path)\n write_json(splits_classic_lab, self.split_classic_lab_json_path)\n\n def _extract_set(filelist, pids, pid2label, idxs, img_dir, relabel):\n tmp_set = []\n unique_pids = set()\n for idx in idxs:\n img_name = filelist[idx][0]\n camid = int(img_name.split('_')[2])\n pid = pids[idx]\n if relabel: pid = pid2label[pid]\n img_path = osp.join(img_dir, img_name)\n tmp_set.append((img_path, int(pid), camid))\n unique_pids.add(pid)\n return tmp_set, len(unique_pids), len(idxs)\n\n def _extract_new_split(split_dict, img_dir):\n train_idxs = split_dict['train_idx'].flatten() - 1 # index-0\n pids = split_dict['labels'].flatten()\n train_pids = set(pids[train_idxs])\n pid2label = {pid: label for label, pid in enumerate(train_pids)}\n query_idxs = split_dict['query_idx'].flatten() - 1\n gallery_idxs = split_dict['gallery_idx'].flatten() - 1\n filelist = split_dict['filelist'].flatten()\n train_info = _extract_set(filelist, pids, pid2label, train_idxs, img_dir, relabel=True)\n query_info = _extract_set(filelist, pids, pid2label, query_idxs, img_dir, relabel=False)\n gallery_info = _extract_set(filelist, pids, pid2label, gallery_idxs, img_dir, relabel=False)\n return train_info, query_info, gallery_info\n\n print(\"Creating new splits for detected images (767/700) ...\")\n train_info, query_info, gallery_info = _extract_new_split(\n loadmat(self.split_new_det_mat_path),\n self.imgs_detected_dir,\n )\n splits = [{\n 'train': train_info[0], 'query': query_info[0], 'gallery': gallery_info[0],\n 'num_train_pids': train_info[1], 'num_train_imgs': train_info[2],\n 'num_query_pids': query_info[1], 'num_query_imgs': query_info[2],\n 'num_gallery_pids': gallery_info[1], 'num_gallery_imgs': gallery_info[2],\n }]\n write_json(splits, self.split_new_det_json_path)\n\n print(\"Creating new splits for labeled images (767/700) ...\")\n train_info, query_info, gallery_info = _extract_new_split(\n loadmat(self.split_new_lab_mat_path),\n self.imgs_labeled_dir,\n )\n splits = [{\n 'train': train_info[0], 'query': query_info[0], 'gallery': gallery_info[0],\n 'num_train_pids': train_info[1], 'num_train_imgs': train_info[2],\n 'num_query_pids': query_info[1], 'num_query_imgs': query_info[2],\n 'num_gallery_pids': gallery_info[1], 'num_gallery_imgs': gallery_info[2],\n }]\n write_json(splits, self.split_new_lab_json_path)", "def train_iter(self, shuffle):\n paths = glob.glob(os.path.join(self.train_dir, \"*.jpg\"))\n 
if shuffle:\n random.shuffle(paths)\n for path in paths:\n label = os.path.basename(path).partition(\".\")[0]\n yield (path, label)", "def collect_train_paths(self):\n\n image_paths = []\n annotation_paths = []\n\n n_images = 10000\n for i in range(1, n_images + 1):\n added = False\n for extension in ['jpg', 'png']:\n image_path = os.path.join(self.folder,\n f'ImagesPart{(i - 1) // 5000 + 1}',\n f'tr_img_{i:05}.{extension}')\n if os.path.exists(image_path):\n image_paths.append(image_path)\n added = True\n break\n if added:\n annotation_paths.append(\n os.path.join(self.folder, 'train_gt_t13', f'tr_img_{i:05}.txt')\n )\n else:\n print(f'Could not find: {image_path[:-3]}*')\n\n return image_paths, annotation_paths", "def load_data(self) -> tuple:\n label_num = {}\n data_set = pathlib.Path(self.path)\n data = []\n\n # create the label lookup dict for verifcation later\n for i, v in enumerate(data_set.iterdir()):\n label_num[v.name] = i\n self.labels[i] = v.name\n # end\n\n # read images\n for img_path in data_set.rglob(\"*.jpg\"):\n lbl = label_num[str(img_path.parent.stem)]\n img = cv2.imread(str(img_path))\n img = cv2.resize(img, self.dims, interpolation=cv2.INTER_AREA)\n\n # flatten RGB data into a vector\n # NOTE: NOT ACTUALLY NECESSARY! \n img.flatten()\n\n # label the sample and append to temp data list\n sample = np.append(lbl, img)\n data.append(sample)\n # end\n\n # partition and package the data (*_ ensures safe unpacking)\n train, test, validate, *_ = Data.partition(data, self.parts, 0.7, 0.2)\n self.train = Data(train)\n self.test = Data(test)\n self.validate = Data(validate)", "def read_training_pixels_from_multi_images(input, subImg_folder, subLabel_folder):\n img_list = io_function.get_file_list_by_ext('.tif', subImg_folder, bsub_folder=False)\n label_list = io_function.get_file_list_by_ext('.tif', subLabel_folder, bsub_folder=False)\n img_list.sort()\n label_list.sort()\n\n if len(img_list) < 1 or len(label_list) < 1:\n raise IOError('No tif images or labels in folder %s or %s' % (subImg_folder, subLabel_folder))\n if len(img_list) != len(label_list):\n raise ValueError('the number of images is not equal to the one of labels')\n\n # read them one by one\n Xs, ys = [], []\n for img, label in zip(img_list, label_list):\n # # test by hlc\n # polygon_index_img = os.path.basename(img).split('_')[-3]\n # # print(polygon_index_img)\n # if polygon_index_img not in [str(83), str(86)] :\n # continue\n\n X_aImg, y_a = read_training_pixels(img, label)\n Xs.append(X_aImg)\n ys.append(y_a)\n\n X_pixels = np.concatenate(Xs, axis=1)\n y_pixels = np.concatenate(ys, axis=0)\n X_pixels = np.transpose(X_pixels, (1, 0))\n basic.outputlogMessage(str(X_pixels.shape))\n basic.outputlogMessage(str(y_pixels.shape))\n\n return X_pixels, y_pixels", "def load_images(image_name_to_label):\n images = []\n labels = []\n\n image_names = os.listdir(DEFAULT_IMG_PATH_EDITED)\n\n # Remove directories\n image_names.remove(\"COVID-19\")\n image_names.remove(\"Normal\")\n image_names.remove(\"ViralPneumonia\")\n\n # Load images from specific image directories (COVID-19, normal, viral pneumonia)\n def load_directory(directory):\n notifier.send(\" Loading from directory: \" + directory + \"...\")\n directory_path = DEFAULT_IMG_PATH_EDITED + os.sep + directory\n directory_image_names = os.listdir(directory_path)\n for i, image_name in enumerate(directory_image_names):\n base_image_name = get_base_image_name(image_name)\n query_name = directory + \"/\" + base_image_name\n query_name = query_name.lower().replace(\" \", 
\"\")\n if query_name in image_name_to_label:\n print(f\" {i / len(directory_image_names) * 100}% - [{image_name}]\")\n image_path = directory_path + os.sep + image_name\n image = get_processed_image(image_path)\n images.append(image)\n labels.append(image_name_to_label[query_name])\n load_directory(\"COVID-19\")\n load_directory(\"Normal\")\n load_directory(\"ViralPneumonia\")\n\n # Load images from default directory\n if LOAD_ALL_IMAGES:\n notifier.send(\" Loading from directory: default...\")\n for i, image_name in enumerate(image_names):\n base_image_name = get_base_image_name(image_name)\n if base_image_name in image_name_to_label:\n print(f\" {i / len(image_names) * 100}% - [{image_name}]\")\n image_path = DEFAULT_IMG_PATH_EDITED + os.sep + image_name\n image = get_processed_image(image_path)\n images.append(image)\n labels.append(image_name_to_label[base_image_name])\n\n return images, labels", "def load_images(folder_path):\n images = []\n # first make image paths list\n # cv2 can take in wildcard args if using glob\n image_paths = glob.glob(folder_path + \"/*\")\n for path in image_paths:\n images.append(cv2.imread(path))\n return (images, image_paths)", "def load_test_batch(self, image_sequence_names):\n def _parse_test_img(img_path):\n with tf.device('/cpu:0'):\n img_buffer = tf.read_file(img_path)\n image_decoded = tf.image.decode_jpeg(img_buffer)\n return image_decoded\n\n image_dataset = tf.data.Dataset.from_tensor_slices(image_sequence_names).map(\n _parse_test_img).batch(self.batch_size).prefetch(self.batch_size*4)\n iterator = image_dataset.make_initializable_iterator()\n return iterator", "def splitTransform(self):\n\t\t#path_merge = \"transform\"\n\t\t#path_train = \"transform/data/\"\n\t\t#path_label = \"transform/label/\"\n\t\tpath_merge = \"train/merge\"\n\t\tpath_train = \"train/image\"\n\t\tpath_label = \"train/label\"\n\t\ttrain_imgs = glob.glob(path_merge+\"/*.\"+self.img_type)\n\t\tfor imgname in train_imgs:\n\t\t\tmidname = imgname[imgname.rindex(\"/\")+1:imgname.rindex(\".\"+self.img_type)]\n\t\t\timg = cv2.imread(imgname)\n\t\t\timg_train = img[:,:,2]#cv2 read image rgb->bgr\n\t\t\timg_label = img[:,:,0]\n\t\t\tcv2.imwrite(path_train+midname+\".\"+self.img_type,img_train)\n\t\t\tcv2.imwrite(path_label+midname+\".\"+self.img_type,img_label)", "def load_data():\n X = load_pickle(config['image_paths']['train_images_pickle'])\n y = load_train_labels()\n y = to_categorical(y)\n test_indices = np.random.choice(len(X), int(len(X) * float(config['model']['test_size'])), replace=False)\n X_train = np.asarray([e for idx, e in enumerate(X) if idx not in test_indices])\n X_test = np.asarray([e for idx, e in enumerate(X) if idx in test_indices])\n y_train = np.asarray([e for idx, e in enumerate(y) if idx not in test_indices])\n y_test = np.asarray([e for idx, e in enumerate(y) if idx in test_indices])\n return X_train, y_train, X_test, y_test", "def load_groundtruths(folder_path, num_images):\n imgs = []\n for i in range(1, num_images + 1):\n image_name = \"satImage_%.3d\" % i\n image_path = folder_path + image_name + \".png\"\n if os.path.isfile(image_path):\n print('Loading ' + image_path)\n img = mpimg.imread(image_path)\n # See if it is better to use dtype = int\n hot_img = convert_image_to_hot(img)\n imgs.append(hot_img)\n else:\n print('File ' + image_path + ' does not exist')\n #imgs = np.around(imgs) # Uncomment if we want to round values.\n imgs_array = np.asarray(imgs)\n return imgs_array", "def read_batch(self):\n imgs = []\n labels = []\n idx = 
np.random.choice(self.nImgs,self.batch_size)\n \tfor i in idx:\n imgs.append(cv2.imread(self.data_files[i]))\n \t labels.append(cv2.imread(self.label_files[i]))\n \timgs,labels = np.array(imgs),np.array(labels)\n imgs = (imgs - self.mean)/self.stddev\n \tlabels = (labels - self.mean)/self.stddev\n return imgs,labels", "def load_images(self):\r\n self.standing_frame = [load_image(\"cat1.png\")]\r\n self.walk_frames_r = [load_image(\"cat2.png\"), load_image(\"cat3.png\"),\r\n load_image(\"cat4.png\")]", "def load_data(data_dir, split=1.0):\n n = 123 * 41\n positive_examples = glob.glob(data_dir + '/positive_set/*.png')\n negative_examples = glob.glob(data_dir + '/negative_set/*.png')\n x = np.zeros((n, len(positive_examples) + len(negative_examples)))\n for k in range(len(positive_examples)):\n x[:,k] = np.reshape(img.imread(positive_examples[k]), (n,))\n for k in range(len(negative_examples)):\n x[:, k+len(positive_examples)] = np.reshape(img.imread(negative_examples[k]), (n,))\n y = np.concatenate((np.ones((1,len(positive_examples))), np.zeros((1,len(negative_examples)))), axis=1)\n arr = np.concatenate((x,y), axis=0)\n part_train = int(math.floor(split * arr.shape[1]))\n np.random.shuffle(arr.T)\n x_train = arr[0:-1,0:part_train]\n y_train = arr[-1,0:part_train]\n x_test = arr[0:-1,part_train:]\n y_test = arr[-1,part_train:]\n if split == 1.0:\n return (x_train.T, y_train.T)\n else:\n return (x_train.T, y_train.T),(x_test.T,y_test.T)", "def loadimages(root):\n imgs = []\n\n def add_json_files(path, ):\n for imgpath in glob.glob(path + \"/*.png\"):\n if exists(imgpath) and exists(imgpath.replace('png', \"json\")):\n imgs.append((imgpath, imgpath.replace(path, \"\").replace(\"/\", \"\"),\n imgpath.replace('png', \"json\")))\n for imgpath in glob.glob(path + \"/*.jpg\"):\n if exists(imgpath) and exists(imgpath.replace('jpg', \"json\")):\n imgs.append((imgpath, imgpath.replace(path, \"\").replace(\"/\", \"\"),\n imgpath.replace('jpg', \"json\")))\n\n def explore(path):\n if not os.path.isdir(path):\n return\n folders = [os.path.join(path, o) for o in os.listdir(path)\n if os.path.isdir(os.path.join(path, o))]\n if len(folders) > 0:\n for path_entry in folders:\n explore(path_entry)\n else:\n add_json_files(path)\n\n explore(root)\n\n return imgs", "def load_datasets():\n from .dataset import num_classes, image_size\n\n train_filename = maybe_download('notMNIST_large.tar.gz', 247336696)\n test_filename = maybe_download('notMNIST_small.tar.gz', 8458043)\n\n train_folders = maybe_extract(train_filename)\n test_folders = maybe_extract(test_filename)\n if not (len(train_folders) == len(test_folders) == num_classes):\n raise Exception('Expected %d folders, one per class. Found %d and %d instead.' 
% (\n num_classes, len(train_folders), len(test_folders)))\n print(\"Dataset folders: %s, %s\" % (train_folders, test_folders))\n\n # load datasets\n train_datasets = maybe_pickle(train_folders, 45000, image_size)\n test_datasets = maybe_pickle(test_folders, 1800, image_size)\n\n return train_datasets, test_datasets", "def load_images(self, image_paths):\n \n fill_list = []\n \n for idx in tqdm(range(len(image_paths))):\n path = image_paths[idx]\n yield cv2.imread(path)", "def split_test_train(train_folder_path, train_labels, test_folder, n_test_images):\n\n os.makedirs(test_folder, exist_ok=True)\n\n data = read_csv_to_list(train_labels)\n # Prepare test labels and move images to new folder\n labels = []\n for img in data[1:n_test_images]:\n # Input and new image paths\n # print(type(train_folder_path),type(img[0]))\n img_path = train_folder_path / (img[0] + \".dcm\")\n new_img_path = test_folder / (img[0] + \".dcm\")\n if Path(img_path).exists(): # there can be several annotations per image\n shutil.move(img_path, new_img_path)\n labels.append(img)\n\n # Prepare train labels. Removes duplicate as we dont need them.\n train_labels = []\n img_list_names = []\n for idx, label in enumerate(data[n_test_images + 1 :]):\n if (label[0] in img_list_names) and (idx != 0):\n continue\n img_list_names.append(label[0])\n train_labels.append(label)\n\n # labels.insert(0, data[0])\n # train_labels.insert(0, data[0])\n return train_labels, labels", "def load_images(subdir):\n with perform(\n name='dbutils load_images',\n before='Loading images to gallery',\n fail='Error occured while loading images to gallery',\n after='Images succesfully loaded'\n ):\n load_dummy_images(subdir)" ]
[ "0.7594195", "0.7443144", "0.74420106", "0.7436539", "0.7346392", "0.73275924", "0.725564", "0.7240028", "0.7179599", "0.7174112", "0.71509814", "0.7121513", "0.7118166", "0.7098444", "0.7097929", "0.70875955", "0.7087226", "0.7081254", "0.70765895", "0.70688915", "0.70635", "0.7044705", "0.69627017", "0.6960372", "0.69588983", "0.6935554", "0.6916715", "0.69013643", "0.68922484", "0.68892926", "0.6883396", "0.68798864", "0.6866", "0.6863393", "0.6840251", "0.68241787", "0.6820585", "0.6818312", "0.68050903", "0.6797905", "0.67802674", "0.677824", "0.6766341", "0.67640376", "0.67358035", "0.6733321", "0.67262423", "0.6716504", "0.67155725", "0.67142034", "0.67077464", "0.6707155", "0.66908556", "0.66755295", "0.66614336", "0.6652132", "0.66334474", "0.6632581", "0.66216904", "0.66150516", "0.66116786", "0.6609099", "0.6606827", "0.66061854", "0.6605781", "0.65734804", "0.65717274", "0.6570753", "0.65642023", "0.6563552", "0.656137", "0.6558471", "0.6556479", "0.6554034", "0.65459824", "0.6540416", "0.65321475", "0.6526756", "0.65175176", "0.6515657", "0.6515585", "0.6507038", "0.6503747", "0.6499968", "0.6496872", "0.6491283", "0.6490947", "0.6486562", "0.6485629", "0.648474", "0.6475737", "0.6474435", "0.64707744", "0.6469035", "0.6459426", "0.64572966", "0.64553535", "0.6454566", "0.64474636", "0.6443889" ]
0.6661814
54
Test the retrieval of the actual rows for "select from HumanResources.Department"
def test_query_subset_response_AdventureWorks2014(self): with open(self.get_test_baseline(u'select_from_humanresources_department_adventureworks2014.txt'), u'r+b', buffering=0) as response_file: request_stream = io.BytesIO() rpc_client = json_rpc_client.JsonRpcClient( request_stream, response_file) rpc_client.start() # Submit a dummy request. parameters = {u'OwnerUri': u'connectionservicetest', u'BatchIndex': 0, u'ResultSetIndex': 0, u'RowsStartIndex': 0, u'RowCount': 16} request = queryservice.QuerySubsetRequest( 3, rpc_client, parameters) self.verify_subset_response(request=request) rpc_client.shutdown()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testSQLString(self): \n val = selectAllSQLStr()\n self.assertEqual(val,\"SELECT * FROM bookStore\")", "def test_get_records(self):\n pass", "def test_query(rgd):\n data = rgd.query(\"test\")\n assert isinstance(data, pd.DataFrame)\n assert data.iloc[0][\"name\"] == \"vm1\"", "def test_department_model(self):\n self.assertEqual(Department.query.count(), 3)", "def test_select(self):\n my_conn = MySQL(*self.conn_params)\n table_name = \"inf_schema\"\n inf_schema = my_conn.get_table(table_name)\n # SELECT * FROM inf_schema\n # WHERE table_name like 'INNO%' AND avg_row_length > 100\n results = my_conn.engine.execute(select('*')\n .where(inf_schema.c.table_name\n .like('INNO%'))\n .where(inf_schema.c.avg_row_length >\n 100)\n .select_from(inf_schema)).fetchall()\n table_df = pd.DataFrame(results)\n self.assertGreaterEqual(len(table_df), 6)", "def test_select_columns(self):\n self.insert()\n data = self.tbl.select()\n assert (u'id',) + tuple(data.columns) == self.tbl.columns", "def test_query_expression_get_success_case(self):\r\n m = self.table.get(self.table.column('test_id') == 0, self.table.column('attempt_id') == 0)\r\n assert isinstance(m, ResultObject)\r\n assert m.test_id == 0\r\n assert m.attempt_id == 0\r\n\r\n q = self.table.objects(self.table.column('test_id') == 0, self.table.column('attempt_id') == 0)\r\n m = q.get()\r\n assert isinstance(m, ResultObject)\r\n assert m.test_id == 0\r\n assert m.attempt_id == 0\r\n\r\n q = self.table.objects(self.table.column('test_id') == 0)\r\n m = q.get(self.table.column('attempt_id') == 0)\r\n assert isinstance(m, ResultObject)\r\n assert m.test_id == 0\r\n assert m.attempt_id == 0", "def test_query_1(self):\n with app.test_request_context():\n agency_none = db.session.query(\n Agency).filter_by(name=\"not here\").first()\n self.assertTrue(agency_none is None)", "def test_fetch_traffic(self):\n assert isinstance(_tabular.fetch_traffic_data(), \n pd.DataFrame)", "def test_execute(self):\n rset = self.connection.execute(self.rql, export_type=\"json\")\n self.assertTrue(len(rset) > 0)", "def test_retrieve_dyn():\n # use the same id as previous test.\n the_id = 'from-test-dyndb'\n\n # get the response using the\n response = dyn_crud.retrieve_record(the_id)\n\n # run test.\n assert True if (response['company']['S'] == 'test company' and\n response['location']['S'] == 'Shambhala') else False", "def useful_test_function(db, query):\n print pd.read_sql_query(query, db)", "def test_get(self):\n params= { \"table\": \"${table}\",\n \"id\": self.${table}_id,\n \"languageid\": \"1033\"\n }\n \n sql = \"select mtp_get_cf1 as result from mtp_get_cf1('%s')\" %(json.dumps(params) )\n \n #print( sql )\n \n self.dbi.execute(sql)\n \n rtn = self.dbi.fetchone()\n \n #print(rtn)\n assert \"id\" in rtn[0][\"result\"][0]\n assert self.${table}_id ==rtn[0][\"result\"][0][\"id\"]\n #assert 'id' in rtn[0]['result'][0]", "def select_query(self):\n query = db.select([self.tables])\n print(query)\n ResultProxy = self.connection.execute(query)\n ResultSet = ResultProxy.fetchall()\n return ResultSet", "def test_execute(self):\n self.mocked_cursor.fetchall.return_value = []\n\n db = database.Database()\n result = db.execute(sql=\"SELECT * from FOO WHERE bar LIKE 'baz'\")\n self.assertTrue(isinstance(result, list))", "def test_get_table(self):\n my_conn = MySQL(*self.conn_params)\n inf_schema = my_conn.get_table('inf_schema') # GET TABLE example\n row_count = my_conn.engine.scalar(\n select([func.count('*')]).select_from(inf_schema)\n )\n # The select.columns parameter is 
not available in the method form of\n # select(), e.g. FromClause.select().\n # See https://docs.sqlalchemy.org/en/latest/core/selectable.html#\n # sqlalchemy.sql.expression.FromClause.select\n my_conn.engine.execute(\n select([inf_schema.c.table_name]).select_from(inf_schema))\n self.assertGreaterEqual(row_count, 100)", "def test_query_simple(self):\n tab = 'query_test'\n cols = ['col1', 'col2']\n rows_in = [{'col1': r[0], 'col2':r[1]} for r in [(1, 2), (2, 4), (3, 6)]]\n\n with self.dbh.table_recreate(tab, cols, 'integer'):\n self.dbh.insert_many(tab, cols, rows_in)\n rows_out = self.dbh.query_simple(tab, cols)\n self.assertEqual(rows_in, rows_out)", "def test_api_can_get_department_by_id(self):\n res = self.client().get(service_url+'/1')\n self.assertEqual(res.status_code, 200)\n self.assertIn('dep 1', str(res.data))", "def test_get_df_db(oracle_connector):\n data_sources_spec = [\n {\n 'domain': 'Oracle test',\n 'type': 'external_database',\n 'name': 'my_oracle_sql_con',\n 'query': 'SELECT * FROM City;',\n }\n ]\n\n data_source = OracleSQLDataSource(**data_sources_spec[0])\n df = oracle_connector.get_df(data_source)\n\n assert not df.empty\n assert df.shape == (50, 5)\n assert set(df.columns) == {'ID', 'NAME', 'COUNTRYCODE', 'DISTRICT', 'POPULATION'}\n\n assert len(df[df['POPULATION'] > 500000]) == 5", "def test_get_education(self):\n current_resume = resume.objects.first()\n expected = list(current_resume.get_education())\n case = list(current_resume.education_set.all())\n self.assertEqual(case,expected)", "def test_field_rendering(self):\r\n ss = SelectStatement('table', ['f1', 'f2'])\r\n self.assertTrue(unicode(ss).startswith('SELECT \"f1\", \"f2\"'), unicode(ss))\r\n self.assertTrue(str(ss).startswith('SELECT \"f1\", \"f2\"'), str(ss))", "def department():\n # Use Pandas to perform the sql query\n stmt = db.session.query(oc_salary_db).statement\n df = pd.read_sql_query(\"select department from oc_salary group by department\", db.session.bind, coerce_float=False)\n\n # Return a list of the column names (sample names)\n return jsonify(list(df[\"department\"].values))", "def test_get_rows_with_stale_direction_query(self):\n\n mock_connector = MagicMock()\n database = Database()\n database.connect(connector_impl=mock_connector)\n\n result = database.get_rows_with_stale_direction()\n\n self.assertIsInstance(result, list)\n\n connection = mock_connector.connect()\n cursor = connection.cursor()\n self.assertTrue(cursor.execute.called)\n\n sql = cursor.execute.call_args[0][0].lower()\n self.assertIn('select', sql)\n self.assertIn('`covidcast`', sql)\n self.assertIn('join', sql)\n self.assertIn('datediff', sql)", "def test_graph_load_query_exec(self):\n provider = QueryProvider(data_environment=\"SecurityGraph\", driver=self.provider)\n df = provider.all_queries.get_alert(\"help\")\n self.assertIsNone(df)\n\n with self.assertRaises(ValueError) as cm:\n df = provider.all_queries.get_alert()\n self.assertIn(\"alert_id\", str(cm.exception))\n\n df = provider.all_queries.get_alert(alert_id=\"foo\")\n self.assertEqual(len(df), 1)\n self.assertIn(\"/foo\", df[\"query\"].iloc[0])", "def test_get_success_case(self):\r\n m = self.table.objects.get(test_id=0, attempt_id=0)\r\n assert isinstance(m, ResultObject)\r\n assert m.test_id == 0\r\n assert m.attempt_id == 0\r\n\r\n q = self.table.objects(test_id=0, attempt_id=0)\r\n m = q.get()\r\n assert isinstance(m, ResultObject)\r\n assert m.test_id == 0\r\n assert m.attempt_id == 0\r\n\r\n q = self.table.objects(test_id=0)\r\n m = q.get(attempt_id=0)\r\n 
assert isinstance(m, ResultObject)\r\n assert m.test_id == 0\r\n assert m.attempt_id == 0", "def test_no_one_in_db(self):\n q = self.generate_query('view_manager_report', ())\n res = self.execute_query(q)\n expected = []\n assert len(res) == 0, f'There is suppose to be an empty summary {res}'\n assert res == expected, f'The result is suppose to be empty {res}'", "def test_custom_query_basic(self):\n\n # Create a simple query statement a\n query = \"SELECT * FROM system.local\"\n statement = SimpleStatement(query)\n # Validate that various types of custom payloads are sent and received okay\n self.validate_various_custom_payloads(statement=statement)", "def test_can_select_with_dict_factory(self):\n self.session.row_factory = dict_factory\n try:\n self.session.execute('SELECT * FROM test1rf.table_num_col')\n except ValueError as e:\n self.fail(\"Unexpected ValueError exception: %s\" % e.message)", "def test_no_exception_on_select(self):\n try:\n self.session.execute('SELECT * FROM test1rf.table_num_col')\n except ValueError as e:\n self.fail(\"Unexpected ValueError exception: %s\" % e.message)", "def test_raw_sql(self):\n\n # GIVEN raw SQL command\n sql = 'SELECT id, date_joined from auth_user WHERE id > %s;'\n params = ['0']\n\n # WHEN executing the SQL\n cursor = TestModel.execute_sql(sql, params)\n\n # THEN it should succeed\n results = cursor.fetchall()\n self.assertEqual(len(results), 1)\n self.assertEqual(len(results[0]), 2)", "def test_agency_model_query_1(self):\n with app.test_request_context():\n agency1 = db.session.query(Agency).filter_by(\n name=\"Mexican Space Agency\").first()\n self.assertEqual(agency1.abbrev, \"AEM\")\n self.assertEqual(agency1.countryCode, \"MEX\")", "def test_query(self):\n conn_object = ParentConnection()\n conn_object.create_tables()\n conn_object.query(\"DROP TABLE meta\")\n conn = psycopg2.connect(**{\"host\": \"localhost\",\n \"database\": \"test\",\n \"user\": \"test\",\n \"password\": \"test\"})\n cur = conn.cursor()\n cur.execute(\"SELECT * from information_schema.tables \"\n \"WHERE table_schema = 'public' \"\n \"AND table_type = 'BASE TABLE';\")\n result = cur.fetchall()\n result = [x[2] for x in result]\n self.assertFalse('meta' in result)\n cur.close()\n conn.close()\n conn_object.delete_tables()", "def test_select():\n assert_that(users.select(), all_of(\n instance_of(SelectQuery),\n has_properties({\n 'collection': users,\n 'model': User,\n\n 'state': has_entries({\n 'properties': None\n })\n })\n ))", "def test_query_expression_count(self):\r\n assert self.table.objects.count() == 12\r\n\r\n q = self.table.objects(self.table.column('test_id') == 0)\r\n assert q.count() == 4", "def test_none_fields_rendering(self):\r\n ss = SelectStatement('table')\r\n self.assertTrue(unicode(ss).startswith('SELECT *'), unicode(ss))\r\n self.assertTrue(str(ss).startswith('SELECT *'), str(ss))", "def test_fetch_from_wide_table(self):\n try:\n self.storage.store(RECORD_TABLE, value=\"a\", extra_column=\"EEK!\")\n a = self.clerk.fetch(Record, 1)\n a.value=\"aa\"\n self.clerk.store(a)\n except AttributeError:\n self.fail(\"shouldn't die when columns outnumber attributes\")", "def __call__(self, dbio, *args, **kwargs):\n sql = self.decorated(dbio, *args, **kwargs)\n if not dbio.testing:\n logger.debug(f\"running select:{sql}\")\n cur = dbio.conn.cursor()\n cur.execute(sql)\n results = cur.fetchall()\n columns = [desc[0] for desc in cur.description]\n cur.close()\n dbio.conn.commit()\n return results, columns\n else:\n logger.debug(\"will run:{sql}\")\n return 
None, None", "def test_simple_query_ot_fetchall(self):\n with self.override_config(\"mysqldb\", dict(trace_fetch_methods=True)):\n conn, tracer = self._get_conn_tracer()\n\n ot_tracer = init_tracer(\"mysql_svc\", tracer)\n with ot_tracer.start_active_span(\"mysql_op\"):\n cursor = conn.cursor()\n cursor.execute(\"SELECT 1\")\n rows = cursor.fetchall()\n assert len(rows) == 1\n\n spans = tracer.pop()\n assert len(spans) == 3\n ot_span, dd_span, fetch_span = spans\n\n # confirm parenting\n assert ot_span.parent_id is None\n assert dd_span.parent_id == ot_span.span_id\n\n assert ot_span.service == \"mysql_svc\"\n assert ot_span.name == \"mysql_op\"\n\n assert_is_measured(dd_span)\n assert dd_span.service == \"mysql\"\n assert dd_span.name == \"mysql.query\"\n assert dd_span.span_type == \"sql\"\n assert dd_span.error == 0\n assert dd_span.get_metric(\"network.destination.port\") == 3306\n assert_dict_issuperset(\n dd_span.get_tags(),\n {\n \"out.host\": u\"127.0.0.1\",\n \"db.name\": u\"test\",\n \"db.system\": u\"mysql\",\n \"db.user\": u\"test\",\n \"component\": u\"mysqldb\",\n \"span.kind\": u\"client\",\n },\n )\n\n assert fetch_span.name == \"mysql.query.fetchall\"", "def test_basic(self):\n\n m = mapper(Order, orders, properties={\n 'description':deferred(orders.c.description)\n })\n\n o = Order()\n self.assert_(o.description is None)\n\n q = create_session().query(m)\n def go():\n l = q.all()\n o2 = l[2]\n print o2.description\n\n orderby = str(orders.default_order_by()[0].compile(bind=testing.db))\n self.assert_sql(testing.db, go, [\n (\"SELECT orders.order_id AS orders_order_id, orders.user_id AS orders_user_id, orders.isopen AS orders_isopen FROM orders ORDER BY %s\" % orderby, {}),\n (\"SELECT orders.description AS orders_description FROM orders WHERE orders.order_id = :param_1\", {'param_1':3})\n ])", "def test_execute_statement_2(self):\n domain_data = test_data_utils.get_trixie_domain_data()\n statement = test_db_utils.domain_stmt(domain_data)\n results_tup = find_domains.execute_statement(self.connection, statement)\n result = results_tup[0]\n type_error = results_tup[1]\n value_error = results_tup[2]\n msg = results_tup[3]\n self.trans.commit()\n domain_table_results = test_db_utils.get_data(test_db_utils.domain_table_query)\n with self.subTest():\n self.assertEqual(len(domain_table_results), 1)\n with self.subTest():\n self.assertEqual(result, 0)\n with self.subTest():\n self.assertFalse(type_error)\n with self.subTest():\n self.assertFalse(value_error)", "def test_getitem_id_column(self):\n self.assertEqual(self.tester['emp_status'], 'EMP')", "def test_execute_transaction_1(self):\n result = find_domains.execute_transaction(self.connection)\n domain_table_results = test_db_utils.get_data(test_db_utils.domain_table_query)\n with self.subTest():\n self.assertEqual(len(domain_table_results), 0)\n with self.subTest():\n self.assertEqual(result, 0)", "def test_get_record(self):\n pass", "def test_api_can_get_all_departments(self):\n res = self.client().get(service_url)\n self.assertEqual(res.status_code, 200)\n self.assertIn('dep 1', str(res.data))\n self.assertIn('dep 2', str(res.data))\n self.assertIn('dep 3', str(res.data))", "def test_sqlquery_resource_collection_get(self, login, web_request, context):\n service = SQLQueryService(context, web_request)\n response = service.collection_get()\n assert 'data' in response\n assert 'pagination' in response", "def test_department_list_view_does_not_require_login(self):\n\n FireDepartment.objects.create(name='Test db', 
population=0)\n c = Client()\n response = c.get('/departments')\n self.assertEqual(response.status_code, 200)", "def test_single_field_is_listified(self):\r\n ss = SelectStatement('table', 'field')\r\n self.assertEqual(ss.fields, ['field'])", "def get_employees_in_department(department_name: str) -> list:\n\n conn = database_connect()\n if(conn is None):\n return None\n cur = conn.cursor()\n\n try:\n # SQL statement and execute\n sql = \"\"\"SELECT Employee.empid, Employee.name\n FROM Employee JOIN EmployeeDepartments USING(empid)\n WHERE EmployeeDepartments.department = %s\"\"\"\n cur.execute(sql, (department_name,))\n\n # Attempt to fetch all rows\n result = cur.fetchall()\n\n if result == None:\n cur.close()\n conn.close()\n return []\n\n employees = []\n for row in result:\n employees.append(\n [row[0], row[1]]\n )\n cur.close()\n conn.close()\n return employees\n except Exception as e:\n print(\"ooo\")\n print(e)\n # If nothing was returned, return empty list\n cur.close()\n conn.close()\n return []\n\n # TODO Dummy Data - Change to be useful!\n # Return the employees in the department.\n # Each \"row\" has: [ empid, name ]\n\n # employees = [\n # [15905, 'Rea Fibbings'],\n # [9438, 'Julia Norville'],\n # [36020, 'Adora Lansdowne'],\n # [98809, 'Nathanial Farfoot'],\n # [58407, 'Lynne Smorthit'],\n # ]\n #\n # return employees", "def test_select_field():", "def test_calendar_query_partstat(self):\n raise SkipTest(\"test unimplemented\")", "def test_execute(self):\n pg_conn = PostgreSQL(*self.conn_params)\n sql = f'''CREATE TABLE table1 (id integer, column1 varchar(100),\n column2 float)'''\n pg_conn.execute(sql)\n sql = \"INSERT INTO table1 (id, column1, column2) \" \\\n \"VALUES (1, 'Varchar text (100 char)', 123456789.0123456789)\"\n pg_conn.execute(sql)\n result = pg_conn.execute(\"SELECT * FROM table1\")\n self.assertEqual('Varchar text (100 char)', result[0]['column1'][0])\n query = 'SELECT * FROM table1 ORDER BY id'\n result = pg_conn.execute(query)\n expected = 1\n current = len(result[0].index)\n self.assertEqual(expected, current)\n pg_conn.execute('DROP TABLE table1')", "def test_fetch_crime(self):\n assert isinstance(_tabular.fetch_crime_data(), \n pd.DataFrame)", "def test_function(self):\n\n s = select([users,\n (users.c.user_id * 2).label('concat'),\n func.count(addresses.c.address_id).label('count')],\n users.c.user_id == addresses.c.user_id,\n group_by=[c for c in users.c]).alias('myselect')\n\n mapper(User, s)\n sess = create_session()\n l = sess.query(User).all()\n for u in l:\n print \"User\", u.user_id, u.user_name, u.concat, u.count\n assert l[0].concat == l[0].user_id * 2 == 14\n assert l[1].concat == l[1].user_id * 2 == 16", "def test_query(config):\n\n p = PostgreSQLProvider(config)\n feature_collection = p.query()\n assert feature_collection.get('type', None) == 'FeatureCollection'\n features = feature_collection.get('features', None)\n assert features is not None\n feature = features[0]\n properties = feature.get('properties', None)\n assert properties is not None\n geometry = feature.get('geometry', None)\n assert geometry is not None", "def select(self, table, columns=['*'], condition='', orderby='', limit=0, isFetchAll=True):\n return True", "def _getData(self, entity, params):\n\n res = []\n entity_code = entity.code\n conn = self._connect(entity)\n try:\n conn.create_function(\"INLIST\", 2, self._inlist)\n\n conn.row_factory = sqlite3.Row\n cursor = conn.cursor()\n\n if not self.exists(entity_code, cursor):\n self.generate_entity(entity)\n\n 
my_departments = \"\"\n my_users = \"\"\n for column in entity.definition[\"columns\"]:\n if \"entityFilterByDepartment\" in column or column[\"type\"] == \"departmentSelector\":\n my_departments = self.getMyDepartments()\n if \"entityFilterByUser\" in column or column[\"type\"] == \"userSelector\":\n my_users = self.getMyUsers()\n\n # Create columnames for each column in entity metadata. Adding too related fields\n columnNames = \"A.id\"\n leftJoin = \"\"\n letter = \"B\"\n thisEntityHaveDepartmentFilter = False\n thisEntityHaveUserFilter = False\n for column in entity.definition[\"columns\"]:\n\n if column[\"type\"] in [\"numeric\", \"text\"]:\n columnNames += f\", A.[{column['field']}]\"\n\n elif column[\"type\"] == \"dateTime\":\n columnNames += f\", strftime('%Y-%m-%d',{column['field']}) as [{column['field']}]\"\n\n elif column[\"type\"] in [\"dropdown\", \"remoteDropdown\"]:\n columnNames += f\", A.[{column['field']}]\"\n columnNames += f\", {letter}.[{column['entityLabel']}] as {letter}_label\"\n leftJoin += f\" LEFT JOIN [{column['entity']}] as {letter} ON {letter}.id = A.{column['field']} \"\n\n if \"entityFilterByDepartment\" in column:\n leftJoin += f' AND ( {letter}.departments is null or INLIST({letter}.departments,\"{my_departments}\") = 1 ) '\n if \"entityFilterByUser\" in column:\n leftJoin += f' AND ( {letter}.users is null or INLIST({letter}.users,\"{my_users}\") = 1 ) '\n\n letter = self.getNextLetter(letter)\n\n elif column[\"type\"] == \"departmentSelector\":\n columnNames += f\", A.[departments]\"\n thisEntityHaveDepartmentFilter = True\n\n elif column[\"type\"] == \"userSelector\":\n columnNames += f\", A.[users]\"\n thisEntityHaveUserFilter = True\n\n elif column[\"type\"] == \"relatedEntity\":\n columnNames += f\", {letter}.[{column['entityLabel']}] as {column.field}\"\n if \"relatedColumnRelation\" in column and column[\"relatedColumnRelation\"]:\n left_on = str(column['relatedColumnRelation']).replace(\n \"#entity#\", \"A\").replace(\"#relatedEntity#\", letter)\n leftJoin += f\" LEFT JOIN [{column['entity']}] as {letter} ON {left_on} \"\n else:\n leftJoin += f\" LEFT JOIN [{column['entity']}] as {letter} ON {letter}.id = A.{column['relatedForeignKey']} \"\n letter = self.getNextLetter(letter)\n\n sortBy = \"A.ID\"\n if \"sortBy\" in params and params[\"sortBy\"]:\n sortBy = f'A.{params[\"sortBy\"]}'\n elif \"sortBy\" in entity.definition and entity.definition[\"sortBy\"]:\n sortBy = f'A.{entity.definition[\"sortBy\"]}'\n where = \"\"\n letter = \"B\"\n\n if thisEntityHaveDepartmentFilter:\n where = f' WHERE ( A.departments is null or INLIST(A.departments,\"{my_departments}\") = 1 ) '\n if thisEntityHaveUserFilter:\n where = f' WHERE ( A.users is null or INLIST(A.users,\"{my_users}\") = 1 ) '\n\n # Add filter for group in related entities\n for column in entity.definition[\"columns\"]:\n if column[\"type\"] in [\"dropdown\", \"remoteDropdown\"] and (\"entityFilterByDepartment\" in column or \"entityFilterByUser\" in column):\n where += \" AND \" if where else \" WHERE \"\n where += f'A.{column[\"field\"]} is null or A.{column[\"field\"]} is not null and {letter}.id is not null '\n letter = self.getNextLetter(letter)\n\n param_list = tuple()\n if \"filters\" in params and params[\"filters\"] and len(params[\"filters\"]) > 0:\n for filter_item in params[\"filters\"]:\n if \"values\" in filter_item and filter_item[\"values\"] and len(filter_item[\"values\"]) > 0:\n if where == \"\":\n where = \" WHERE \"\n else:\n where += \" AND \"\n\n if \".\" in 
str(filter_item[\"field\"]):\n mm_entity = \"MM\" + str(filter_item[\"field\"]).split(\".\")[0]\n mm_field = str(filter_item[\"field\"]).split(\".\")[1]\n if len(filter_item[\"values\"]) == 1:\n where += f\" {mm_entity}.[{mm_field}] = ?\"\n param_list += (append(filter_item[\"values\"][0]),)\n else:\n where += f\" {mm_entity}.[{mm_field}] IN ({','.join( filter_item['values'])})\"\n\n leftJoin += f\" INNER JOIN [{filter_item['field'].split('.')[0]}] as {mm_entity} ON {mm_entity}.{filter_item['relatedManyToManyKey']} = A.id \"\n else:\n if len(filter_item[\"values\"]) == 1:\n if filter_item[\"useLike\"]:\n where += f\" A.[{filter_item['field']}] LIKE ?\"\n param_list += (f\"%{filter_item['values'][0]}%\",)\n else:\n where += f\" A.[{filter_item['field']}] = ?\"\n param_list += (filter_item[\"values\"][0],)\n else:\n if filter_item[\"useLike\"]:\n where += \" ( 1=2 \"\n for filter_value in filter_item[\"values\"]:\n if filter_value:\n where += f\" OR A.[{filter_item['field']}] LIKE ?\"\n param_list += (f\"%{filter_value}%\",)\n where += \" ) \"\n else:\n where += f\" A.[{filter_item['field']}] IN ({','.join( filter_item['values'])})\"\n\n # Add fixed condition\n if \"condition\" in entity.definition and entity.definition[\"condition\"]:\n if where == \"\":\n where = \" WHERE \"\n else:\n where += \" AND \"\n where += entity.definition[\"condition\"]\n\n sql = f\"SELECT {columnNames} FROM {entity_code} as A {leftJoin}\"\n if where != \"\":\n sql += where\n\n sql += f\" ORDER BY {sortBy}\"\n\n if \"fromReg\" in params and params[\"fromReg\"] > 0 and \"toReg\" in params and params[\"toReg\"] > 0:\n sql += F\" LIMIT {params['fromReg']-1}, {params['toReg']-params['fromReg']+1} \"\n\n cursor.execute(sql, param_list)\n for row in cursor:\n dic = {\"id\": row[\"id\"]}\n letter = \"B\"\n\n for column in entity.definition[\"columns\"]:\n\n if column[\"type\"] in [\"numeric\", \"text\", \"dateTime\", \"date\"]:\n dic[column[\"field\"]] = row[column[\"field\"]]\n elif column[\"type\"] in [\"dropdown\", \"remoteDropdown\"]:\n dic[column[\"field\"]] = f\"{row[column['field']]}|-|{row[f'{letter}_label']}\"\n letter = self.getNextLetter(letter)\n elif column[\"type\"] == \"departmentSelector\":\n dic[\"departments\"] = row[\"departments\"]\n elif column[\"type\"] == \"userSelector\":\n dic[\"users\"] = row[\"users\"]\n elif column[\"type\"] == \"relatedEntity\":\n dic[column[\"field\"]] = row[column[\"field\"]]\n letter = self.getNextLetter(letter)\n\n res.append(dic)\n\n finally:\n conn.close()\n\n return res", "def run_select_examples():\n table = \"actors\"\n select_fields = ['name', 'last_name', 'country']\n select_conds1 = {}\n select_conds2 = {'id': 3}\n select_conds3 = {'id': 3, 'name': \"Matt\"}\n print querify.select_from_dict(table, select_fields)\n print querify.select_from_dict(table, select_fields, select_conds1)\n print querify.select_from_dict(table, select_fields, select_conds2)\n print querify.select_from_dict(table, select_fields, select_conds3)", "def test_organization_get(self):\n name = 'spew'\n title = 'S.P.E.W'\n spew = models.Organization(name=name, title=title)\n db.session.add(spew)\n db.session.commit()\n # scenario 1: when neither name or buid are passed\n with self.assertRaises(TypeError):\n models.Organization.get()\n # scenario 2: when buid is passed\n buid = spew.buid\n get_by_buid = models.Organization.get(buid=buid)\n self.assertIsInstance(get_by_buid, models.Organization)\n assert title == get_by_buid.title\n # scenario 3: when username is passed\n get_by_name = 
models.Organization.get(name=name)\n self.assertIsInstance(get_by_name, models.Organization)\n assert title == get_by_name.title\n # scenario 4: when defercols is set to True\n get_by_name_with_defercols = models.Organization.get(name=name, defercols=True)\n self.assertIsInstance(get_by_name_with_defercols, models.Organization)\n assert title == get_by_name_with_defercols.title", "def test(self):\n\n # Easy: queries on multidimensional tables are not implemented yet!\n self.assertRaises(NotImplementedError, self.table.where, 'c_bool')", "def ddd():\n return get_data(db, MyTable)", "def test_execute(self):\n my_conn = MySQL(*self.conn_params)\n sql = f'''CREATE TABLE table1 (id integer, column1 varchar(100),\n column2 double)'''\n my_conn.execute(sql)\n table1 = my_conn.get_table('table1')\n self.assertEqual(table1.c.column1.name, 'column1')\n sql = \"INSERT INTO table1 (id, column1, column2) \" \\\n \"VALUES (1, 'Varchar text (100 char)', 123456789.0123456789)\"\n my_conn.execute(sql) # EXECUTE example\n # The select.columns parameter is not available in the method form of\n # select(), e.g. FromClause.select().\n # See https://docs.sqlalchemy.org/en/latest/core/selectable.html#\n # sqlalchemy.sql.expression.FromClause.select\n results = my_conn.engine.execute(\n select([table1.c.column1]).select_from(table1)).fetchall()\n expected = 'Varchar text (100 char)'\n current = results[0][0]\n # this returns a tuple inside a list and I dont know why\n self.assertEqual(expected, current)\n query = 'select * from table1 order by id'\n result = my_conn.execute(query)\n expected = 1\n current = len(result[0].index)\n self.assertEqual(expected, current)\n my_conn.drop('table1')", "def extract_data_from_DB(query, dao_object, *query_params):\n\n local_query = None\n\n if(len(query_params) == 0):\n local_query = query\n else:\n local_query = query % query_params\n\n #print(local_query)\n\n # Extract data\n #output_df = 0\n output_df = pd.DataFrame(dao_object.get(local_query))\n column_names = dao_object.get_column_name()\n output_df.columns = column_names\n\n return output_df", "def test_lookup_with_non_string_value(self):\n modeladmin = DepartmentFilterEmployeeAdmin(Employee, site)\n request = self.request_factory.get(\"/\", {\"department\": self.john.department.pk})\n request.user = self.alfred\n changelist = modeladmin.get_changelist_instance(request)\n\n queryset = changelist.get_queryset(request)\n\n self.assertEqual(list(queryset), [self.john])\n\n filterspec = changelist.get_filters(request)[0][-1]\n self.assertEqual(filterspec.title, \"department\")\n choices = list(filterspec.choices(changelist))\n self.assertEqual(choices[1][\"display\"], \"DEV\")\n self.assertIs(choices[1][\"selected\"], True)\n self.assertEqual(\n choices[1][\"query_string\"], \"?department=%s\" % self.john.department.pk\n )", "def test_data_dept_user(self):\n url = '/api/options/?list=dept_user'\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n # User 1 will be present in the response.\n self.assertContains(response, self.user1.email)\n # Make a user inactive to test excludion\n self.user1.active = False\n self.user1.save()\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n # User 1 won't be present in the response.\n self.assertNotContains(response, self.user1.email)", "def test_execute_transaction_2(self):\n domain_data = test_data_utils.get_trixie_domain_data()\n statement = test_db_utils.domain_stmt(domain_data)\n statements = [statement]\n result = 
find_domains.execute_transaction(self.connection, statements)\n domain_table_results = test_db_utils.get_data(test_db_utils.domain_table_query)\n with self.subTest():\n self.assertEqual(len(domain_table_results), 1)\n with self.subTest():\n self.assertEqual(result, 0)", "def test_query_wrapper_operational_error(self):\n\n _session = self.sessionmaker()\n\n _session.begin()\n self.addCleanup(_session.rollback)\n q = _session.query(self.Foo).filter(\n self.Foo.counter == sqla.func.imfake(123))\n matched = self.assertRaises(sqla.exc.OperationalError, q.all)\n self.assertIn(\"no such function\", str(matched))", "def test_table_false_positives(self):\n pass", "def testQueryColumns(self):\n scaffolder = plaso_sqlite.PlasoSQLiteScaffolder()\n test_string = (\n 'SELECT foobar as Foo, foobar.dot, random, reallylong AS long FROM '\n 'foobarengine WHERE foobar = 1')\n expected_columns = set(['foo', 'dot', 'random', 'long'])\n self._RunQueryTests(scaffolder, test_string, expected_columns)\n\n test_string = (\n 'select one, two as three, four as five, f.eight as EIGHTE FROM '\n 'foobar f, scode s WHERE f.id = s.id ORDER BY one')\n expected_columns = set(['one', 'three', 'five', 'eighte'])\n self._RunQueryTests(scaffolder, test_string, expected_columns)\n\n test_string = (\n 'this should not produce anything...')\n self._RunQueryTests(scaffolder, test_string, set())", "def test_iterating_query_with_arguments(self):\n with Database(connstr) as db:\n for row in db.query(\"\"\"select i, dc from test where i = %s or i = %s\"\"\", 2, 3):\n\n drow = row.as_dict\n i, dc = drow['i'], drow['dc']\n assert len(row) == 2\n assert dc == Decimal('0.{}'.format(i))\n assert repr(row) == '<Row {\"dc\": \"%s\", \"i\": %s}>' % (dc, i)", "def testQuery(self):\n # Clear anything first\n for i in range(10):\n row_name = \"aff4:/row:%s\" % i\n data_store.DB.Set(row_name, \"metadata:%s\" % i, str(i), timestamp=5,\n token=self.token)\n data_store.DB.Set(row_name, \"aff4:type\", \"test\", token=self.token)\n\n # Retrieve all subjects with metadata:5 set:\n rows = [row for row in data_store.DB.Query(\n [\"metadata:5\"], data_store.DB.filter.HasPredicateFilter(\"metadata:5\"),\n subject_prefix=\"aff4:/row:\", token=self.token)]\n\n self.assertEqual(len(rows), 1)\n self.assertEqual(rows[0][\"subject\"][0][0], \"aff4:/row:5\")\n self.assertEqual(rows[0][\"metadata:5\"][0][0], \"5\")\n self.assertEqual(rows[0][\"metadata:5\"][0][1], 5)", "def test_fetch_one():\n sample_uuid = get_sample_id()\n response = requests.get(f'http://localhost:5000/api/persons/{sample_uuid}')\n data = response.json()\n\n assert response.status_code == 200\n for field in FIELDS:\n assert field in data", "def test_rowcount_select(cursor: pyodbc.Cursor):\n cursor.execute(\"create table t1(i int)\")\n count = 4\n for i in range(count):\n cursor.execute(\"insert into t1 values (?)\", i)\n cursor.execute(\"select * from t1\")\n assert cursor.rowcount == -1\n\n rows = cursor.fetchall()\n assert len(rows) == count\n assert cursor.rowcount == -1", "def test_result_query_no_run_id_exception(cbcsdk_mock):\n api = cbcsdk_mock.api\n result_query = api.select(Result)\n # raise ApiError when missing run_id (from the select statement)\n with pytest.raises(ApiError):\n result_query._count()\n assert result_query._run_id is None\n results = None\n with pytest.raises(ApiError):\n results = [res for res in result_query._perform_query()]\n assert results is None", "def test_table_has_no_rows(self):\n models.SourceDataset.objects.all().delete()\n response = 
self.client.get(self.get_url(self.study.pk))\n context = response.context\n table = context['source_dataset_table']\n self.assertEqual(len(table.rows), 0)", "def mock_datatable_get(self, request):\n\n data = {\"rows\": DTResilientMock.get_datatable_rows(\n self.mock_data_table_rows)}\n\n return create_response(request,\n status_code=200,\n content=b(dumps(data)))", "def test_organization_all(self):\n gryffindor = models.Organization(name='gryffindor')\n ravenclaw = models.Organization(name='ravenclaw')\n db.session.add(gryffindor)\n db.session.add(ravenclaw)\n db.session.commit()\n # scenario 1: when neither buids nor names are given\n self.assertEqual(models.Organization.all(), [])\n # scenario 2: when buids are passed\n orglist = [gryffindor, ravenclaw]\n orgids = [gryffindor.buid, ravenclaw.buid]\n all_by_buids = models.Organization.all(buids=orgids)\n self.assertIsInstance(all_by_buids, list)\n self.assertCountEqual(all_by_buids, orglist)\n # scenario 3: when org names are passed\n names = [gryffindor.name, ravenclaw.name]\n all_by_names = models.Organization.all(names=names)\n self.assertIsInstance(all_by_names, list)\n self.assertCountEqual(all_by_names, orglist)\n # scenario 4: when defercols is set to True for names\n all_by_names_with_defercols = models.Organization.all(names=names)\n self.assertIsInstance(all_by_names_with_defercols, list)\n self.assertCountEqual(all_by_names_with_defercols, orglist)\n # scenario 5: when defercols is set to True for buids\n all_by_buids_with_defercols = models.Organization.all(buids=orgids)\n self.assertIsInstance(all_by_buids_with_defercols, list)\n self.assertCountEqual(all_by_buids_with_defercols, orglist)", "def test_retrieve_l_organization(self):\n pass", "def test_fetch(self):\n\n class Foo(Base):\n _table = ClassReadonlyProperty('foos')\n _primary_key = ClassReadonlyProperty('_id')\n\n _id = IDField()\n name = StringField()\n age = IntField()\n\n foos = [{\n '_id': 'id_0',\n 'name': 'Bill',\n 'age': 10,\n }, {\n '_id': 'id_1',\n 'name': 'John',\n 'age': 30\n }, {\n '_id': 'id_2',\n 'name': 'Mary',\n 'age': 20\n }, {\n '_id': 'id_3',\n 'name': 'Tommy',\n 'age': 40\n }]\n db.foos.insert_many(foos)\n\n r = Foo.fetch({})\n self.assertEqual(r.total, 4)\n self.assertItemsEqual([f.name for f in r], [f['name'] for f in foos])\n\n r = Foo.fetch({'_id': 'id_2'})\n self.assertEqual(r.total, 1)\n self.assertEqual(r[0]._id, 'id_2')\n self.assertEqual(r[0].name, 'Mary')\n self.assertEqual(r[0].age, 20)\n\n r = Foo.fetch({'age': {'$gt': 20}})\n self.assertEqual(r.total, 2)\n self.assertTrue(r[0].age > 20)\n self.assertTrue(r[1].age > 20)\n\n r = Foo.fetch({'name': 'John'})\n self.assertEqual(r.total, 1)\n self.assertEqual(r[0].name, 'John')", "def test_no_results(self):\n self.mocked_cursor.description = None\n\n db = database.Database()\n result = db.execute(sql=\"SELECT * from FOO WHERE bar LIKE 'baz'\")\n self.assertTrue(isinstance(result, list))", "def test_entity_query_initialization(self, test_domain):\n dao = test_domain.repository_for(Person)._dao\n query = dao.query\n\n assert query is not None\n assert isinstance(query, QuerySet)\n assert query._criteria == QuerySet(dao, test_domain, Person)._criteria", "def test_employee_model(self):\n self.assertEqual(Employee.query.count(), 2)", "def test_execute_statement_7(self):\n domain_data = test_data_utils.get_trixie_domain_data()\n # \"Description\": \"ParB-like nuclease domain\"\n description = domain_data[\"Description\"]\n description = description.replace(\"nuclease domain\", \"nuclease % 
domain\")\n domain_data[\"Description\"] = description\n statement = test_db_utils.domain_stmt(domain_data)\n results_tup = find_domains.execute_statement(self.connection, statement)\n result = results_tup[0]\n type_error = results_tup[1]\n value_error = results_tup[2]\n msg = results_tup[3]\n self.trans.commit()\n domain_table_results = test_db_utils.get_data(\n test_db_utils.domain_table_query)\n with self.subTest():\n self.assertEqual(len(domain_table_results), 0)\n with self.subTest():\n self.assertEqual(result, 1)\n with self.subTest():\n self.assertFalse(ERROR_MSG in msg)\n with self.subTest():\n self.assertTrue(type_error)\n with self.subTest():\n self.assertFalse(value_error)", "def get_departments() -> list:\n return Department.query.all()", "def test_value_query(self):\n conn_object = ParentConnection()\n conn_object.create_tables()\n biom_query = \"INSERT INTO bioms (studyID,tax_num,sample_num) \" \\\n \"VALUES (%s,%s,%s)\"\n conn_object.value_query(biom_query, values=(\"test\", 300, 200))\n conn = psycopg2.connect(**{\"host\": \"localhost\",\n \"database\": \"test\",\n \"user\": \"test\",\n \"password\": \"test\"})\n cur = conn.cursor()\n cur.execute(\"SELECT studyID \"\n \"FROM bioms \"\n \"LIMIT 1;\")\n result = cur.fetchall()\n self.assertEqual(result[0][0], 'test')\n cur.close()\n conn.close()\n conn_object.delete_tables()", "def test_fetchall(self):\n result = export.processExport(houseId=1)\n #We should have 2 locations * 1 sensor * 10 days of data here\n # 2 * 1 * (288 * 10) == 5670\n #print result.shape\n\n #result.to_csv(\"temp.csv\")\n #Do we get the right object\n self.assertEqual(type(result), pandas.DataFrame)\n #And is it the right size\n self.assertEqual(result.shape, (2880, 2)) #So 2880 samples from two sensors\n #And the right range of data\n self.assertEqual(result.index[0], datetime.datetime(2013, 01, 01))\n self.assertEqual(result.index[-1], datetime.datetime(2013, 01, 10, 23, 55))", "def test_get_df(mocker):\n spy_load_metadata = mocker.spy(MetaData, 'load_document')\n expected_df = pd.read_json('tests/odata/fixtures/records.json', orient='records')\n\n provider = ODataConnector(\n name='test',\n baseroute='http://services.odata.org/V4/Northwind/Northwind.svc/',\n auth={'type': 'basic', 'args': ['u', 'p']},\n )\n\n data_source = ODataDataSource(\n domain='test',\n name='test',\n entity='Orders',\n query={\n '$filter': \"ShipCountry eq 'France'\",\n '$orderby': 'Freight desc',\n '$skip': 50,\n '$top': 3,\n },\n )\n\n try:\n df = provider.get_df(data_source)\n sl = ['CustomerID', 'EmployeeID', 'Freight']\n assert df[sl].equals(expected_df[sl])\n except socket.error:\n pytest.skip('Could not connect to the standard example OData service.')\n\n assert spy_load_metadata.call_count == 1\n args, _ = spy_load_metadata.call_args\n assert args[0].url.endswith('/$metadata')\n\n provider.auth = None\n try:\n provider.get_df(data_source)\n except socket.error:\n pytest.skip('Could not connect to the standard example OData service.')", "def test_basic(self):\n m = mapper(User, users, properties = dict(\n addresses = relation(mapper(Address, addresses), lazy=None)\n ))\n q = create_session().query(m)\n l = [None]\n def go():\n x = q.filter(users.c.user_id == 7).all()\n x[0].addresses\n l[0] = x\n self.assert_sql_count(testing.db, go, 1)\n\n self.assert_result(l[0], User,\n {'user_id' : 7, 'addresses' : (Address, [])},\n )", "def test_select(self):\n db=Database(\"test.db\")\n db.query(\"insert into game (user_a, user_b, winner, board) values('a', 'b', 'sinner', 
'asdf');\");\n items=db.query(\"select * from game\")\n for item in items:\n if item[\"user_a\"]==\"a\":\n self.assertEqual(True, True)\n return\n self.assertEqual(False, True)", "async def test_column_names(database_url, select_query):\n async with Database(database_url) as database:\n async with database.transaction(force_rollback=True):\n # insert values\n query = notes.insert()\n values = {\"text\": \"example1\", \"completed\": True}\n await database.execute(query, values)\n # fetch results\n results = await database.fetch_all(query=select_query)\n assert len(results) == 1\n\n assert sorted(results[0].keys()) == [\"completed\", \"id\", \"text\"]\n assert results[0][\"text\"] == \"example1\"\n assert results[0][\"completed\"] == True", "def get_all_departments(self):\n sql = 'SELECT name FROM department'\n try:\n self.control.execute(sql)\n except Error:\n print(f\"[X] {Error}\")\n return 1\n answer = {}\n counter = 1\n for i in self.control.fetchall():\n answer.update(\n {\n counter: str(i[0]).replace(\"'\", \"\")\n }\n )\n counter += 1\n return answer", "def test_get_rows_to_compute_direction_query(self):\n\n args = (\n 'source',\n 'signal',\n 'geo_type',\n 'time_value',\n 'geo_value',\n )\n mock_connector = MagicMock()\n database = Database()\n database.connect(connector_impl=mock_connector)\n\n result = database.get_rows_to_compute_direction(*args)\n\n self.assertIsInstance(result, list)\n\n connection = mock_connector.connect()\n cursor = connection.cursor()\n self.assertTrue(cursor.execute.called)\n\n sql, args = cursor.execute.call_args[0]\n expected_args = (\n 'time_value',\n 'source',\n 'signal',\n 'geo_type',\n 'geo_value',\n 'time_value',\n )\n self.assertEqual(args, expected_args)\n\n sql = sql.lower()\n self.assertIn('select', sql)\n self.assertIn('`covidcast`', sql)\n self.assertIn('datediff', sql)", "def __select(self, fields, tables, conditions, values, order):\n\n start_table = 0\n end_table = -1\n for i in range(0, len(tables)):\n if tables[i] == \"Varieties\":\n tables.pop(i)\n for j in range(0, len(conditions)):\n if conditions[j].startswith(\"resource_id\"):\n index = self._match_variety_table(values[j])\n if conditions[j].endswith(\" =\"):\n start_table = index\n end_table = index + 1\n elif conditions[j].endswith(\" <\") or conditions[j].endswith(\n \" <=\"\n ):\n end_table = index + 1\n elif conditions[j].endswith(\" >\") or conditions[j].endswith(\n \" >=\"\n ):\n start_table = index\n\n tables.extend(self.variety_tables[start_table:end_table])\n\n request = \"SELECT {fields} FROM {tables}{conditions}\"\n\n if conditions:\n cond_list = \" WHERE \"\n for index, cond in enumerate(conditions):\n cond_list += \"(\" + cond\n if values[index] == \"NULL\":\n cond_list += \" IS %s)\"\n values[index] = None\n elif values[index] == \"NOT NULL\":\n cond_list += \" IS NOT %s)\"\n values[index] = None\n else:\n cond_list += \" %s)\"\n if index < len(conditions) - 1:\n cond_list += \" AND \"\n else:\n cond_list = \"\"\n\n for table in tables:\n end_request = \" UNION \".join(\n [\n request.format(\n fields=\", \".join(fields),\n tables=table,\n conditions=cond_list,\n )\n for table in tables\n ]\n )\n if order:\n ord_list = \" ORDER BY {0}\".format(\", \".join(order))\n\n end_request = end_request + ord_list\n\n cursor = self.conn.cursor(dictionary=True)\n results = []\n _logger.debug(\"%r, %r\" % (end_request, values * len(tables)))\n try:\n if values:\n cursor.execute(end_request, tuple(values * len(tables)))\n else:\n cursor.execute(end_request)\n except Exception as 
error:\n _logger.exception(str(error))\n else:\n for row in cursor.fetchall():\n result = {}\n for key in row.keys():\n result[key] = row[key]\n if row[key] == \"NULL\":\n result[key] = None\n results.append(result)\n\n aux_results = copy(results)\n for i in range(0, len(aux_results)):\n aux_results[i].pop(\"hash\", None)\n aux_results[i].pop(\"content\", None)\n\n _logger.debug(\n \"SELECT REQUEST ON {0} OK. Results: {1}\".format(\n \", \".join(tables), aux_results\n )\n )\n cursor.close()\n return results", "def test_execute_statement_10(self):\n domain_data = test_data_utils.get_trixie_domain_data()\n # \"Description\": \"ParB-like nuclease domain\"\n description = domain_data[\"Description\"]\n description = description.replace(\"nuclease domain\", \"nuclease % wdomain\")\n domain_data[\"Description\"] = description\n statement = test_db_utils.domain_stmt(domain_data)\n statement = statement.replace(\"%\", \"%%\")\n results_tup = find_domains.execute_statement(self.connection, statement)\n result = results_tup[0]\n type_error = results_tup[1]\n value_error = results_tup[2]\n msg = results_tup[3]\n self.trans.commit()\n domain_table_results = test_db_utils.get_data(\n test_db_utils.domain_table_query)\n with self.subTest():\n self.assertEqual(len(domain_table_results), 1)\n with self.subTest():\n self.assertEqual(result, 0)\n with self.subTest():\n self.assertFalse(type_error)\n with self.subTest():\n self.assertFalse(value_error)", "def test_retrieve_l_organizations(self):\n pass", "def test_data_org_unit(self):\n url = '/api/options/?list=org_unit'\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n # Org unit names will be present in the response.\n self.assertContains(response, self.dept.name)\n self.assertContains(response, self.div1.name)\n self.assertContains(response, self.div2.name)", "def test_count_all_rows_query(self):\n\n mock_connector = MagicMock()\n database = Database()\n database.connect(connector_impl=mock_connector)\n connection = mock_connector.connect()\n cursor = connection.cursor()\n cursor.__iter__.return_value = [(123,)]\n\n num = database.count_all_rows()\n\n self.assertEqual(num, 123)\n self.assertTrue(cursor.execute.called)\n\n sql = cursor.execute.call_args[0][0].lower()\n self.assertIn('select count(1)', sql)\n self.assertIn('from `covidcast`', sql)", "def query(self, sql):\n self.cursor.execute(sql)\n result = self.cursor.fetchall()\n return result", "def select_from_DB (itemToSearch, tableWhereToSearch):\n session = open_session()\n s = select([itemToSearch.tableWhereToSearch])\n result = session.execute(s)\n for row in result:\n print(row)", "def test_execute_statement_1(self):\n gene_table_results1 = test_db_utils.get_data(test_db_utils.gene_table_query)\n statement = get_gene_update_statement(1, TRIXIE_GENEID)\n results_tup = find_domains.execute_statement(self.connection, statement)\n result = results_tup[0]\n type_error = results_tup[1]\n value_error = results_tup[2]\n msg = results_tup[3]\n self.trans.commit()\n phage_table_results = test_db_utils.get_data(test_db_utils.phage_table_query)\n gene_table_results2 = test_db_utils.get_data(test_db_utils.gene_table_query)\n gene_domain_table_results = test_db_utils.get_data(test_db_utils.gene_domain_table_query)\n domain_table_results = test_db_utils.get_data(test_db_utils.domain_table_query)\n domain_status1 = gene_table_results1[0][\"DomainStatus\"]\n domain_status2 = gene_table_results2[0][\"DomainStatus\"]\n with self.subTest():\n 
self.assertEqual(len(phage_table_results), 1)\n with self.subTest():\n self.assertEqual(len(gene_table_results2), 1)\n with self.subTest():\n self.assertEqual(len(domain_table_results), 0)\n with self.subTest():\n self.assertEqual(len(gene_domain_table_results), 0)\n with self.subTest():\n self.assertEqual(domain_status1, 0)\n with self.subTest():\n self.assertEqual(domain_status2, 1)\n with self.subTest():\n self.assertEqual(result, 0)\n with self.subTest():\n self.assertFalse(type_error)\n with self.subTest():\n self.assertFalse(value_error)\n with self.subTest():\n self.assertIsInstance(msg, str)", "def test_scalar_attribute() -> None:\n expected_column_val = \"test\"\n mock_session = UnifiedAlchemyMagicMock(\n data=[\n (\n [\n mock.call.query(Model.name),\n mock.call.filter(Model.pk1 == 3),\n ],\n [(expected_column_val,)],\n )\n ]\n )\n data_column = mock_session.query(Model.name).filter(Model.pk1 == 3).scalar()\n assert expected_column_val == data_column", "def test_get_table_description(self):\n db_introspection = DatabaseIntrospection(self.connection)\n cursor = mock.MagicMock()\n\n def description(*args, **kwargs):\n return [[\"name\", TypeCode.STRING], [\"age\", TypeCode.INT64]]\n\n def get_table_column_schema(*args, **kwargs):\n column_details = {}\n column_details[\"name\"] = ColumnDetails(\n null_ok=False, spanner_type=\"STRING(10)\"\n )\n column_details[\"age\"] = ColumnDetails(\n null_ok=True, spanner_type=\"INT64\"\n )\n return column_details\n\n cursor.get_table_column_schema = get_table_column_schema\n cursor.description = description()\n table_description = db_introspection.get_table_description(\n cursor=cursor, table_name=\"Table_1\"\n )\n if USING_DJANGO_3:\n self.assertEqual(\n table_description,\n [\n FieldInfo(\n name=\"name\",\n type_code=TypeCode.STRING,\n display_size=None,\n internal_size=10,\n precision=None,\n scale=None,\n null_ok=False,\n default=None,\n collation=None,\n ),\n FieldInfo(\n name=\"age\",\n type_code=TypeCode.INT64,\n display_size=None,\n internal_size=None,\n precision=None,\n scale=None,\n null_ok=True,\n default=None,\n collation=None,\n ),\n ],\n )\n else:\n self.assertEqual(\n table_description,\n [\n FieldInfo(\n name=\"name\",\n type_code=TypeCode.STRING,\n display_size=None,\n internal_size=10,\n precision=None,\n scale=None,\n null_ok=False,\n default=None,\n ),\n FieldInfo(\n name=\"age\",\n type_code=TypeCode.INT64,\n display_size=None,\n internal_size=None,\n precision=None,\n scale=None,\n null_ok=True,\n default=None,\n ),\n ],\n )" ]
[ "0.6264874", "0.6200702", "0.61946857", "0.60861903", "0.605207", "0.60488313", "0.6044188", "0.6007789", "0.58853686", "0.5846672", "0.5814985", "0.5803768", "0.5793906", "0.5727903", "0.5715468", "0.5707922", "0.57078373", "0.570377", "0.56463915", "0.5636926", "0.5601166", "0.5573394", "0.55727124", "0.555254", "0.5552519", "0.5551694", "0.5546026", "0.5540913", "0.5539858", "0.55334187", "0.55328727", "0.5529244", "0.55290186", "0.5527385", "0.5504175", "0.55022913", "0.55019575", "0.54937166", "0.5482038", "0.54770905", "0.5474473", "0.54692274", "0.5467923", "0.5465868", "0.54402643", "0.5435337", "0.54177445", "0.5408411", "0.5408038", "0.5406473", "0.53925747", "0.5390134", "0.53885657", "0.5384256", "0.5383007", "0.53690296", "0.53630185", "0.5360358", "0.53592515", "0.5353378", "0.53449166", "0.53395665", "0.5326418", "0.5325922", "0.5318409", "0.5309808", "0.5305753", "0.53052", "0.5299189", "0.5278259", "0.5277863", "0.52749485", "0.52749413", "0.527112", "0.5267187", "0.5254789", "0.5243725", "0.52427596", "0.5240139", "0.52396107", "0.523955", "0.5239042", "0.5233371", "0.5231557", "0.52314216", "0.52292216", "0.52238864", "0.52238363", "0.5218909", "0.52175343", "0.52169025", "0.52168673", "0.5216239", "0.5215736", "0.5213128", "0.52129644", "0.52081877", "0.5206341", "0.5204767", "0.52033323", "0.52026707" ]
0.0
-1
Helper method to get baseline file.
def get_test_baseline(self, file_name): return os.path.abspath( os.path.join( os.path.abspath(__file__), u'..', u'baselines', file_name))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _GetBaseline(self, filename, directory, upstream_only = False):\n\n local_filename = os.path.join(directory, filename)\n local_directory = local_filename[:local_filename.rfind(\"/\")]\n if upstream_only:\n last_index = local_filename.rfind(\".\")\n if last_index > -1:\n local_filename = (local_filename[:last_index] +\n UPSTREAM_IMAGE_FILE_ENDING)\n\n download_file_modifiers = \"\"\n if local_filename.endswith(\".png\"):\n download_file_modifiers = \"b\" # binary file\n\n if not self.dont_download:\n CreateDirectory(local_directory)\n\n local_baseline = None\n url_of_baseline = None\n\n if self.use_local_baselines:\n test_path_key = self._NormalizeBaselineIdentifier(filename)\n dict = self.baseline_dict\n if upstream_only:\n dict = self.webkit_baseline_dict\n if test_path_key in dict:\n local_baseline = dict[test_path_key]\n url_of_baseline = local_baseline\n shutil.copy(local_baseline, local_directory)\n elif self.verbose:\n print \"Baseline %s does not exist in the index.\" % test_path_key\n else:\n index = 0\n possible_files = self._GetPossibleFileList(filename, upstream_only)\n # Download the baselines from the webkit.org site.\n while local_baseline == None and index < len(possible_files):\n local_baseline = self._DownloadFile(possible_files[index],\n local_filename,\n download_file_modifiers,\n True)\n if local_baseline:\n url_of_baseline = possible_files[index]\n index += 1\n\n if not local_baseline:\n if self.verbose:\n print \"Could not find any baseline for %s\" % filename\n else:\n local_baseline = os.path.normpath(local_baseline)\n if local_baseline and self.verbose:\n print \"Found baseline: %s\" % url_of_baseline\n\n return BaselineCandidate(local_baseline, url_of_baseline)", "def baseline(self):\n return self.data[self.data['treatment'] == 'Baseline']", "def read_base_test(base_file):\n with open(base_file) as f:\n contents = f.read()\n return contents", "def findBaseline(filename, projectSource):\n status = False \n filePath = checkPath(filename, projectSource)\n baseFileRead = open(filePath, \"r\") \n for line in baseFileRead.readlines():\n if re.search(\"Objects \\*{25}\", line) != None:\n status = True\n if status == False:\n sys.stderr.write(\"Warning: Expected Base file content not found\")\n baseFileRead.close()\n return filePath", "def _load_baseline(lang: str='en', model_name_or_path: Optional[str]=None, baseline_path: Optional[str]=None, baseline_url: Optional[str]=None) ->Optional[Tensor]:\n if baseline_path:\n baseline: Optional[Tensor] = _read_csv_from_local_file(baseline_path)\n elif baseline_url:\n baseline = _read_csv_from_url(baseline_url)\n elif lang and model_name_or_path:\n _URL_BASE = 'https://raw.githubusercontent.com/Tiiiger/bert_score/master/bert_score/rescale_baseline'\n baseline_url = f'{_URL_BASE}/{lang}/{model_name_or_path}.tsv'\n baseline = _read_csv_from_url(baseline_url)\n else:\n baseline = None\n warn('Baseline was not successfully loaded. 
No baseline is going to be used.')\n return baseline", "def GetBaseFile(self, filename):\r\n\r\n raise NotImplementedError(\r\n \"abstract method -- subclass %s must override\" % self.__class__)", "def GetBaseFile(self, filename):\n\n raise NotImplementedError(\n \"abstract method -- subclass %s must override\" % self.__class__)", "def _read_baseline(self, path):\n base_rmsd = dict()\n fin = open(path,'r')\n for line in fin:\n if line == '\\s' or line == '' or line == '\\n':\n continue\n k, v = line.split()\n base_rmsd[k.strip()] = float(v.strip())\n return base_rmsd", "def bbl_file(self, base_file):\n bbl_path = os.path.abspath(os.path.splitext(base_file)[0]) + '.bbl'\n return self.open_encode_safe(bbl_path).readlines()", "def get_baseline_output_id(self) -> int:\n pass", "def base_filename(self):\n return self.filename.split('.')[0]", "def __file__(self):\n\t\treturn __file__", "def __file__(self):\n\t\treturn __file__", "def __file__(self):\n\t\treturn __file__", "def getBaseSrcFile(self) -> List[int]:\n ...", "def __file__(self):\n return __file__", "def baseline(self):\n if getattr(self, \"_baseline\", None) is None:\n self._baseline = (self.container.height - 1) / 2\n return self._baseline", "def base(self):\n return os.path.basename(self.path)", "def _getfilename(self):\n pass", "def get_iaq_baseline(self) -> List[int]:\n # name, command, signals, delay\n return self._run_profile((\"iaq_get_baseline\", [0x20, 0x15], 2, 0.01))", "def _getBaselineThresh(self):\n print('Calculating 10% baseline')\n self.baseline = obrienBaseline.obrienBaseline(\n self.d['dos1rate'], timeWidth=5.0, \n cadence=0.1)\n self.peak_std = ( (self.d['dos1rate'][self.peakInd]/10 - \n self.baseline[self.peakInd]/10)/ \n np.sqrt(self.d['dos1rate'][self.peakInd]/10))\n return", "def baseline_TVOC(self) -> int:\n return self.get_iaq_baseline()[1]", "def get_base_path(self) -> str:\n raise NotImplementedError()", "def get_source_file(self):\n return self.get_attribute(\"source_file\")", "def get_inifile(self):\n return self.inifile", "def get_ap_file(self):\n with open(self.trendfile, 'r') as readfile:\n data = json.load(readfile)\n return data['trendtable']", "def _get_filename():\n dirname = os.path.dirname(__file__)\n return os.path.join(dirname, 'occulttraining.txt')", "def __init__(self, filepath, baseline_name=BASELINE_FILE_NAME,\n filename=FILE_NAME, sway_name=FILE_NAME_S):\n self.filepath = filepath\n self.baseline_name = baseline_name\n self.filename = filename\n self.sway_name = sway_name\n self.XSCALE = 22.5\n self.YSCALE = 13.\n self.lim_X = 20\n self.lim_Y = 20\n self.get_baseline_points()", "def test_get_result_top_file(self):\n pass", "def getCurrentFilePath(self):\n return os.path.abspath(self.filePath)", "def GetRerunContextFile(self):\n if not self.prev_test_context or not self.prev_test_context.test_resources:\n return None\n return self.prev_test_context.test_resources[0]", "def get_root_filename(self):\n pass", "def create_baseline_json(baseline_json_file):\n # TODO: Update this... Technically shouldn't need to sort it again since it was sorted when written to the results\n # directory\n baseline_json_file = baseline_json_file.replace(' ', '')\n if 'json' not in baseline_json_file:\n baseline_json_file += \".json\"\n with open(baseline_json_file, 'r') as json_file:\n try:\n json_object = json.load(json_file)\n except:\n print(\"Failed to open {}\".format(json_file))\n return\n\n # cmd = \"cat output/results/\" + baseline_json_file + \" | jq --sort-keys 'if .sysmlid? 
!= null then sort_by(.sysmlId)? else . end' > output/baseline/\" + baseline_json_file.replace(\n # '_orig', '').replace('.json', '') + \"_sorted.json\"\n # print(commands.getoutput(cmd))", "def async_baseline(kwargs):\n # we can't pickle our objects for remote works so we pickle the raw request\n # and then load it here.\n data = baseline_input_schema.load(kwargs).data\n return baseline(**data)", "def tests_ti_file_get(self):\n super().indicator_get()", "def base():\n print(CFG.base.path)", "def getProgramFile(self) -> java.io.File:\n ...", "def base_path(self):\n return self._base_path", "def expected_output(self):\n expected_output_file = path.splitext(self.source_name)[0] + \".expected\"\n if not path.exists(expected_output_file):\n return None\n else:\n with open(expected_output_file, \"r\", encoding=\"utf8\") as f:\n return f.read()", "def getBase(self):\n return self.base", "def getBase(self):\n return self.base", "def getBase(self):\n return self.base", "def file(self):\r\n return self._get_instantiation()[0]", "def organise_baseline_data(self):\n self.baseline_data = {}\n for injkey in self.data_sets.keys():\n data = {}\n baseline_result = self.data_sets[injkey].pop('full_syst_baseline')\n datakey = baseline_result.keys()[0]\n baseline_data = self.systtest_fit_extract(\n fit_data=baseline_result[datakey],\n datakey=datakey,\n labels=self.labels[injkey]['full_syst_baseline'].dict\n )\n self.baseline_data[injkey] = baseline_data", "def __getBaselineList(self):\n\n # cumulative baseline selections do not reflect on the msselectedindices()\n if self._msTool is None:\n self.__selectMS()\n\n \n # If there are any previous antenna selections, use it\n if self._arg['antenna'] != '':\n baselineSelection = {'baseline':self._arg['antenna']}\n try:\n self._msTool.msselect(baselineSelection, onlyparse=False)\n # IMPORTANT: msselectedindices() will always say there are auto-correlation\n # baselines, even when there aren't. In the MMS case, the SubMS creation will\n # issue a MSSelectionNullSelection and not be created. 
\n baselinelist = self._msTool.msselectedindices()['baselines']\n except:\n baselinelist = []\n else:\n md = msmdtool()\n md.open(self._arg['vis'])\n baselines = md.baselines()\n md.close()\n import numpy as np\n baselinelist = np.vstack(np.where(np.triu(baselines))).T \n \n\n return baselinelist.tolist()", "def base_path(self):\n return self.setup.base_path", "def test_get_infile(self):\r\n pass # not practically testable, but obvious file I/O\r", "def get_file(self):\n return self.theFile", "def createBaseLine(arguments): \n projectSource, projectName = \"\", \"\"\n projectSource, projectName = checkOS(arguments)\n testTempFile = tempfile.TemporaryFile()\n outputFile_name = \"RUN_\" + projectName + \"-planner_g_rt.\" + projectName + \"-initial-state.nddl.PlannerConfig.xml.output\"\n outputFile_path = search_file(outputFile_name, projectSource)\n if outputFile_path == None:\n sys.stderr.write(\"Error: file does not exist try running make in \" + projectSource)\n sys.exit(1)\n filePath = checkPath(outputFile_path, projectSource) \n parsePlanOutput(filePath, testTempFile)\n baseFile_name = os.path.join(projectSource, projectName + \"_Base.output\")\n baseFile = open(baseFile_name, \"w\")\n testTempFile.seek(0)\n for line in testTempFile.readlines():\n baseFile.write(line)\n baseFile.close()", "def _get_path(): # THIS IS JUST FOR GETTING THE FILE\n return os.path.dirname(os.path.abspath(__file__)) + '/'", "def getInFile( self ):\n return self.file", "def get_file(self) -> int:\r\n return self.file", "def baseline(path = \"\"):\n model = create_baseline(48, 0.5, None, None)\n layer_name=[]\n files = os.listdir(path)\n\n for layer in model.layers:\n check = path + \"/\" + layer.name + \".png\"\n if check in [path+'/'+f for f in files]:\n if 'conv' in layer.name and (not 'bn' in layer.name) and (not 'pad' in layer.name) and (not 'relu' in layer.name):\n layer_name.append([check,[layer.name ,str(layer.kernel_size[0]) + ' x ' + str(layer.kernel_size[1]), '-']])\n else:\n layer_name.append([check,[layer.name,'-', '-']])\n return layer_name", "def test_get_filepath(self):\r\n filepath = self.profile.get_filepath('testing.db')\r\n self.assertTrue(filepath.startswith(self.profile_path))", "def sample_file(self) -> str:\n return self._sample_file", "def get_resultfile(self):\r\n return self._resultfile", "def file(self):\n return self.__file", "def test_kyc_get_file(self):\n pass", "def getCurrentFileName(self):\n return os.path.basename(self.filePath)", "def baselineFrames(self):\n frames=[]\n for tag,T1,T2 in [x for x in self.tags if x[0]=='baseline']:\n for i,timePoint in enumerate(self.conf['times']):\n if timePoint>=T1*60 and timePoint<=T2*60:\n frames.append(i)\n return frames\n else:\n return [0]", "def baseline(ant_ID1, ant_ID2):\n return ant_pos[ant_ID2] - ant_pos[ant_ID1]", "def all_baselines():\n for i in range(len(active_ants)):\n ID1 = active_ants[i]\n for j in range(i + 1, len(active_ants[i + 1:])):\n ID2 = active_ants[j]\n print(\"Baseline between antennae \" + str(ID1) + \\\n \" and \" + str(ID2) + \" = \" + str(ant.baseline(ID1, ID2)))", "def GetResultFile(self):\n\n file_path = self.configfile.map['ResultFilePath']\n\n # Check if several entrie\n if file_path is not None:\n if len(file_path) > 1:\n warning(\n 'Many path for the result file are setted ({}), I will take the first one'\n .format(file_path))\n file_path = file_path[0]\n\n # If the storing file is elsewhere\n if file_path != \"#\":\n sys.path.insert(0, file_path)\n base = DBASE.open('Anna')\n\n if base is not None:\n 
return base\n else:\n error(\n 'Cannot find Anna file in {}'\n .format(file_path))\n return None\n\n else:\n base = DBASE.open('Anna')\n if base is not None:\n return base\n else:\n error(\n 'Cannot find Anna file in {}'\n .format(file_path))\n return None", "def _BuildBaselineIndexes(self):\n if self.verbose:\n print \"Building index of all local baselines...\"\n\n self.baseline_dict = {}\n self.webkit_baseline_dict = {}\n\n base = os.path.abspath(os.path.curdir)\n webkit_base = path_utils.PathFromBase('third_party', 'Webkit',\n 'LayoutTests')\n chromium_base = path_utils.PathFromBase('webkit', 'data', 'layout_tests')\n chromium_base_platform = os.path.join(chromium_base, PLATFORM)\n webkit_base_platform = os.path.join(webkit_base, PLATFORM)\n\n possible_chromium_files = []\n possible_webkit_files = []\n\n if IsMacPlatform(self.platform):\n self._AddBaselinePaths(possible_chromium_files, chromium_base_platform,\n CHROMIUM_MAC_PLATFORM_DIRS)\n self._AddBaselinePaths(possible_chromium_files, webkit_base_platform,\n WEBKIT_MAC_PLATFORM_DIRS)\n self._AddBaselinePaths(possible_webkit_files, webkit_base_platform,\n WEBKIT_MAC_PLATFORM_DIRS)\n elif IsLinuxPlatform(self.platform):\n self._AddBaselinePaths(possible_chromium_files, chromium_base_platform,\n CHROMIUM_LINUX_PLATFORM_DIRS)\n else:\n self._AddBaselinePaths(possible_chromium_files, chromium_base_platform,\n CHROMIUM_WIN_PLATFORM_DIRS)\n\n if not IsMacPlatform(self.platform):\n self._AddBaselinePaths(possible_webkit_files, webkit_base_platform,\n WEBKIT_WIN_PLATFORM_DIRS)\n\n possible_webkit_files.append(webkit_base)\n\n self._PopulateBaselineDict(possible_webkit_files, self.webkit_baseline_dict)\n self._PopulateBaselineDict(possible_chromium_files, self.baseline_dict)\n for key in self.webkit_baseline_dict.keys():\n if not key in self.baseline_dict:\n self.baseline_dict[key] = self.webkit_baseline_dict[key]\n\n return True", "def baseline_eCO2(self) -> int:\n return self.get_iaq_baseline()[0]", "def version_file(self) -> Optional[Path]:\n for path in [self.path, self.path.parent]:\n test_path = path / TF_VERSION_FILENAME\n if test_path.is_file():\n LOGGER.debug(\"using version file: %s\", test_path)\n return test_path\n return None", "def _NormalizeBaselineIdentifier(self, test_path):\n\n for regex in LOCAL_BASELINE_REGEXES:\n value = ExtractFirstValue(test_path, regex)\n if value:\n return value\n return test_path", "def export_hr_above_baseline_results(self, base_path: path_t, prefix: Optional[str] = None):\n self._export_results(base_path, prefix, self.hr_above_baseline_results)", "def rosbase(fname,checkfs=True):\n\tif checkfs: assert os.path.exists(fname)\n\tif checkfs: fname = os.path.abspath(fname)\n\tmark = \"rosetta_source/src\"\n\tassert fname.find(mark) > 0\n\treturn fname[fname.find(mark)+15:]", "def GetBase(self, fname, suffix):\n wds = fname.split('/')\n suff = suffix.replace('.BRIK','')\n suff = suff.replace('.HEAD','')\n if len(wds) > 1:\n return '.../%s' % '/'.join(wds[-2:]) + suff\n else:\n return fname + suff", "def run_baseline_simulation(self):\n n_days_base = 1 # Only consider 1 day simulation, self.n_days_base\n sim_time = 24*3600 # one day in seconds\n \n print(\"Running day-ahead baseline simulation ...\") \n print(\"Running baseline right away charging strategy ...\")\n baseline_soc, baseline_std_soc, baseline_power, baseline_cycles, baseline_Tin, baseline_std_Tin, baseline_Tin_max, baseline_Tin_min = self.run_baseline_right_away(n_days_base, sim_time)\n \n print(\"Exported baseline soc, Temperatures, power and 
HVAC cycles ...\")\n \n base_path = dirname(abspath(__file__))\n path = join(base_path,'data')\n \n # Already saved inside the right away function\n # baseline_soc.to_csv(join(path, r'SOC_baseline.csv'), index = False)\n # baseline_power.to_csv(join(path, r'power_baseline.csv'), index = False)\n # baseline_Tin.to_csv(join(path, r'Tin_baseline.csv'), index = False)\n # baseline_Tin_max.to_csv(join(path, r'Tin_max_baseline.csv'), index = False)\n # baseline_Tin_min.to_csv(join(path, r'Tin_min_baseline.csv'), index = False)\n print(\"Exported\")", "def evalBaseline(self, df = None):\n \n if (df is None):\n self.r_b = self.df.merge(self.df_user[[\"user ind\", \"b_u\"]], on = \"user ind\")\n self.r_b = self.r_b.merge(self.df_item[[\"item ind\", \"b_i\"]], on = \"item ind\")\n self.r_b[\"baseline\"] = self.r_mean + self.r_b[\"b_u\"] + self.r_b[\"b_i\"]\n \n \n return self.r_b[[\"user id\", \"item id\", \"baseline\"]]\n \n else:\n df = df.merge(self.df_user, on = \"user id\").merge(self.df_item, on = \"item id\")\n df[\"baseline\"] = self.r_mean + df[\"b_u\"] + df[\"b_i\"]\n \n # clip the score to the interval [1, 5]\n df[\"baseline\"] = np.minimum(np.maximum(df[\"baseline\"], 1), 5)\n \n return df[[\"user id\", \"item id\", \"baseline\"]]", "def get_base_dir(self):\n return self._config_dict['output']['@baseDirectory']", "def _get_source_file(self):\n file_name = get_file_join_name(self._input_path, self._source_file_target)\n if not file_name:\n file_name = get_file_join_name(self._input_path, self._source_file_target_old)\n if not file_name:\n data_path = os.path.join(self._input_path, \"data\")\n file_name = get_file_join_name(data_path, self._source_file_target)\n if not file_name:\n file_name = get_file_join_name(data_path, self._source_file_target_old)\n return file_name", "def get_error_file(self):\n pass", "def get_file(self):\n return self.dir + self.file_name + self.extension", "def base_path(self):\n return Path(self.path)", "def baseline(self) -> List[PredictionsDatapoints]:\n return self._baseline", "def get_base_logfile():\n return \"baseLog\" + get_day() + \".log\"", "def test_get_file_with_git_and_base_commit_id(self):\n self._test_get_file(\n tool_name='Git',\n revision='123',\n base_commit_id='456',\n expected_revision='123')", "def get_resource_base_path(self): # real signature unknown; restored from __doc__\n return \"\"", "def get_lineage_trace(self) -> str:\n # TODO why do we not want to store the results? The execution script will should stay the same\n # therefore we could cache the result.\n raise NotImplementedError", "def _get_base_url():\n base_url = \"\"\n\n with open('./deploy/terraform.tfstate') as json_file:\n data = json.load(json_file)\n try:\n base_url = str(data['outputs']['base_url']['value'])\n print(\"using API URL: \"+base_url)\n except:\n print(\"Could not find outputs.base_url.value in terraform.tfstate file. 
Did you successfully deploy the infrastructure?\")\n exit()\n return base_url", "def testbaselinelc(self):\n a_in = os.path.join(self.datadir,\n 'monol_testA_E3-50_lc' + HEN_FILE_EXTENSION)\n out = os.path.join(self.datadir, 'monol_test_baselc')\n command = '{0} -o {1} -p 0.001 --lam 1e5'.format(a_in, out)\n\n hen.lcurve.baseline_main(command.split())\n out_lc = hen.io.load_lcurve(out + '_0' + HEN_FILE_EXTENSION)\n assert hasattr(out_lc, 'base')\n gti_to_test = hen.io.load_events(self.first_event_file).gti\n assert np.allclose(gti_to_test, out_lc.gti)", "def read_fname(self):\n return self.read_value()", "def get_data_file():\n base_folder = os.path.dirname(__file__)\n # print(base_folder)\n return os.path.join(base_folder, 'data', 'Sacramentorealestatetransactions.csv')\n # print(filename)", "def _get_base_command(self):\n import inspect\n import os\n # get current script directory path. We are in /an/unknown/path/kalliope/core\n cur_script_directory = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\n # get parent dir. Now we are in /an/unknown/path/kalliope\n parent_dir = os.path.normpath(cur_script_directory + os.sep + os.pardir)\n # we add the kalliope.py file name\n real_entry_point_path = parent_dir + os.sep + KALLIOPE_ENTRY_POINT_SCRIPT\n # We test that the file exist before return it\n logger.debug(\"Real Kalliope.py path: %s\" % real_entry_point_path)\n if os.path.isfile(real_entry_point_path):\n crontab_cmd = \"python %s start --brain-file %s --run-synapse \" % (real_entry_point_path,\n self.brain.brain_file)\n return crontab_cmd\n raise IOError(\"kalliope.py file not found\")", "def report_path(self, base: str = None) -> str:\n if base is not None:\n return os.path.join(base, \"oltp_v2.json\")\n return os.path.join(self.report_dir, \"oltp_v2.json\")", "def getInitfile(self):\n return self._client.getInitfile()", "def baseline2txt(Plate, generate_Prism_output=False):\n fileprefix = Plate.filename.with_suffix(\"\").name\n filepath = Plate.filename.with_suffix(\"\")\n if not filepath.exists():\n filepath.mkdir(exist_ok=True, parents=True)\n output_file_fmt = str(filepath / (fileprefix + \"_baseline_{:s}.txt\"))\n\n if generate_Prism_output:\n prism_output_file = str(filepath / (fileprefix + \"_baseline.pzfx\"))\n prism_container = PrismFile()\n\n for i, (ch_name, ch_data) in enumerate(Plate.data):\n conjugate_names = np.unique(Plate.Read_Plate.substance)\n bardata = []\n bar_labels = []\n for conjugate in conjugate_names:\n conjugate_mask = Plate.Read_Plate.substance == conjugate\n for concentration in np.unique(Plate.Read_Plate.concentration):\n conc_mask = Plate.Read_Plate.concentration == concentration\n submask = conjugate_mask & conc_mask\n if submask.sum() > 0:\n unit = np.unique(Plate.Read_Plate.unit[submask])[0]\n # get the first timepoint, which is the 'B1' read\n _data = ch_data[0, submask]\n bardata.append(_data)\n bar_labels.append(\n \"{:s} ({:0.2f} {:s})\".format(\n conjugate, concentration, unit\n )\n )\n\n # save to prism file\n if generate_Prism_output:\n col_list = []\n for data, label in zip(bardata, bar_labels):\n # for bargraphs, no X-axis is used, so a 'Column' is used\n col = createPrismColumn(data, title=label, column_type=\"Y\")\n col_list.append(col)\n prism_table = assembleColumnPrismTable(\n col_list,\n name=\"{:s} channel\".format(ch_name),\n ID=\"Table{:d}\".format(i),\n )\n prism_container.append(prism_table)\n\n # convert bargraph data into numpy array\n bar_data_array = np.array(bardata)\n\n # unequal Npoints for each 
conjugate will not form a 2-d array\n if bar_data_array.ndim == 2:\n Npts = bar_data_array.shape[1]\n else:\n # amend this by filling in with nans\n bar_data_array = boolean_indexing(bar_data_array)\n Npts = bar_data_array.shape[1]\n\n # write raw data to text file. bardata is a list of numpy arrays\n maxtitlelength = max([len(label) for label in bar_labels])\n Ndata = len(bar_labels)\n labelfmt = \"{{:>{:d}s}}\".format(maxtitlelength)\n datafmt = \"{{:>{:d}.1f}}\".format(maxtitlelength)\n columnfmt = (labelfmt + \",\") * (Ndata - 1) + labelfmt + \"\\n\"\n rowfmt = (datafmt + \",\") * (Ndata - 1) + datafmt + \"\\n\"\n\n with open(output_file_fmt.format(ch_name), \"wt\") as fhd:\n fhd.write(columnfmt.format(*bar_labels))\n for n in range(Npts):\n fhd.write(rowfmt.format(*bar_data_array[:, n]))\n\n if generate_Prism_output:\n prism_container.write(prism_output_file)", "def _GetExpectationFileTagHeader(self, expectation_file: str) -> str:\n raise NotImplementedError()", "def base_dir(self):\n pass", "def file_path(self) -> Path:\n return self._input_file", "def get_output_file(self):\n\t\treturn call_sdk_function('PrlVmDev_GetOutputFile', self.handle)", "def _get_source_rd(self):\n return self.__source_rd", "def first_line(self):\n with open(self.file_path) as file:\n return file.readline()", "def _save_baseline_data(self, data, modifier, generate_new_baseline=True):\n\n port = self._port\n fs = port._filesystem\n if generate_new_baseline:\n relative_dir = fs.dirname(self._testname)\n baseline_path = port.baseline_path()\n output_dir = fs.join(baseline_path, relative_dir)\n output_file = fs.basename(fs.splitext(self._filename)[0] +\n \"-expected\" + modifier)\n fs.maybe_make_directory(output_dir)\n output_path = fs.join(output_dir, output_file)\n _log.debug('writing new baseline result \"%s\"' % (output_path))\n else:\n output_path = port.expected_filename(self._filename, modifier)\n _log.debug('resetting baseline result \"%s\"' % output_path)\n\n port.update_baseline(output_path, data)", "def export_hrv_above_baseline_results(self, base_path: path_t, prefix: Optional[str] = None):\n self._export_results(base_path, prefix, self.hrv_above_baseline_results)", "def _load_next_file(self):\n\n gains = super()._load_next_file()\n self._time_ptr = 0\n\n return gains", "def baseline_statistics(self, **_):\n raise NotImplementedError(\"{} doesn't support statistics.\".format(__class__.__name__))" ]
[ "0.72251785", "0.66584283", "0.65599346", "0.6401578", "0.6180834", "0.60123277", "0.5969603", "0.5948323", "0.59022737", "0.5869274", "0.583606", "0.5801047", "0.5801047", "0.5801047", "0.57897425", "0.57875085", "0.56959826", "0.56877124", "0.56490695", "0.56345946", "0.56341195", "0.56069237", "0.5555994", "0.554183", "0.5504578", "0.5499079", "0.54968023", "0.5479849", "0.54668957", "0.54073495", "0.5399663", "0.53863704", "0.5368781", "0.53663766", "0.5355925", "0.53537345", "0.5344124", "0.53337634", "0.5328549", "0.5318538", "0.5318538", "0.5318538", "0.53182185", "0.53036547", "0.52975816", "0.5270791", "0.5265415", "0.52567536", "0.5254525", "0.52516264", "0.5248707", "0.52414775", "0.52263343", "0.52227664", "0.5217678", "0.5203837", "0.5195816", "0.5193792", "0.5188645", "0.5184235", "0.51745766", "0.51611096", "0.5160883", "0.51601857", "0.5154443", "0.51543796", "0.5154325", "0.51513743", "0.5151253", "0.5139844", "0.513628", "0.51315546", "0.51288736", "0.5120559", "0.51141036", "0.51080287", "0.5107726", "0.5105562", "0.5102713", "0.5100588", "0.50946105", "0.5088201", "0.5083916", "0.5082311", "0.5073881", "0.50666535", "0.5065768", "0.506042", "0.5057528", "0.50479066", "0.50428534", "0.50409573", "0.5039504", "0.50354755", "0.50183886", "0.501819", "0.5017485", "0.5015981", "0.50121546", "0.50059354" ]
0.79847074
0
self.x_cells, self.y_cells = np.meshgrid(x_spacings, y_spacings, sparse=True)
def __init__(self, x_spacings, y_spacings):
    self.x_spacings = x_spacings.astype(np.float32)
    self.y_spacings = y_spacings.astype(np.float32)
    self.nx = len(x_spacings)
    self.ny = len(y_spacings)
    self.n_cells = self.nx * self.ny
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_meshgrid(self):\n x = np.linspace(self.limits[0], self.limits[1], self.resolution)\n y = np.linspace(self.limits[2], self.limits[3], self.resolution)\n X, Y = np.meshgrid(x, y)\n return X, Y", "def make_meshgrid(x_min,x_max,y_min,y_max, h=.02):\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h),\n np.arange(y_min, y_max, h))\n return xx, yy", "def make_meshgrid(x_min,x_max,y_min,y_max, h=.02):\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h),\n np.arange(y_min, y_max, h))\n return xx, yy", "def make_meshgrid(x, y, h=.02):\r\n x_min, x_max = x.min() - 1, x.max() + 1\r\n y_min, y_max = y.min() - 1, y.max() + 1\r\n xx, yy = np.meshgrid(np.arange(x_min,x_max,h),np.arange(y_min,y_max,h))\r\n return xx, yy", "def make_meshgrid(x, y, h=.02):\r\n x_min, x_max = x.min() - 1, x.max() + 1\r\n y_min, y_max = y.min() - 1, y.max() + 1\r\n xx, yy = np.meshgrid(np.arange(x_min,x_max,h),np.arange(y_min,y_max,h))\r\n return xx, yy", "def make_meshgrid(x, y,h=0.02):\n x_min, x_max = x.min() - 1, x.max() + 1\n y_min, y_max = y.min() - 1, y.max() + 1\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h),\n np.arange(y_min, y_max, h))\n return xx, yy", "def make_meshgrid(x, y, h=.02):\r\n x_min, x_max = x.min() - 1, x.max() + 1\r\n y_min, y_max = y.min() - 1, y.max() + 1\r\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h),\r\n np.arange(y_min, y_max, h))\r\n return xx, yy", "def _build_grid(self):\n n = self.params['n']\n\n x_min, x_max = min(self.node[:, 0]), max(self.node[:, 0])\n y_min, y_max = min(self.node[:, 1]), max(self.node[:, 1])\n xv = np.linspace(x_min, x_max, num=n, endpoint=True)\n yv = np.linspace(y_min, y_max, num=n, endpoint=True)\n xg, yg = np.meshgrid(xv, yv, sparse=False, indexing='xy')\n\n return xg, yg", "def make_meshgrid(x, y, h=.02):\n x_min, x_max = x.min() - 1, x.max() + 1\n y_min, y_max = y.min() - 1, y.max() + 1\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h),\n np.arange(y_min, y_max, h))\n return xx, yy", "def make_meshgrid(x, y, h=.02):\n x_min, x_max = x.min() - 1, x.max() + 1\n y_min, y_max = y.min() - 1, y.max() + 1\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h),\n np.arange(y_min, y_max, h))\n return xx, yy", "def make_meshgrid(x, y, h=.02):\n x_min, x_max = x.min() - 1, x.max() + 1\n y_min, y_max = y.min() - 1, y.max() + 1\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h),\n np.arange(y_min, y_max, h))\n return xx, yy", "def make_meshgrid(x, y, h=.02):\n x_min, x_max = x.min() - 1, x.max() + 1\n y_min, y_max = y.min() - 1, y.max() + 1\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h),\n np.arange(y_min, y_max, h))\n return xx, yy", "def cells_list(self):\n xx, yy = np.meshgrid(self.x_spacings, self.y_spacings)\n return np.vstack([yy.ravel(), xx.ravel()]).transpose()", "def make_meshgrid(x, y, h = 5):\n x_min, x_max = x.min() - 1, x.max() + 1\n y_min, y_max = y.min() - 1, y.max() + 1\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h),\n np.arange(y_min, y_max, h))\n return xx, yy", "def create_meshgrid(x, y, h=0.015):\n x_min, x_max = x.min() - 1, x.max() + 1\n y_min, y_max = y.min() - 1, y.max() + 1\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h),\n np.arange(y_min, y_max, h))\n return xx, yy", "def make_meshgrid(x, y, h=0.02):\n space = 0.3\n x_min, x_max = x.min() - space, x.max() + space\n y_min, y_max = y.min() - space, y.max() + space\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h),\n np.arange(y_min, y_max, h))\n return xx, yy", "def make_grid(self, nx, ny):\n nx_vec = np.arange(nx)\n ny_vec = np.arange(ny)\n yv, xv = np.meshgrid(ny_vec, nx_vec)\n 
grid = np.stack((yv, xv), axis=2)\n grid = grid.reshape(1, 1, ny, nx, 2)\n return grid", "def ij_coordinates(self):\n\n x = np.arange(self.nx)\n y = np.arange(self.ny)\n return np.meshgrid(x, y)", "def meshgrid(x,y):\n x = asarray(x)\n y = asarray(y)\n numRows, numCols = len(y), len(x) # yes, reversed\n x = x.reshape(1,numCols)\n X = x.repeat(numRows, axis=0)\n\n y = y.reshape(numRows,1)\n Y = y.repeat(numCols, axis=1)\n return X, Y", "def test_Grid_creates_array_space():\n\n # create dummy meshgrid\n img_dim, spacing = (193, 193), 64\n x_vec = np.arange(0, img_dim[1], spacing)\n y_vec = np.arange(0, img_dim[0], spacing)\n xx, yy = np.meshgrid(x_vec, y_vec)\n\n # create Grid\n g = mg.Grid(img_dim, spacing)\n\n assert g.ny == len(y_vec)\n assert g.nx == len(x_vec)", "def makeGrid(self):\n self.h = self.step_x\n self.k = self.step_t\n self.t, self.x = np.meshgrid(np.arange(self.min_t, self.max_t, self.step_t), np.arange(self.min_x, self.max_x\n , self.step_x))", "def xy_mesh(nx, ny, x_min=0, x_max=1, y_min=0, y_max=1):\n\n\tx = np.linspace(x_min, x_max, nx)\n\ty = np.linspace(y_min, y_max, ny)\n\txv, yv = np.meshgrid(x, y)\n\t\n\treturn xv, yv", "def xy_coordinates(self):\n\n return np.meshgrid(self.x_coord, self.y_coord)", "def create(x: Bounds, y: Bounds, grid_spacing):\n # Calculate grid bounds\n x0, y0 = Grid.bounding_box(x, y, grid_spacing)\n # print(f\"Grid.create: bounding box: x: {x0} y: {y0}\" )\n\n # Generate vectors of grid centers\n # Cell center offset\n cell_center_offset = grid_spacing/2\n x_vals = np.arange(x0.min + cell_center_offset, x0.max, grid_spacing)\n y_vals = np.arange(y0.max - cell_center_offset, y0.min, -grid_spacing)\n\n return x_vals, y_vals", "def meshgrid(self):\n vecs = self.coord_vecs\n return np.meshgrid(*vecs, indexing='ij')", "def meshup2d(self, ind='ij'):\r\n\r\n xv, yv, _ = self.vec()\r\n x_reg, y_reg = np.meshgrid(xv, yv, indexing=ind)\r\n\r\n return x_reg, y_reg", "def meshgrid(x, y, row_major=True):\n # type: (int, int, bool)->Tensor\n a = torch.arange(0, x)\n b = torch.arange(0, y)\n xx = a.repeat(y).view(-1, 1).float()\n yy = b.view(-1, 1).repeat(1, x).view(-1, 1).float()\n return torch.cat([xx, yy], 1) if row_major else torch.cat([yy, xx], 1)", "def set_grid(self, bounds, dx):\n self.dx = dx\n self.X1 = np.arange(bounds[0], bounds[1], dx)\n self.X = np.array(list(it.product(*[self.X1]*self.n_dim)))", "def grid(x, y):\n return product(xrange(1, x+1), xrange(1, y+1))", "def get_par_meshgrid(self, copy=False, sparse=False): \n axes = []\n for i in range(self.get_n_dimensions()):\n axes.append(self.dims[i].values_strs)\n return np.meshgrid(*axes, copy, sparse, indexing='ij')", "def mesh_grid(self,width,height):\n # get\n \n x_linspace=tf.linspace(-self.cx_,1-self.cx_,width)\n y_linspace=tf.linspace(-self.cy_,1-self.cy_,height)\n \n# x_cord,y_cord=tf.meshgrid(x_linspace,y_linspace)\n y_cord,x_cord=tf.meshgrid(y_linspace,x_linspace)\n \n \n x_cord=tf.reshape(x_cord,[-1])\n y_cord=tf.reshape(y_cord,[-1])\n \n f_=tf.ones_like(x_cord)\n \n x_=tf.div(x_cord,self.cf)\n y_=tf.div(y_cord,self.cf)\n \n grid=tf.concat([x_,y_,f_],0)\n return grid", "def create_grid(xlim, ylim, step):\n x_range = np.arange(xlim[0], xlim[1], step)\n y_range = np.arange(ylim[0], ylim[1], step)\n return x_range, y_range", "def _buildGridPoints(self):\n self.spacings = []\n for level in xrange(self.depth):\n levelSpacings = []\n refLevel = level + 1\n level = 2**level\n axisData = []\n for axis in self.size:\n spacing = axis / (level+1)\n levelSpacings.append(spacing)\n 
axisData.append([gridValue*spacing for gridValue in xrange(1, level+1)])\n pointList = [((i, j, k), np.array([axisData[0][i], axisData[1][j], axisData[2][k]]))\n for i in xrange(level)\n for j in xrange(level)\n for k in xrange(level)]\n self.grid[refLevel] = {point[0]: point[1] for point in pointList}\n self.spacings.append(levelSpacings)", "def cmesh(self):\n return numpy.meshgrid(*self.centers, indexing='ij')", "def _grid(m, dtype=np.float32):\n M = m**2\n x = np.linspace(0, 1, m, dtype=dtype)\n y = np.linspace(0, 1, m, dtype=dtype)\n xx, yy = np.meshgrid(x, y)\n z = np.empty((M, 2), dtype)\n z[:, 0] = xx.reshape(M)\n z[:, 1] = yy.reshape(M)\n return z", "def create_mesh_grid(val_loc, linspace, frame=None, method='cubic'):\n locations = val_loc[:,0:2]\n values = val_loc[:,2]\n if frame == None:\n frame = create_frame_for(locations)\n assert len(frame) == 2, \"Precondition violation\"\n assert len(frame[0]) == 2 and len(frame[1]) == 2, \"Preconditio violation\"\n assert frame[0][0] <= frame[0][1] and frame[1][0] <= frame[1][1], \"Precondition violation\"\n assert len(linspace) == 2, \"Precondition violatio\"\n assert linspace[0] > 0 and linspace[1] > 0, \"Precondition violation\"\n assert method in ['cubic','linear','nearest'], \"Precondition violaiton\"\n mesh_X, mesh_Y = np.meshgrid(\n np.linspace(frame[0][0],frame[0][1],linspace[0]),\n np.linspace(frame[1][0],frame[1][1],linspace[1]),\n indexing='xy'\n )\n mesh_Z = scipy.interpolate.griddata(\n locations, values, (mesh_X, mesh_Y), method=method, fill_value=0\n )\n return mesh_X, mesh_Y, mesh_Z", "def makemesh_regular(data,vecs,grid):\n\tdata = beyonder(data,vecs,growsize=0.1)\n\txypts = np.array([[i,j] for i in np.linspace(0,vecs[0],grid[0].astype(int)) \n\t\tfor j in np.linspace(0,vecs[1],grid[1].astype(int))])\n\tinterp = scipy.interpolate.LinearNDInterpolator(data[:,0:2],data[:,2],fill_value=0.0)\n\tbilinear_pts = np.array([[i[0],i[1],interp(i[0],i[1])] for i in xypts])\n\tresult = scipy.interpolate.griddata(bilinear_pts[:,0:2],bilinear_pts[:,2],bilinear_pts[:,0:2],\n\t\tmethod='cubic')\n\t#---observed that griddata returns points where we cycle through the points in the following\n\t#---...order:x0,y0),(x0,y1),...(x0,yn),(x1,y0),... 
and so on, suggesting that the following \n\t#---...reshape command (which reshape function claims to use the \"C\" programming language convention\n\t#---...for reshaping objects by default, which convention has the last index changing \"fastest\")\n\txyz_pts = np.array([[bilinear_pts[i,0],bilinear_pts[i,1],result[i]] for i in range(len(result))])\n\treturn np.reshape(xyz_pts[:,2],grid.astype(int))", "def box_meshgrid(func, xbound, ybound, nx=50, ny=50):\n \n # form a test location grid to try \n minx, maxx = xbound\n miny, maxy = ybound\n loc0_cands = np.linspace(minx, maxx, nx)\n loc1_cands = np.linspace(miny, maxy, ny)\n lloc0, lloc1 = np.meshgrid(loc0_cands, loc1_cands)\n # nd1 x nd0 x 2\n loc3d = np.dstack((lloc0, lloc1))\n # #candidates x 2\n all_loc2s = np.reshape(loc3d, (-1, 2) )\n # evaluate the function\n func_grid = func(all_loc2s)\n func_grid = np.reshape(func_grid, (ny, nx))\n \n assert lloc0.shape[0] == ny\n assert lloc0.shape[1] == nx\n assert np.all(lloc0.shape == lloc1.shape)\n \n return lloc0, lloc1, func_grid", "def gen_grids(self):\n self.dx = self.grid_width / self.grid_resol\n self.dk = 2 * np.pi/self.grid_width\n self.grid_x_shifted = -self.grid_width/2 + self.dx * np.arange(0, self.grid_resol)\n self.grid_x = self.grid_x_shifted + self.grid_center\n self.grid_k = - (np.pi * self.grid_resol)/self.grid_width + self.dk * np.arange(0, self.grid_resol)\n self.grid_k = np.roll(self.grid_k, int((self.grid_resol)/2))\n self.grid_kin = np.square(self.h)/ (2*self.m) * np.square(self.grid_k)", "def meshup(self, ind='ij'):\r\n xv, yv, zv = self.vec()\r\n x_reg, y_reg, z_reg = np.meshgrid(xv, yv, zv, indexing=ind)\r\n\r\n return x_reg, y_reg, z_reg", "def create_cells_from_dims(num_verts_x: int, num_verts_y: int):\n num_cells_x = num_verts_x - 1\n num_cells_y = num_verts_y - 1\n num_cells = num_cells_x*num_cells_y\n cell_array = np.zeros((num_cells, 4), dtype=int)\n cell_num = 0\n\n # I am sure this could be done in a more efficient way.\n for y_cell in range(num_cells_y):\n for x_cell in range(num_cells_x):\n cell_array[cell_num, 0] = x_cell + num_verts_x*y_cell\n cell_array[cell_num, 1] = cell_array[cell_num, 0] + 1\n cell_array[cell_num, 2] = cell_array[cell_num, 0] + num_verts_x + 1\n cell_array[cell_num, 3] = cell_array[cell_num, 0] + num_verts_x\n cell_num += 1\n\n return cell_array", "def _generate_grid(self, xyz, dx, dy):\n\n origin = np.amin(xyz,0)\n extent = np.amax(xyz,0)-origin\n ncells = (np.amax(xyz,0)-origin)//[dx,dy,1]\n\n # Account for remainder\n origin += [(extent[0] % dx) / 2, (extent[1] % dy) / 2, 0]\n\n xbnds = np.linspace(0, ncells[0] * dx, ncells[0] + 1)\n ybnds = np.linspace(0, ncells[1] * dy, ncells[1] + 1)\n\n return origin, xbnds, ybnds, extent[2]", "def test_init_multigrid():\n\n img_dim = (500, 750)\n h = 64\n\n # expected grid\n xv, yv = (np.arange(0, img_dim[1], h),\n np.arange(0, img_dim[0], h))\n x_exp, y_exp = np.meshgrid(xv, yv)\n\n # actual\n amg = mg.MultiGrid(img_dim, h, WS=127)\n x_act, y_act = amg.x, amg.y\n\n assert np.allclose(x_act, x_exp.ravel())\n assert np.allclose(y_act, y_exp.ravel())", "def grid(h, w, dtype=np.float32):\n M = h * w\n x = np.linspace(0, 1, w, dtype=dtype)\n y = np.linspace(0, 1, h, dtype=dtype)\n xx, yy = np.meshgrid(x, y)\n z = np.empty((M, 2), dtype)\n z[:, 0] = xx.reshape(M)\n z[:, 1] = yy.reshape(M)\n return z", "def create_grids(self):\n \n par = self.par\n\n # a. 
retirement\n \n # pre-decision states\n par.grid_m_ret = nonlinspace(par.eps,par.m_max_ret,par.Nm_ret,par.phi_m)\n par.Nmcon_ret = par.Nm_ret - par.Na_ret\n \n # post-decision states\n par.grid_a_ret = nonlinspace(0,par.a_max_ret,par.Na_ret,par.phi_m)\n \n # b. working: state space (m,n,k) \n par.grid_m = nonlinspace(par.eps,par.m_max,par.Nm,par.phi_m)\n\n par.Nn = par.Nm\n par.n_max = par.m_max + par.n_add\n par.grid_n = nonlinspace(0,par.n_max,par.Nn,par.phi_n)\n\n par.grid_n_nd, par.grid_m_nd = np.meshgrid(par.grid_n,par.grid_m,indexing='ij')\n\n # c. working: w interpolant (and wa and wb and wq)\n par.Na_pd = np.int_(np.floor(par.pd_fac*par.Nm))\n par.a_max = par.m_max + par.a_add\n par.grid_a_pd = nonlinspace(0,par.a_max,par.Na_pd,par.phi_m)\n \n par.Nb_pd = np.int_(np.floor(par.pd_fac*par.Nn))\n par.b_max = par.n_max + par.b_add\n par.grid_b_pd = nonlinspace(0,par.b_max,par.Nb_pd,par.phi_n)\n \n par.grid_b_pd_nd, par.grid_a_pd_nd = np.meshgrid(par.grid_b_pd,par.grid_a_pd,indexing='ij')\n \n # d. working: egm (seperate grids for each segment)\n \n if par.solmethod == 'G2EGM':\n\n # i. dcon\n par.d_dcon = np.zeros((par.Na_pd,par.Nb_pd),dtype=np.float_,order='C')\n \n # ii. acon\n par.Nc_acon = np.int_(np.floor(par.Na_pd*par.acon_fac))\n par.Nb_acon = np.int_(np.floor(par.Nb_pd*par.acon_fac))\n par.grid_b_acon = nonlinspace(0,par.b_max,par.Nb_acon,par.phi_n)\n par.a_acon = np.zeros(par.grid_b_acon.shape)\n par.b_acon = par.grid_b_acon\n\n # iii. con\n par.Nc_con = np.int_(np.floor(par.Na_pd*par.con_fac))\n par.Nb_con = np.int_(np.floor(par.Nb_pd*par.con_fac))\n \n par.grid_c_con = nonlinspace(par.eps,par.m_max,par.Nc_con,par.phi_m)\n par.grid_b_con = nonlinspace(0,par.b_max,par.Nb_con,par.phi_n)\n\n par.b_con,par.c_con = np.meshgrid(par.grid_b_con,par.grid_c_con,indexing='ij')\n par.a_con = np.zeros(par.c_con.shape)\n par.d_con = np.zeros(par.c_con.shape)\n \n elif par.solmethod == 'NEGM':\n\n par.grid_l = par.grid_m\n\n # e. shocks\n assert (par.Neta == 1 and par.var_eta == 0) or (par.Neta > 1 and par.var_eta > 0)\n\n if par.Neta > 1:\n par.eta,par.w_eta = log_normal_gauss_hermite(np.sqrt(par.var_eta), par.Neta)\n else:\n par.eta = np.ones(1)\n par.w_eta = np.ones(1)\n\n # f. 
timings\n par.time_work = np.zeros(par.T)\n par.time_w = np.zeros(par.T)\n par.time_egm = np.zeros(par.T)\n par.time_vfi = np.zeros(par.T)", "def tensor_grid(x):\n\treturn np.vstack(np.meshgrid(*x, indexing = 'ij')).reshape((len(x), -1)).T", "def get_grid_mesh_coordinates(bbox, spacings=(1,1,1), dot_spacing=1, include_borderline=True):\n\n xmin,xmax,ymin,ymax,zmin,zmax = bbox\n\n xdim, ydim, zdim = (xmax+1-xmin, ymax+1-ymin, zmax+1-zmin)\n\n xs = np.arange(0, xdim, spacings[0])\n ys = np.arange(0, ydim, spacings[1])\n zs = np.arange(0, zdim, spacings[2])\n\n vol = np.zeros((ydim, xdim, zdim), np.bool)\n xs = xs.astype(np.int)\n ys = ys.astype(np.int)\n zs = zs.astype(np.int)\n xs = xs[(xs >= 0) & (xs < xdim)]\n ys = ys[(ys >= 0) & (ys < ydim)]\n zs = zs[(zs >= 0) & (zs < zdim)]\n if include_borderline:\n if 0 not in xs:\n xs = np.r_[0, xs, xdim-1]\n else:\n xs = np.r_[xs, xdim-1]\n if 0 not in ys:\n ys = np.r_[0, ys, ydim-1]\n else:\n ys = np.r_[ys, ydim-1]\n if 0 not in zs:\n zs = np.r_[0, zs, zdim-1]\n else:\n zs = np.r_[zs, zdim-1]\n for y in ys:\n vol[y, xs, ::dot_spacing] = 1\n vol[y, ::dot_spacing, zs] = 1\n for x in xs:\n vol[ys, x, ::dot_spacing] = 1\n vol[::dot_spacing, x, zs] = 1\n for z in zs:\n vol[ys, ::dot_spacing, z] = 1\n vol[::dot_spacing, xs, z] = 1\n\n ys, xs, zs = np.nonzero(vol)\n\n return np.c_[xs, ys, zs] + (xmin,ymin,zmin)", "def __init__(self, grid, x, y, cols):\n self.grid = grid\n self.x = x\n self.y = y\n self.cols = cols", "def fill_grid_np(self):\n\n self.grid_np = [None for i in range(GRID_HEIGHT*GRID_HEIGHT*MAX_CELL_SIZE)]\n grid = self.grid_np\n # cell_size = self.cell_size\n for obj in self.levels[self.curient_level].objects:\n obj.position_grid[X], obj.position_grid[Y] = get_grid_xy(obj.position_np, ZOMBIE_SIZE)\n x, y = obj.position_grid[X], obj.position_grid[Y]\n grid[y*GRID_WIDTH + x] = obj\n # if cell_size[y*GRID_WIDTH + x] < MAX_CELL_SIZE:\n # cell_size[y*GRID_WIDTH + x] += 1", "def grid_coords(self):\n return [(x, y) for y in range(self.height) for x in range(self.width)]", "def nd_grid(*xg):\n grid_shape = [np.shape(xg1d)[0] for xg1d in xg] # shape of the grid\n d = np.size(grid_shape)\n N = np.product(grid_shape)\n X_mesh = np.empty(d, dtype=object)\n for i, xg1d in enumerate(xg): # for each 1d component\n if np.ndim(xg1d) > 1:\n assert np.shape(xg1d)[1] == 1, \"only currently support each grid dimension being 1d\"\n n = np.shape(xg1d)[0] # number of points along dimension of grid\n slice_shape = np.ones(d, dtype=int); slice_shape[i] = n # shape of the slice where xg1d fits\n stack_shape = np.copy(grid_shape); stack_shape[i] = 1 # shape of how the slice should be tiled\n X_mesh[i] = np.tile(xg1d.reshape(slice_shape), stack_shape) # this is the single dimension on the full grid\n return X_mesh", "def make_maps_of_2x1_pix_coordinates (sp) : \n x_rhs = np.arange(sp.colsh)*sp.pixs + sp.pixw - sp.pixsh\n x_rhs[0] = sp.pixwh # set x-coordinate of the wide pixel \n x_arr = np.hstack([-x_rhs[::-1],x_rhs])\n\n y_arr = np.arange(sp.rows) * sp.pixs\n y_arr -= y_arr[-1]/2 # move origin to the center of array\n\n sp.x_map2x1, sp.y_map2x1 = np.meshgrid(x_arr, y_arr)", "def make_coordinate_grid(spatial_size, type):\n h, w = spatial_size\n x = torch.arange(w).type(type)\n y = torch.arange(h).type(type)\n x = 2 * (x / (w - 1)) - 1\n y = 2 * (y / (h - 1)) - 1\n yy = y.view(-1, 1).repeat(1, w)\n xx = x.view(1, -1).repeat(h, 1)\n meshed = torch.cat([xx.unsqueeze_(2), yy.unsqueeze_(2)], 2)\n return meshed", "def _get_grid_cell_indexes(proj, xs, ys, bounding_box):\n # 
Unpack values from the projection\n eq_rad = proj.semi_major_axis\n polar_rad = proj.semi_minor_axis\n h = proj.perspective_point_height + eq_rad\n lon0 = proj.longitude_of_projection_origin\n \n # Unpack values from the area we want to grab the data\n min_lat, min_lon = bounding_box.sw_corner()\n max_lat, max_lon = bounding_box.ne_corner()\n \n with np.errstate(invalid='ignore'):\n # Calculate the lat and lon grids\n xs, ys = np.meshgrid(xs, ys)\n a_vals = np.power(np.sin(xs), 2.0) + \\\n np.power(np.cos(xs), 2.0) * (np.power(np.cos(ys), 2.0) + \\\n eq_rad * eq_rad / polar_rad / polar_rad * np.power(np.sin(ys), 2.0))\n b_vals = -2 * h * np.cos(xs) * np.cos(ys)\n c_val = h * h - eq_rad * eq_rad\n \n rs = (-b_vals - np.sqrt(np.power(b_vals, 2.0) - 4 * a_vals * c_val)) / (2 * a_vals)\n \n sx = rs * np.cos(xs) * np.cos(ys)\n sy = -rs * np.sin(xs)\n sz = rs * np.cos(xs) * np.sin(ys)\n \n lats = np.arctan((eq_rad *eq_rad * sz) \\\n / (polar_rad * polar_rad * np.sqrt(np.power(h - sx, 2.0) + np.power(sy, 2.0))))\n lats = np.degrees(lats)\n \n lons = np.radians(lon0) - np.arctan(sy / (h - sx))\n lons = np.degrees(lons)\n \n # Flatten the arrays so we get a 1D list of indexes\n lats = lats.flatten()\n lons = lons.flatten()\n \n # Filter out values not in our bounding box\n lats = np.where(np.logical_and(lats >= min_lat, lats <= max_lat))[0]\n lons = np.where(np.logical_and(lons >= min_lon, lons <= max_lon))[0]\n idxs = list(set(lons).intersection(set(lats)))\n \n return idxs", "def initialize_grid(self):\n self.grid = np.zeros([self.N, self.N, self.N])\n return self.grid", "def flat_2D_grid(bounds, dx, dy):\n x = np.arange(bounds[0], bounds[1] + dx, dx)\n y = np.arange(bounds[2], bounds[3] + dy, dy)\n x_grid, y_grid = np.meshgrid(x, y)\n x_grid, y_grid = x_grid.flatten(), y_grid.flatten()\n\n return pd.DataFrame({'x': x_grid,\n 'y': y_grid,\n 'masked': np.zeros(x_grid.size, dtype='bool')})", "def get_xy_grid(nx, ny):\n\tfor n in [nx, ny]:\n\t\tif not isodd(n):\n\t\t\traise Exception(\"[get_xy_grid] only accept odd number\")\n\n\tx, y = np.mgrid[-(nx-1)/2:(nx+1)/2, -(ny-1)/2:(ny+1)/2]\n\n\treturn x, y", "def _cellTopology(self):\n cellTopology = numerix.empty((self.mesh.numberOfCells,), dtype=numerix.ubyte)\n cellTopology[:] = self._elementTopology[\"line\"]\n\n return cellTopology", "def mesh(self):\n return numpy.meshgrid(*self.edges, indexing='ij')", "def makeCMSgridNodes(x0, y0, azi, dx, dy, z):\n # convert from node calculation to centric calculation\n # first move origin from vertex of grid to center of first grid cell\n\n # first convert to FRF coordinates\n FRF = gp.FRFcoord(x0, y0, coordType='ncsp')\n # shift origin to cell center instead of cell vertex\n x0N = FRF['xFRF'] - dx[0]/2\n y0N = FRF['yFRF'] - dy[0]/2\n # create new dx/dy array spaced with half of each of the 2 cells\n dxN = dx[:-1] + np.diff(dx)/2\n dyN = dy[:-1] + np.diff(dy)/2 # new nodes at the grid center - needed to fit into\n # create new nodes in FRF x and FRF Y using cell centric locations for accurate interpolation\n outXfrf, outYfrf = createGridNodesinFRF(x0N, y0N, dxN, dyN, dx.shape[0], dy.shape[0])\n xFRF, yFRF = np.meshgrid(outXfrf, sorted(outYfrf))\n # new work no need to loop as above\n convert2 = gp.FRFcoord(xFRF.flatten(), yFRF.flatten(), coordType='FRF')\n lat = convert2['Lat'].reshape(xFRF.shape)\n lon = convert2['Lon'].reshape(xFRF.shape)\n easting = convert2['StateplaneE'].reshape(xFRF.shape)\n northing = convert2['StateplaneN'].reshape(yFRF.shape)\n # making i's and j's for cell numbers\n ii = 
np.linspace(1, xFRF.shape[1], xFRF.shape[1])\n jj = np.linspace(1, yFRF.shape[0], yFRF.shape[0])\n\n BathyPacket = {'i': ii,\n 'j': jj,\n 'latitude': lat,\n 'longitude': lon,\n 'easting': easting,\n 'northing': northing,\n 'xFRF': sorted(xFRF[0, :]),\n 'yFRF': yFRF[:, 0],\n 'azimuth': azi,\n 'x0': x0,\n 'y0': y0,\n 'DX': dxN,\n 'DY': dyN,\n 'ni': len(ii),\n 'nj': len(jj),\n 'elevation': z, # exported as [t, x,y] dimensions\n 'gridFname': 'CMS GRid',\n 'time': 0}\n\n return BathyPacket", "def __init__(self, x=None, mbar=10, eq=True, to_plot=False, mbar_min=1, xg=None, beyond_domain=None):\n logger.debug('Initializing inducing grid.')\n k = mbar; del mbar # mbar is an alias\n k_min = mbar_min; del mbar_min # mbar_min is an alias\n if xg is None: # then generate a grid from the scattered points x\n # deal with inputs\n assert isinstance(x,np.ndarray)\n assert x.ndim == 2\n self.eq = eq\n if not isinstance(k,(tuple,list,np.ndarray)):\n k = (k,)*x.shape[1]\n\n # get some statistics and counts (just assuming 1d along each dimension)\n (n_train, self.grid_dim) = x.shape # number of training points, number of grid dimensions\n self.grid_sub_dim = np.ones(self.grid_dim, dtype=int) # number of sub dimensions along each grid dim\n self.input_dim = np.sum(self.grid_sub_dim) # total number of dimensions\n self.grid_shape = np.zeros(self.grid_dim, dtype=int); # number of points along each sub dimension\n x_rng = np.vstack((np.amin(x,axis=0), np.amax(x,axis=0), np.ptp(x,axis=0))).T\n n_unq = np.array([np.unique(x[:,i]).size for i in range(self.grid_dim)])\n if not np.all(n_unq >= 2):\n logger.debug('some dimension have < 2 unique points')\n for i,ki in enumerate(k):\n if ki <= 1:\n self.grid_shape[i] = np.int32(np.maximum(np.ceil(ki*n_unq[i]),k_min));\n else:\n assert np.mod(ki,1) == 0, \"if k > 1 then it must be an integer\"\n # don't allow the number of points to be greater than n_unq\n self.grid_shape[i] = np.int32(np.maximum(np.minimum(ki, n_unq[i]), k_min));\n self.num_data = np.prod(np.float64(self.grid_shape)) # total number of points on the full grid\n\n # check if bounds are to be added, in which case I want to call recursively\n if beyond_domain is not None:\n assert np.all(self.grid_shape >= 2), \"bounds need at least 2 points per dim\"\n # get the grid with no bounds but 2 less points per dimension\n xg = InducingGrid(x=x, k=self.grid_shape-2, eq=eq, to_plot=False, k_min=0, xg=None, beyond_domain=None).xg\n for i in range(x.shape[1]):\n xg[i] = np.vstack((x_rng[i,0]-beyond_domain*x_rng[i,2], xg[i], x_rng[i,1]+beyond_domain*x_rng[i,2])) # add the points that go beyond domain\n # since xg is now specified, it will be added to the grid below\n else:\n #figure out if the grid should be on unique points\n on_unique = self.grid_shape == n_unq # whether or not the grid is exactly on unique values\n\n # create the grid\n # self.xg is a list of length n_dims which specifies the grid along each dimension.\n self.xg = np.empty(self.grid_dim, dtype=object)\n for i_d in range(self.grid_dim):\n if on_unique[i_d]: # then place the grid on the unique values\n self.xg[i_d] = np.unique(x[:,i_d]).reshape((-1,1))\n elif self.eq: # equally spaced grid points\n self.xg[i_d] = np.linspace(x_rng[i_d,0],x_rng[i_d,1],num=self.grid_shape[i_d]).reshape((-1,1))\n elif self.grid_shape[i_d] == 2: # then just place on the ends\n self.xg[i_d] = x_rng[i_d,:2].reshape((-1,1))\n else: # non equally spaced grid points\n \"\"\"\n do a two-pronged kmeans clustering strategy where you find clusters of clusters:\n 1) indentify 
clusters in the data, I don't want to reconsider points in the same cluster twice\n 1.5) filter any clusters which are close together\n 2) rerun kmeans using the cluster centers to get the grid points\n 2.5) filter any nodes which are close together\n This makes clusters which aren't too close together and also encourages spread throughout the space\n \"\"\"\n # TODO: it seems that it's actually important to bound clusters, not just have them nearby\n # I can try to implement this maybe\n\n node_tol = x_rng[i_d,2]/(3*self.grid_shape[i_d])\n # 1) identify clusters in x. Use more than the final number of grid points\n x_clusters = MiniBatchKMeans( # will be faster for large problems\n n_clusters=np.minimum(3*self.grid_shape[i_d],n_unq[i_d]), n_init=1, max_iter=100, tol=0.001,\n ).fit(np.unique(x[:,i_d]).reshape((-1,1)) # I don't want to recount duplicates more than once\n ).cluster_centers_.reshape((-1,1))\n\n # 1.5) remove clusters which are close together\n x_clusters = uniquetol(x_clusters.squeeze(),\n tol=node_tol/2, # set a loose tol here\n ).reshape((-1,1))\n self.grid_shape[i_d] = np.minimum(x_clusters.size, self.grid_shape[i_d])\n\n if self.grid_shape[i_d] == x_clusters.size: # then place the nodes on the clusters\n self.xg[i_d] = x_clusters\n elif self.grid_shape[i_d] > 2: # perform the second kmeans clustering\n # 2) get the final grid points\n self.xg[i_d] = KMeans(\n n_clusters=self.grid_shape[i_d]-2, n_init=1, max_iter=100, tol=0.001, verbose=False,\n ).fit(np.vstack((x_rng[i_d,0], x_clusters, x_rng[i_d,1])) # add the extreme values back to bias the nodes\n ).cluster_centers_.reshape((-1,1))\n\n # 2.5) remove nodes which are close together\n self.xg[i_d] = uniquetol(self.xg[i_d].squeeze(), tol=node_tol).reshape((-1,1))\n else: # initiaze empty grid, extreme values will be added later\n self.xg[i_d] = np.zeros((0,1))\n\n # sort the inducing points and place nodes at the extreme values\n self.xg[i_d].sort(axis=0)\n self.xg[i_d] = np.vstack((x_rng[i_d,0],self.xg[i_d],x_rng[i_d,1]))\n if np.abs(self.xg[i_d][1,0] - self.xg[i_d][0,0]) < node_tol: #check if too close together at ends\n self.xg[i_d] = np.delete(self.xg[i_d],1,axis=0)\n if np.abs(self.xg[i_d][-1,0] - self.xg[i_d][-2,0]) < node_tol: #check if too close together at ends\n self.xg[i_d] = np.delete(self.xg[i_d],-2,axis=0)\n assert x_rng[i_d,0] == self.xg[i_d][0,0] and x_rng[i_d,1] == self.xg[i_d][-1,0], \"extremum values didn't make it into set\"\n self.grid_shape[i_d] = self.xg[i_d].size\n if xg is not None: # a grid has already been specified so use this instead\n self.xg = np.asarray(xg)\n self.grid_dim = self.xg.shape[0] # number of grid dimensions\n self.grid_shape = np.zeros(self.grid_dim, dtype=int) # number of points along each sub dimension\n self.grid_sub_dim = np.zeros(self.grid_dim, dtype=int) # number of sub dimensions along each grid dim\n for i,X in enumerate(self.xg): # loop over grid dimensions\n assert X.ndim == 2, \"each element in xg must be a 2d array\"\n self.grid_sub_dim[i] = X.shape[1]\n self.grid_shape[i] = X.shape[0]\n self.input_dim = np.sum(self.grid_sub_dim) # total number of dimensions\n self.num_data = np.prod(np.float64(self.grid_shape)) # total number of points on the full grid\n self.eq = None\n\n # plot the grid\n if to_plot is True:\n self.plot(x)\n elif isinstance(to_plot, str):\n self.plot(x, fname=to_plot)", "def make_cells(self, limits):\n low, high = limits\n xs = range(low[0], high[0])\n ys = range(low[1], high[1])\n for x in xs:\n col = []\n for y in ys:\n indices = (x, y)\n 
self.cells[indices] = Patch(self, indices)", "def grid_evaluation(self, x, y, size=256):\n\t\t# Create matrix x and y coordinates\n\t\tL = self.L\n\t\t[xx, yy] = meshgrid(linspace(-L, L, size), linspace(-L, L, size))\n\t\tpts = np.array([xx, yy])\n\t\tux = batch_eval(x, pts)\n\t\tuy = batch_eval(y, pts)\n\t\treturn xx, yy, ux, uy", "def make_grid(N):\n\n x = np.linspace(-2. , 2 , N)\n y = np.linspace(-2. , 2 , N)\n # two evenly spaced grids from -2 to 2\n\n return x, y", "def coord_vecs(self):\n return [np.linspace(x0, x1, nx) for x0, x1, nx in zip(self.mins, self.maxs, self.shape)]", "def __init__(self, grid_height, grid_width):\n self._grid_height = grid_height\n self._grid_width = grid_width\n self._cells = [[EMPTY for dummy_col in range(self._grid_width)]\n for dummy_row in range(self._grid_height)]", "def _griddata(self):\n res = self.cfg.resolution\n\n # Get area of data\n xmin, xmax = np.nanmin(self.x), np.nanmax(self.x)\n ymin, ymax = np.nanmin(self.y), np.nanmax(self.y)\n\n # Add padding\n width = xmax-xmin\n height = ymax-ymin\n pad = np.amax([self.cfg.grid_pad_fraction*width, self.cfg.grid_pad_fraction*height])\n xmin = np.floor(xmin - pad)\n xmax = np.ceil(xmax + pad)\n ymin = np.floor(ymin - pad)\n ymax = np.ceil(ymax + pad)\n\n # Create Grid and no data mask\n self.lrx = np.arange(xmin, xmax+res, res)\n self.lry = np.arange(ymin, ymax+res, res)\n self.dem_x, self.dem_y = np.meshgrid(self.lrx, self.lry)\n self.nonan = np.where(np.logical_or(np.isfinite(self.x), np.isfinite(self.y)))\n\n # Create regular grid\n gridding_algorithm = self.cfg.griddata[\"algorithm\"]\n if gridding_algorithm == \"scipy.griddata\":\n self.dem_z = griddata((self.x[self.nonan].flatten(), self.y[self.nonan].flatten()),\n self.als.elevation[self.nonan].flatten(),\n (self.dem_x, self.dem_y),\n **self.cfg.griddata[\"keyw\"])\n else:\n raise NotImplementedError(\"Gridding algorithm: %s\" % gridding_algorithm)\n\n self.dem_z = np.ma.array(self.dem_z)\n self.dem_mask = np.zeros(self.dem_z.shape, dtype=np.bool)", "def test_Grid_get_x_vec_and_y_vec(mock_grid):\n\n # img_dim = (193, 193)\n # spacing = 64\n\n exp = [0, 64, 128]\n assert np.allclose(exp, mock_grid.x_vec)\n\n exp = [0, 64, 128, 192]\n assert np.allclose(exp, mock_grid.y_vec)", "def ndgrid(*args,**kwargs):\n kwargs['indexing'] = 'ij'\n return meshgrid(*args,**kwargs)", "def grid_unit_cell(self):\n from cctbx import uctbx\n a = self.unit_cell_parameters[0] / self.unit_cell_grid[0]\n b = self.unit_cell_parameters[1] / self.unit_cell_grid[1]\n c = self.unit_cell_parameters[2] / self.unit_cell_grid[2]\n alpha,beta,gamma = self.unit_cell_parameters[3:6]\n return uctbx.unit_cell((a,b,c,alpha,beta,gamma))", "def _createGrid(self, dimensions, density):\n import math\n\n xmin, xmax = dimensions[0], dimensions[1]\n imin, imax = dimensions[2], dimensions[3]\n\n hsteps = math.ceil((xmax - xmin)/density)\n vsteps = math.ceil((imax - imin)/density)\n\n hgrids = int(math.ceil(hsteps/self.gridsize))\n vgrids = int(math.ceil(vsteps/self.gridsize))\n\n grid_inc = density * self.gridsize\n \n #Add one inside the range() because you want to include the last one\n horizontal = [[xmin + (x * grid_inc), xmin + ((x+1) * grid_inc)] for x in range(hgrids)]\n vertical = [[imin + (im * grid_inc), imin + ((im+1) * grid_inc)] for im in range(vgrids)]\n\n #This makes the negative to positive less confusing, positive is at index = 0\n vertical.reverse()\n\n grid_map = []\n\n for im in vertical:\n temp = []\n for x in horizontal:\n my_x = list(x)\n my_x.extend(im)\n 
temp.append(my_x)\n grid_map.append(temp)\n\n return grid_map", "def grids(self):\n x = self.xvalues\n if self.ndim == 1:\n return x\n if self.ndim == 2:\n return x[None, :], x[:, None]\n if self.ndim == 3:\n return x[None, :, None], x[:, None, None], x[None, None, :]", "def cell_edges2d_cartesian(self, axis2):", "def make_grid(data=None, xmin=-5, xmax=5, ymin=-5, ymax=5, n_points = 400):\n if data is not None:\n xmin, ymin = np.min(data, axis = 0)\n xmax, ymax = np.max(data, axis = 0)\n\n plt.ylim(ymin, ymax)\n plt.xlim(xmin, xmax)\n\n x, y = np.meshgrid(np.linspace(xmin, xmax, n_points), np.linspace(ymin, ymax, n_points))\n grid = np.c_[x.ravel(), y.ravel()] # grid has n_points ^2 row and 2 columns\n return x, y, grid", "def create_grid(grid):\r\n for i in range (4):\r\n grid.append ([])\r\n for j in range (4):\r\n grid[i].append (0)", "def _make_rectangular_grid(self, x_range, y_range, step):\n if not isinstance(x_range, (tuple, list, np.ndarray)):\n raise TypeError((f\"x_range must be a tuple, list or NumPy array, \"\n f\"not {type(x_range)}.\"))\n if not isinstance(y_range, (tuple, list, np.ndarray)):\n raise TypeError((f\"y_range must be a tuple, list or NumPy array, \"\n f\"not {type(y_range)}.\"))\n if len(x_range) != 2 or len(y_range) != 2:\n raise ValueError(\"x_range and y_range must have 2 elements.\")\n if isinstance(step, (tuple, list, np.ndarray)):\n if len(step) != 2:\n raise ValueError(f\"If 'step' is a tuple, it must provide \"\n f\"two values (x_step, y_step), not \"\n f\"{len(step)}.\")\n x_step = step[0]\n y_step = step[1]\n else:\n x_step = y_step = step\n # Build the grid from `x_range`, `y_range`. If the range is 0, make\n # sure that the number of steps is 1, because linspace(0, 0, num=5)\n # will return a 1x5 array:\n xdiff = np.abs(np.diff(x_range))\n nx = int(np.round(xdiff / x_step) + 1) if xdiff != 0 else 1\n self._xflat = np.linspace(*x_range, num=nx, dtype=np.float32)\n ydiff = np.abs(np.diff(y_range))\n ny = int(np.round(ydiff / y_step) + 1) if ydiff != 0 else 1\n self._yflat = np.linspace(*y_range, num=ny, dtype=np.float32)\n self.x, self.y = np.meshgrid(self._xflat, self._yflat, indexing='xy')\n self.shape = self.x.shape\n self.reset()", "def _register_grid(\n self, grid: Grid, x: ListOrSlice, y: ListOrSlice\n ):\n self.grid = grid\n self.grid.objects.append(self)\n if self.name is not None:\n if not hasattr(grid, self.name):\n setattr(grid, self.name, self)\n else:\n raise ValueError(\n f\"The grid already has an attribute with name {self.name}\"\n )\n self.x = self._handle_slice(x, max_index=self.grid.Nx)\n self.y = self._handle_slice(y, max_index=self.grid.Ny)\n\n self.Nx = abs(self.x.stop - self.x.start)\n self.Ny = abs(self.y.stop - self.y.start)\n\n self.grid._set_material_properties((self.permittivity, self.permeability), positions=(x,y)) \n # set the permittivity values of the object at its border to be equal\n # to the grid permittivity. This way, the object is made symmetric.\n # no idea what this is. so delete for clutter\n # ik snap waarom: eps_x aan linkerkant van object is eps_object maar aan rechterkant is eps_x,y,z allemaal van grid. 
dat y en z van grid zijn is oké maar eps_x aan rechterrand toch best wijzen IN het object (het is niet alsof eps_x een vectoriele afhankelijkheid heeft hé)\n \n #self.grid.permittivity[self.x, self.y] = np.inf ", "def discretegrid(xy, w, nt):\n # Make grid\n x = nnp.linspace(w[0], w[1], nt[0] + 1)\n y = nnp.linspace(w[2], w[3], nt[1] + 1)\n X, Y = nnp.meshgrid(x, y)\n\n # Count points\n N = nnp.zeros([nt[1], nt[0]])\n for i in range(nt[0]):\n for j in range(nt[1]):\n ind = (xy[:, 0] >= x[i]) & (xy[:, 0] < x[i + 1]) & (xy[:, 1] >= y[j]) & (xy[:, 1] < y[j + 1])\n N[j, i] = nnp.sum(ind)\n return X[:-1, :-1].T, Y[:-1, :-1].T, N.T", "def initialise_grid(self, y, x, starting_value):\n # Create a grid of the specified size\n self.grid = np.zeros( (y, x), np.int8, 'C')\n \n # Record the sizes in the class variables\n self.x_len = x\n self.y_len = y\n \n # Set the initial values of the array\n self.grid += starting_value", "def set_cells(self, val=None):\t\r\n self._cells = \\\r\n (self.nx-1 if self.nx>1 else 1)* \\\r\n (self.ny-1 if self.ny>1 else 1)* \\\r\n (self.nz-1 if self.nz>1 else 1)", "def grid(x, y, z, resX=100, resY=100):\n x = x.flatten()\n y = y.flatten()\n z = z.flatten()\n xi = linspace(min(x), max(x), resX)\n yi = linspace(min(y), max(y), resY)\n zi = griddata(x, y, z, xi, yi, interp='linear')\n return xi, yi, zi", "def make_grid(grid_size=(10, 10)):\n return np.zeros(grid_size, dtype=np.int16)", "def define_grid(self):\n self.h_shape = int(\n np.round((self.h_stop - self.h_start) / self.h_step, 2)) + 1\n self.k_shape = int(\n np.round((self.k_stop - self.k_start) / self.k_step, 2)) + 1\n self.l_shape = int(\n np.round((self.l_stop - self.l_start) / self.l_step, 2)) + 1\n self.grid_origin = [self.h_start, self.k_start, self.l_start]\n self.grid_step = [int(np.rint(1.0/self.h_step)),\n int(np.rint(1.0/self.k_step)),\n int(np.rint(1.0/self.l_step))]\n self.grid_shape = [self.h_shape, self.k_shape, self.l_shape]\n self.grid_basis = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]", "def create_grid(data, drone_altitude, safety_distance):\n\n # minimum and maximum north coordinates\n north_min = np.floor(np.amin(data[:, 0] - data[:, 3]))\n north_max = np.ceil(np.amax(data[:, 0] + data[:, 3]))\n print(0, north_max - north_min)\n\n # minimum and maximum east coordinates\n east_min = np.floor(np.amin(data[:, 1] - data[:, 4]))\n east_max = np.ceil(np.amax(data[:, 1] + data[:, 4]))\n print(0, east_max - east_min)\n\n # given the minimum and maximum coordinates we can\n # calculate the size of the grid.\n north_size = int(np.ceil(north_max - north_min))\n east_size = int(np.ceil(east_max - east_min))\n\n # Initialize an empty grid\n grid = np.zeros((north_size, east_size))\n\n # Populate the grid with obstacles\n print(data.shape[0])\n for i in range(data.shape[0]):\n north, east, alt, d_north, d_east, d_alt = data[i, :]\n # Determine which cells contain obstacles\n nc = int(north - north_min)\n ec = int(east - east_min)\n dn = int(d_north)\n de = int(d_east)\n sd = int(safety_distance)\n x0 = int(ec - (de + sd))\n y0 = int(nc - (dn + sd))\n xm = int(ec + (de + sd))\n ym = int(nc + (dn + sd))\n nm = north_max - north_min\n em = east_max - east_min\n for e in range(x0, xm):\n for n in range(y0, ym):\n # skip out of range conditions\n if e < 0:\n continue\n if e >= em:\n continue\n if n < 0:\n continue\n if n >= nm:\n continue\n if (alt + d_alt + safety_distance) <= drone_altitude:\n continue\n # plot it\n grid[n][e] = 1\n\n return grid", "def __init__(self, dx = 1., dy = 1., nx = 1, ny = 1,\n 
_RepresentationClass=_Grid2DRepresentation, _TopologyClass=_Mesh2DTopology):\n\n self.args = {\n 'dx': dx,\n 'dy': dy,\n 'nx': nx,\n 'ny': ny\n }\n\n self.nx = nx\n self.ny = ny\n\n self.numberOfHorizontalFaces = self.nx * (self.ny + 1)\n self.numberOfVerticalFaces = self.ny * (self.nx + 1)\n self.numberOfEachDiagonalFaces = self.nx * self.ny\n\n self.dx = PhysicalField(value = dx)\n scale = PhysicalField(value = 1, unit = self.dx.unit)\n self.dx /= scale\n\n self.dy = PhysicalField(value = dy)\n if self.dy.unit.isDimensionless():\n self.dy = dy\n else:\n self.dy /= scale\n\n self.numberOfCornerVertices = (self.nx + 1) * (self. ny + 1)\n self.numberOfCenterVertices = self.nx * self.ny\n self.numberOfTotalVertices = self.numberOfCornerVertices + self.numberOfCenterVertices\n\n self.offset = (0, 0)\n\n vertices = self._createVertices()\n faces = self._createFaces()\n\n cells = self._createCells()\n cells = numerix.sort(cells, axis=0)\n\n Mesh2D.__init__(self, vertices, faces, cells,\n _RepresentationClass=_RepresentationClass, _TopologyClass=_TopologyClass)\n\n self.scale = scale", "def make_xy_grid(samples_x, samples_y=None, radius=1):\n if samples_y is None:\n samples_y = samples_x\n x = e.linspace(-radius, radius, samples_x, dtype=config.precision)\n y = e.linspace(-radius, radius, samples_y, dtype=config.precision)\n xx, yy = e.meshgrid(x, y)\n return xx, yy", "def make_grid(x1_points, **kwargs):\n x2_points = kwargs.pop('x2_points', x1_points)\n x1min = kwargs.pop('x1min', 0.0)\n x1max = kwargs.pop('x1max', 1.0)\n x2min = kwargs.pop('x2min', 0.0)\n x2max = kwargs.pop('x2max', 1.0)\n if kwargs:\n raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))\n x1_setup = np.linspace(x1min, x1max, num=x1_points)\n # flip x2 order to have y increacing on plots' verticle axis\n x2_setup = np.linspace(x2min, x2max, num=x2_points)[::-1]\n x1_grid, x2_grid = np.meshgrid(x1_setup, x2_setup)\n return x1_grid, x2_grid", "def to_cell_coordinates(self,ax):\n self.xx_cells = self._Mesh.x2cell(self.xx_cells)", "def _generate_cells(self) -> None:\n for i in range(15):\n for j in range(15):\n c = Cell(x=i, y=j)\n c.answer = self.puzzle.solution[j*self.width+i]\n self.cells[(j, i)] = c # row, col", "def cell_from_xy(self,x,y):\n return self.cell_array.item((x,y))", "def mesh(self, centered=True):\n xx = np.linspace(0, self.Lx, self.Nx, endpoint=False)\n yy = np.linspace(0, self.Ly, self.Ny, endpoint=False)\n\n if centered:\n xx += self.hx/2\n yy += self.hy/2\n\n return np.meshgrid(xx, yy, indexing=\"ij\")", "def buildGrid(self, plot=False):\r\n\r\n print(\"Constructing grid\")\r\n # print(\"Grid dims\", self.ne, self.nn, self.nz)\r\n # print(\"Num points\", 2*(self.ne+1)*(self.nn+1)*3, len(self.coords))\r\n\r\n # number of edges\r\n self.ndx = self.ne + 1\r\n self.ndy = self.nn + 1\r\n self.ndz = self.nz + 1\r\n\r\n # extract the triplets\r\n self.points = {}\r\n self.points[\"e\"] = self.coords[0::3]\r\n self.points[\"n\"] = self.coords[1::3]\r\n self.points[\"z\"] = self.coords[2::3]\r\n\r\n print('points e')\r\n print(self.points[\"e\"])\r\n\r\n # Here are the coordinates\r\n self.X0 = np.reshape(self.points[\"e\"][0::2] , (self.ndx,self.ndy), order=\"F\")\r\n self.Y0 = np.reshape(self.points[\"n\"][0::2] , (self.ndx,self.ndy), order=\"F\")\r\n self.Z0 = np.reshape(self.points[\"z\"][0::2] , (self.ndx,self.ndy), order=\"F\")\r\n\r\n self.X1 = np.reshape(self.points[\"e\"][1::2] , (self.ndx,self.ndy), order=\"F\")\r\n self.Y1 = np.reshape(self.points[\"n\"][1::2] , (self.ndx,self.ndy), order=\"F\")\r\n 
self.Z1 = np.reshape(self.points[\"z\"][1::2] , (self.ndx,self.ndy), order=\"F\")\r\n #\r\n # # visualize\r\n # if plot:\r\n # print(\"plotting\")\r\n # fig = plt.figure()\r\n # ax = fig.add_subplot(111, projection='3d')\r\n # ax.plot_wireframe(f2m*self.X0, f2m*self.Y0, f2m*self.Z0, rstride=1, cstride=1)\r\n # ax.plot_wireframe(f2m*self.X1, f2m*self.Y1, f2m*self.Z1, rstride=1, cstride=1)\r\n # plt.show()\r", "def __init__(self) -> None:\n self.row = 6\n self.col = 7\n self.grid = []\n\n for y in range(self.row):\n temp_row = []\n for x in range(self.col):\n temp_row.append(\" \")\n self.grid.append(temp_row)", "def __init__(self, width, height):\r\n\t\tself.grid = np.zeros(width*height, dtype=np.bool).reshape(width,height)\r\n\t\tself.width = width\r\n\t\tself.height = height", "def getGrid(x,y,w,h,x_step=1, y_step=1):\n X,Y = np.mgrid[x:x+w:x_step, y:y+h:y_step]\n return np.array(np.vstack((X.flatten(),Y.flatten())).transpose(), dtype=np.float32)", "def calc_grid(self):\n return int(self._posn.x / cell_size), int(self._posn.y / cell_size)", "def generate_all_locations(grid, shape):", "def _cellTopology(self):\n facesPerCell = self.mesh._facesPerCell\n nodesPerFace = self.mesh._nodesPerFace\n\n def faceCountsMatch(targetCounts):\n if len(targetCounts) > nodesPerFace.shape[0]:\n # pad nodesPerFace with zeros\n paddedNodesPerFace = numerix.zeros((len(targetCounts), nodesPerFace.shape[1]), dtype=numerix.INT_DTYPE)\n paddedNodesPerFace[:nodesPerFace.shape[0],:] = nodesPerFace\n\n paddedTargetCounts = numerix.array(targetCounts)[..., numerix.newaxis]\n else:\n # pad target face node count with zeros\n paddedTargetCounts = numerix.concatenate((targetCounts,\n [0] * (self.mesh._maxFacesPerCell - len(targetCounts))))\n paddedTargetCounts = paddedTargetCounts[..., numerix.newaxis]\n\n paddedNodesPerFace = nodesPerFace\n\n return ((facesPerCell == len(targetCounts))\n & (paddedNodesPerFace == paddedTargetCounts).all(axis=0))\n\n cellTopology = numerix.empty((self.mesh.numberOfCells,), dtype=numerix.ubyte)\n\n t = self._elementTopology\n\n if self.mesh.dim == 1:\n cellTopology[:] = t[\"line\"]\n elif self.mesh.dim == 2:\n cellTopology[:] = t[\"polygon\"]\n cellTopology[faceCountsMatch([2, 2, 2])] = t[\"triangle\"]\n cellTopology[faceCountsMatch([2, 2, 2, 2])] = t[\"quadrangle\"]\n else:\n cellTopology[:] = t[\"unknown\"]\n cellTopology[faceCountsMatch([3, 3, 3, 3])] = t[\"tetrahedron\"]\n cellTopology[faceCountsMatch([4, 4, 4, 4, 4, 4])] = t[\"hexahedron\"]\n cellTopology[faceCountsMatch([4, 4, 4, 3, 3])] = t[\"prism\"]\n cellTopology[faceCountsMatch([4, 3, 3, 3, 3])] = t[\"pyramid\"]\n\n return cellTopology", "def grid_points(self):\n for i in range(self.rows):\n for j in range(self.cols):\n min_lat,max_lat,min_lon,max_lon = self.coords_to_min_max_lat_lon((i,j))\n if i == 0:\n print_gps(max_lat,max_lon,\"grid\")\n if j == 0:\n print_gps(max_lat,min_lon,\"grid\")\n if j == 0:\n print_gps(min_lat,min_lon,\"grid\")\n print_gps(min_lat,max_lon,\"grid\")", "def xstagg_xy_coordinates(self):\n\n x_s = self.corner_grid.x0 + np.arange(self.nx+1) * self.dx\n y = self.center_grid.y0 + np.arange(self.ny) * self.dy\n return np.meshgrid(x_s, y)" ]
[ "0.75481325", "0.7334674", "0.7334674", "0.72951424", "0.72951424", "0.7236079", "0.72319734", "0.72218466", "0.7145317", "0.7145317", "0.7145317", "0.7145317", "0.71451616", "0.71191305", "0.704927", "0.7037136", "0.7016178", "0.6942421", "0.68876344", "0.68683", "0.68584913", "0.67791545", "0.67647004", "0.67285097", "0.6723542", "0.6721065", "0.6674511", "0.6573166", "0.6565801", "0.6548803", "0.65273714", "0.64235616", "0.63706243", "0.6369578", "0.6327459", "0.6318851", "0.62674206", "0.6267352", "0.6243586", "0.6242952", "0.6215329", "0.6206554", "0.6195258", "0.6188214", "0.61878115", "0.6166253", "0.6153337", "0.61457044", "0.6132005", "0.6130558", "0.6128079", "0.6112199", "0.6110731", "0.6106297", "0.6105902", "0.6102363", "0.6087405", "0.60793084", "0.6077537", "0.6063585", "0.6059555", "0.60564107", "0.60423857", "0.6040372", "0.6037874", "0.60216475", "0.5980036", "0.59763527", "0.5975973", "0.59688187", "0.5954", "0.5948338", "0.5947915", "0.59468657", "0.5945454", "0.5945112", "0.5942673", "0.59409076", "0.5929926", "0.59291613", "0.59286207", "0.5925809", "0.59221166", "0.5918879", "0.5916874", "0.59167206", "0.5908571", "0.59027874", "0.589365", "0.58934504", "0.58902055", "0.5882642", "0.58720154", "0.5870334", "0.5864912", "0.5857886", "0.5848895", "0.5844711", "0.58430326", "0.58376527" ]
0.69724435
17
Convert two dimensional index to 1d.
def one_dim_index(self, i, j):
    return int(i + j * self.nx)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def index_to_ijk(self, index):\n return self.indices_to_ijk_array([index])[0]", "def to_sparse(a):\n flat = a.flatten()\n indices = np.nonzero(flat)\n values = flat[indices]\n return indices[0], values", "def from_2D_to_1D(constant):\n if isinstance(constant, np.ndarray) and constant.ndim == 2:\n return np.asarray(constant)[:, 0]\n else:\n return constant", "def indXtoJ(indX):\n return np.unravel_index(indX % xx.size, xx.shape)", "def flat_to_one_hot(val, ndim):\n shape =np.array(val).shape\n v = np.zeros(shape + (ndim,))\n if len(shape) == 1:\n v[np.arange(shape[0]), val] = 1.0\n else:\n v[val] = 1.0\n return v", "def reconstruct_input(self, ix):", "def index_to_single_index(a,b, resolution):\n return a*resolution+b", "def normalize_index(x):\n if x.__class__ in native_types:\n return x\n elif x.__class__ in sequence_types:\n # Note that casting a tuple to a tuple is cheap (no copy, no\n # new object)\n x = tuple(x)\n elif hasattr(x, '__iter__') and isinstance(x, collections_Sequence):\n if isinstance(x, string_types):\n # This is very difficult to get to: it would require a user\n # creating a custom derived string type\n return x\n sequence_types.add(x.__class__)\n x = tuple(x)\n else:\n return x\n\n x_len = len(x)\n i = 0\n while i < x_len:\n _xi = x[i]\n _xi_class = _xi.__class__\n if _xi_class in native_types:\n i += 1\n elif _xi_class in sequence_types:\n x_len += len(x[i]) - 1\n # Note that casting a tuple to a tuple is cheap (no copy, no\n # new object)\n x = x[:i] + tuple(x[i]) + x[i + 1:]\n elif _xi_class is not tuple and isinstance(_xi, collections_Sequence):\n if isinstance(_xi, string_types):\n # This is very difficult to get to: it would require a\n # user creating a custom derived string type\n i += 1\n else:\n sequence_types.add(_xi_class)\n x_len += len(x[i]) - 1\n x = x[:i] + tuple(x[i]) + x[i + 1:]\n else:\n i += 1\n\n if x_len == 1:\n return x[0]\n return x", "def indices_to_one_hot(data, nb_classes):\n\ttargets = np.array(data).reshape(-1)\n\treturn np.eye(nb_classes)[targets]", "def to_flat_index(self) -> Index: # type: ignore[override]\n return Index(self._values, tupleize_cols=False)", "def oneHot(index, n):\n x = np.zeros(n)\n x[index] = 1\n return x", "def state_from_id(index, dims_state_grid):\n\n entries = [index] * len(dims_state_grid)\n for i in range(1, len(dims_state_grid)):\n value = 1\n for j in range(i, len(dims_state_grid)):\n value *= dims_state_grid[j]\n for k in range(i - 1, len(dims_state_grid)):\n if k == i - 1:\n entries[k] //= value\n else:\n entries[k] %= value\n\n out = np.array(object=entries)\n\n return out", "def indexes_to_one_hot(indexes, n_dims=None):\n indexes = indexes.type(torch.int64).view(-1, 1)\n n_dims = n_dims if n_dims is not None else int(torch.max(indexes)) + 1\n one_hots = torch.zeros(indexes.size()[0], n_dims).scatter_(1, indexes, 1)\n one_hots = one_hots.view(*(indexes.shape-1))\n return one_hots.type(torch.LongTensor)", "def todense(self):\n d = np.zeros(self.shape)\n for index,value in zip(self.index, self.value):\n d[index] = value\n return d", "def one_hot(index, dims, dtype=np.uint8):\n if isinstance(index, int):\n ret = np.zeros((dims,), dtype)\n ret[index] = 1\n elif isinstance(index, (list, tuple)):\n seq_len = len(index)\n ret = np.zeros((seq_len, dims), dtype)\n ret[range(seq_len), index] = 1.0\n else:\n raise ValueError('index should be int or list(tuple) of int.')\n return ret", "def turn_to_one_hot(self,length,ind):\n one_hot = [0 for _ in range(length)]\n if (ind >= len(one_hot)):\n one_hot[-1] = 1\n else:\n 
one_hot[ind] = 1\n return one_hot", "def qindex2index(index):\n r = index.row()\n c = index.column()\n if c > 0x10:\n return (0x10 * r) + c - 0x11\n else:\n return (0x10 * r) + c", "def encode_to_flat_array_index(row, column, matrix):\n return row * matrix.cols + column", "def single_index_to_index(x, resolution):\n return x//resolution, x%resolution", "def index2d(src, idx):\n broadcast_to = P.BroadcastTo(idx.shape)\n offs = broadcast_to(P.range(Tensor(0, mindspore.int32),\n Tensor(idx.shape[0], mindspore.int32),\n Tensor(1, mindspore.int32))[:, None])\n idx = idx + (offs()) * idx.shape[1]\n\n return src.view(-1)[idx.view(-1)].view(idx.shpe)", "def indices_to_one_hot(data, nb_classes):\n targets = np.array(data).reshape(-1)\n return np.eye(nb_classes)[targets]", "def indices_to_one_hot(data, nb_classes):\n targets = np.array(data).reshape(-1)\n return np.eye(nb_classes)[targets]", "def indices_to_one_hot(data, nb_classes):\n targets = np.array(data).reshape(-1)\n return np.eye(nb_classes)[targets]", "def indices_to_one_hot(data, nb_classes):\n targets = np.array(data).reshape(-1)\n return np.eye(nb_classes)[targets]", "def _array_name_ND_to_1D(self, array_name):\n\n if array_name in self._split_arrays:\n array_name_1D = self._split_arrays[array_name]\n else:\n array_name_1D = [array_name + \"_\" + i for i in ('x', 'y', 'z')]\n\n return array_name_1D", "def one_hot(index, dims, dtype=np.uint8):\n\n seq_len = len(index)\n ret = np.zeros((seq_len, dims), dtype)\n for i in range(seq_len):\n ret[i][index[i]]=1\n\n return ret", "def convert_to_one_hot(a):\n a = a[:, 0]\n a = a.astype(int)\n A = np.zeros((len(a), config.num_classes))\n A[np.arange(len(a)), a] = 1\n return A", "def to_1d_array(self):\n return reshape_fns.to_1d(self._obj, raw=True)", "def _idxs_postformat_array(self):\n self.idxs = np.array(self.idxs)", "def as_one_hot(ind, n):\n vec = np.zeros(n)\n vec[ind] = 1\n return vec", "def one_hot(index):\n\toutput = np.zeros(100)\n\toutput[index] = 1\n\treturn output", "def one_hot_to_flat(val):\n idxs = np.array(np.where(val == 1.0))[-1]\n if len(val.shape) == 1:\n return int(idxs)\n return idxs", "def convert_index(idx, decomposition, mode='glb_to_loc'):\n if is_integer(idx) or isinstance(idx, slice):\n return decomposition(idx, mode=mode)\n elif isinstance(idx, (tuple, list)):\n return [decomposition(i, mode=mode) for i in idx]\n elif isinstance(idx, np.ndarray):\n return np.vectorize(lambda i: decomposition(i, mode=mode))(idx)\n else:\n raise ValueError(\"Cannot convert index of type `%s` \" % type(idx))", "def to_sparse_representation(label, batch_idx):\n indices = []\n vals = []\n\n for i, idx in enumerate(batch_idx):\n for j, c in enumerate(label[idx]):\n indices.append([i, j])\n vals.append(c)\n\n shape = [len(batch_idx), np.max(indices, axis=0)[1] + 1]\n\n return np.array(indices), np.array(vals), np.array(shape)", "def retrun_1(x):\n ret = np.ones(len(x))\n return ret", "def _simplify_index(indices, shape):\n # First clean up and check indices, unpacking ellipsis and boolean arrays\n indices = da.slicing.normalize_index(indices, shape)\n out = []\n axis = 0\n for index in indices:\n if index is not np.newaxis:\n length = shape[axis]\n axis += 1\n # If there is 1-D fancy index on this axis, try to convert to slice\n if isinstance(index, np.ndarray) and index.ndim == 1:\n try:\n index = _range_to_slice(index)\n except ValueError:\n pass\n else:\n index = da.slicing.normalize_slice(index, length)\n out.append(index)\n return tuple(out)", "def to_one_hot(v):\n n = len(v)\n m = 
max(v) + 1\n out = np.zeros((n, m))\n out[np.arange(n), v] = 1\n return out", "def to_categorical(index_label, num_classes):\n return index_label, np.eye(num_classes, dtype='uint8')[index_label]", "def _binary_2d_label_to_sparse_value(labels):\n indices = []\n values = []\n batch = 0\n for row in labels:\n label = 0\n xi = 0\n for x in row:\n if x == 1:\n indices.append([batch, xi])\n values.append(label)\n xi += 1\n else:\n assert x == 0\n label += 1\n batch += 1\n shape = [len(labels), len(labels[0])]\n return sparse_tensor.SparseTensorValue(\n np.array(indices, np.int64), np.array(values, np.int64),\n np.array(shape, np.int64))", "def flatten_idx(idx, axis=-1):\n idx = numpy.asanyarray(idx)\n if not idx.dtype.kind in ('i', 'u'):\n idx = idx.astype(int)\n preshape = idx.shape[:axis]\n postshape = idx.shape[axis:]\n stride = int(numpy.product(postshape[1:])) #1 if applied to empty\n #The index on this axis moves stride elements in flat\n outidx = idx.flatten() * stride #makes a copy\n #First add the offsets to get us to [..., idx @ axis = 0, 0...)\n outidx += numpy.repeat(\n numpy.arange(0, len(outidx), int(numpy.product(postshape)),\n dtype=idx.dtype),\n numpy.product(postshape))\n #Now offsets for non-zero on the trailing axes [0, 0, ... 0@axis, ...]\n outidx += numpy.tile(numpy.arange(0, stride, dtype=idx.dtype),\n int(numpy.product(preshape)) * idx.shape[axis])\n return outidx", "def normalize_index(x, input_scale, output_scale):\n return np.round(x / input_scale[2] * output_scale[2])", "def aind(x):\n\treturn tuple(x.T)", "def to_onehot(indexes, num, dtype=None):\n if dtype is None:\n dtype = indexes.dtype\n onehot = torch.zeros(indexes.shape + (num,),\n dtype=dtype, device=indexes.device)\n onehot.scatter_(-1, indexes.unsqueeze(-1).type(torch.long), 1)\n return onehot", "def test_single_index(self):\n dset = self.f.create_dataset('x', (1,), dtype='i1')\n out = dset[0]\n self.assertIsInstance(out, np.int8)", "def to_sparse(x):\n x_typename = torch.typename(x).split('.')[-1]\n sparse_tensortype = getattr(torch.sparse, x_typename)\n\n indices = torch.nonzero(x)\n if len(indices.shape) == 0: # if all elements are zeros\n return sparse_tensortype(*x.shape)\n indices = indices.t()\n values = x[tuple(indices[i] for i in range(indices.shape[0]))]\n return sparse_tensortype(indices, values, x.size())", "def _to_flat_index(self, idx_in):\n idx_in = tuple([np.array(z, ndmin=1, copy=False) for z in idx_in])\n msk = np.all(np.stack([t < n for t, n in zip(idx_in, self.shape)]), axis=0)\n idx = np.ravel_multi_index(\n tuple([t[msk] for t in idx_in]), self.shape, mode=\"wrap\"\n )\n\n return idx, msk", "def operator_1_to_01(operator1):\n N = len(operator1)\n operator01 = np.zeros((N + 1, N + 1))\n operator01[1:, 1:] = operator1\n return operator01", "def column_convertor(x):\n x.shape = (1, x.shape[0])\n return x", "def onehot(index):\n classNum=2#1\n onehot = np.zeros(classNum)#这代表种类类型\n onehot[index] = 1.0\n return onehot", "def indices_to_one_hot(cls_indeces, nb_classes):\n targets = np.array(cls_indeces).reshape(-1)\n return np.eye(nb_classes)[targets]", "def to_onehot(value, dim):\n one_hot = torch.zeros(value.shape[0], dim)\n one_hot[torch.arange(value.shape[0]), value.long()] = 1\n return one_hot", "def binary_to_one_hot(arr: np.ndarray) -> np.ndarray:\n res = np.zeros((arr.shape[0], 2))\n res[np.where(arr == 1)[0], 0] = 1\n res[np.where(arr == 0)[0], 1] = 1\n return res", "def _normalize_index(self, index: int):\n if index < 0:\n return len(self) + index\n else:\n return index", "def 
one_hot(indices, depth):\n\n encoded_indicies = torch.zeros(indices.size() + torch.Size([depth]))\n if indices.is_cuda:\n encoded_indicies = encoded_indicies.cuda() \n index = indices.view(indices.size()+torch.Size([1]))\n encoded_indicies = encoded_indicies.scatter_(1,index,1)\n\n return encoded_indicies", "def ijk_to_index(self, ijk):\n return self.ijk_array_to_indices([ijk])[0]", "def one_hot(indices, depth):\n # print(indices)\n encoded_indices = torch.zeros(indices.size() + torch.Size([depth])).cuda()\n index = indices.view(indices.size()+torch.Size([1]))\n encoded_indices = encoded_indices.scatter_(1,index,1)\n \n return encoded_indices", "def map_whole_index_to_train(train_idx, index_in_whole):\n if isinstance(index_in_whole, MultiLabelIndexCollection):\n ind_type = 2\n elif isinstance(index_in_whole, IndexCollection):\n ind_type = 1\n else:\n raise TypeError(\"index_in_whole must be one of {IndexCollection, MultiLabelIndexCollection} type.\")\n\n tr_ob = []\n for entry in index_in_whole:\n if ind_type == 2:\n assert entry[0] in train_idx\n ind_in_train = np.argwhere(train_idx == entry[0])[0][0]\n tr_ob.append((ind_in_train, entry[1]))\n else:\n assert entry in train_idx\n tr_ob.append(np.argwhere(train_idx == entry)[0][0])\n if ind_type == 2:\n return MultiLabelIndexCollection(tr_ob)\n else:\n return IndexCollection(tr_ob)", "def get(self, idx_in):\n shape_out = idx_in[0].shape\n idx_flat_in, msk_in = self._to_flat_index(idx_in)\n idx, msk = find_in_array(idx_flat_in, self.idx)\n val_out = np.full(shape_out, self._fill_value)\n val_out.flat[np.flatnonzero(msk_in)[msk]] = self._data[idx[msk]]\n return np.squeeze(val_out)", "def f1to2(x):\n assert_equal(x.ndim, 1)\n return (x[::-1] * x[1:,None]).view(cls)", "def _binary_2d_label_to_sparse(labels):\n return sparse_tensor.SparseTensor.from_value(\n _binary_2d_label_to_sparse_value(labels))", "def conver1D(array):\n l = array.shape\n total = np.zeros((0, l[1] * l[2]), dtype=np.float32)\n i = 0\n for i in range(24):\n tempData = array[i]\n array1D = []\n for x in tempData:\n for s in x:\n array1D.append(s)\n total = np.insert(total, i, array1D, axis=0)\n return total", "def _getitem_1d(self, pos):\n # Check if pos has multiple elements.\n if isinstance(pos, OneDimGrid):\n return self._getitem_1d(pos=pos.elements)\n elif isinstance(pos, slice):\n return self._getitem_1d(_conv_slice_to_list(slice_obj=pos, stop_def=self.master.shape[0]))\n elif isinstance(pos, np.ndarray):\n return self._getitem_1d(pos.tolist())\n elif isinstance(pos, list):\n return np.array([self._getitem_1d(p) for p in pos])\n elif pos is None:\n raise TypeError(\"_AbsToPhyConverter._getitem_1d does not accept None as its input.\")\n else:\n # pos is a single element.\n for i, e in np.ndenumerate(self.master.elements):\n if (pos - e) % self.master.width == 0:\n return int(round((pos - e) / self.master.width)) * self.master.elements.shape[0] + i[0]\n return None # no matched coordinate", "def transform(self, x):\n res = [x[i] for i in range(len(x))\n if i not in self.index_value_pairs]\n return res if isinstance(x, list) else np.asarray(res)", "def check_index(i):\n\n i = asarray(i)\n if (i.ndim > 1) or (size(i) < 1):\n raise Exception(\"Index must be one-dimensional and non-singleton\")\n\n return i", "def compute_unary_set_mappings(indexing, counts): \n ret = np.zeros_like(indexing)-1\n for vertex,index in enumerate(indexing):\n if counts[index] == 1:\n ret[index] = vertex\n return ret", "def To1hot(label,num_class):\n onehot = np.zeros(num_class)\n onehot[label] = 1\n 
return onehot", "def _view_(a):\r\n return a.view((a.dtype[0], len(a.dtype.names)))", "def inversion(self, index=0):\r\n i = [(j[0]+index) % 12 for j in self.__full]\r\n return TWToneMatrix(i)", "def pndindex(*args):\r\n return np.ndindex(*args)", "def get_one_hot(size, ind):\n one_hot = np.zeros((size,))\n one_hot[ind] = 1\n return one_hot", "def from_1D_to_2D(constant):\n if isinstance(constant, np.ndarray) and constant.ndim == 1:\n return np.mat(constant).T\n else:\n return constant", "def xy_to_index(x, y):\n index = y * columns + x\n return index", "def unstacked_index(size, index):\n return index % size, index // size", "def vector_indx_to_map_matrix_indx(index,senzory_map):\n xs = dict(zip(np.unique(senzory_map[:,0]), it.count()))\n ys = dict(zip(np.negative(np.unique(senzory_map[:,1])), it.count()))\n x, y = senzory_map[index]\n return ys[y],xs[x]", "def indices_one_hot(labels_indices, num_classes=10):\n\n num_labels = labels_indices.shape[0]\n index_offset = np.arange(num_labels) * num_classes\n labels_one_hot = np.zeros((num_labels, num_classes))\n labels_one_hot.flat[index_offset + labels_indices.ravel()] = 1\n\n return labels_one_hot", "def indexTranslate(idx,M):\n\tB, I, J, K, S, _ = idx.shape\n\t# each idx entries grid-index\n\tgrid_idx = torch.arange(0,I*J,device=idx.device).repeat_interleave(S*S).reshape(1,I,J,1,S,S).repeat_interleave(K, dim=3)\n\t# grid index row and column (inter-window)\n\tgi, gj = grid_idx//J, grid_idx%J\n\t# window index row and column (intra-window)\n\t#wi, wj = idx//S, idx%S\n\twi, wj = idx//M, idx%M\n\t# global index row and column\n\tm, n = wi+gi*S, wj+gj*S\n\t# global flattened index\n\tp = J*S*m + n\n\t# stack to tile (unstack requires float)\n\treturn unstack(p.float()).long()", "def indices_one_hot(labels_indices, num_classes=10):\n \n num_labels = labels_indices.shape[0]\n index_offset = np.arange(num_labels) * num_classes\n labels_one_hot = np.zeros((num_labels, num_classes))\n labels_one_hot.flat[index_offset + labels_indices.ravel()] = 1\n \n return labels_one_hot", "def label_from_index(self, index):\n assert self.labels is not None, \"Labels not processed\"\n #return self.labels[index, :, :]\n return self.labels[index]", "def _normalize_index(oid, index, queried_oid):\n prefix = oid[:len(queried_oid)]\n # TODO: This is hacky and should probably be fixed in the normalize_oid function of easysnmp\n if oid != prefix:\n index = oid[len(queried_oid) + 1:] + '.' 
+ index\n\n return index", "def get_index(ks):\n unq_vals, unq_ix = np.unique(ks[:, 0], return_index=True)\n return np.vstack([unq_vals, unq_ix]).T", "def direct_obs_matrix(Nx,obs_inds):\n Ny = len(obs_inds)\n H = zeros((Ny,Nx))\n H[range(Ny),obs_inds] = 1\n return H", "def mapping(index: Union[int, List[int]]) -> Union[int, List[int]]:\n if isinstance(index, int):\n return indexMapping[index]\n else:\n mappedList = []\n for item in index:\n mappedList.append(indexMapping[item])\n return mappedList", "def ones():\n return Vec2d(1, 1)", "def _reindex2d(self, index, dst_nodata=np.nan):\n # create new DataArray for output\n dst_coords = {d: self._obj.coords[d] for d in self._obj.dims}\n ys, xs = index.raster.ycoords, index.raster.xcoords\n dst_coords.update({self.y_dim: ys, self.x_dim: xs})\n da_reproject = full(\n dst_coords,\n nodata=dst_nodata,\n dtype=self._obj.dtype,\n name=self._obj.name,\n attrs=self._obj.attrs,\n crs=index.raster.crs,\n shape=index.raster.shape\n if self.dim0 is None\n else (self._obj.shape[0], *index.raster.shape),\n dims=self.dims if self.dim0 is None else (self.dim0, *self.dims),\n )\n # reproject by indexing\n shape2d = (self._obj.shape[0] if self.dim0 else 1, self.size)\n src_data = self._obj.load().data.reshape(shape2d)\n idxs = index.values\n valid = idxs >= 0\n if self.dim0:\n da_reproject.data[:, valid] = src_data[:, idxs[valid]]\n else:\n da_reproject.data[valid] = src_data[:, idxs[valid]].squeeze()\n return da_reproject", "def dense_to_one_hot(labels_dense, label):\n num_labels = len(labels_dense)\n index_offset = list(labels_dense).index(label)\n labels_one_hot = np.zeros(num_labels)\n labels_one_hot[index_offset] = 1\n return labels_one_hot", "def pndindex(*args):\n return np.ndindex(*args)", "def _asarray1d(arr, copy=False):\n if copy:\n return asarray(arr).flatten()\n else:\n return asarray(arr).ravel()", "def format_as_index(indices):\r\n\r\n if not indices:\r\n return \"\"\r\n return \"[%s]\" % \"][\".join(repr(index) for index in indices)", "def _getitem_1d(self, pos):\n # Check if pos has multiple elements.\n if isinstance(pos, slice):\n return self._getitem_1d(_conv_slice_to_list(slice_obj=pos, stop_def=self.master.shape[0]))\n elif isinstance(pos, np.ndarray):\n return self._getitem_1d(pos.tolist())\n elif isinstance(pos, list):\n return np.array([self._getitem_1d(p) for p in pos])\n elif pos is None:\n raise TypeError(\"_AbsToPhyConverter._getitem_1d does not accept None as its input.\")\n else:\n # pos is a single element. 
Compute quotient and modulo for grid extension.\n quo = 0\n mod = int(round(pos))\n if pos >= self.master.shape[0]:\n mod = int(round(pos % self.master.shape[0]))\n quo = int(round((pos-mod) / self.master.shape[0]))\n elif pos < 0:\n mod = int(round(pos % self.master.shape[0]))\n quo = int(round((pos-mod)) / self.master.shape[0])\n return quo * self.master.range[1] + self.master.elements[mod]\n # the following command cannot handle the size extension of the grid, disabled.\n # return self.master.elements.take(pos, mode='wrap')", "def de_project(np_arr):\n item = (np_arr +1)*255 / 2\n return item.astype(np.int32, copy=True)", "def one_hot_encode(x):\n # TODO: Implement Function\n x_l = list(x)\n for index in np.arange(len(x_l)):\n x_l[index] = get_one_hot_vector(x[index])[x[index]]\n return np.array(x_l)", "def pick_samples_1D(arr, indices, dtype = np.float32):\n\n n_samples = len(indices)\n\n arr_samples = np.zeros((n_samples), dtype = dtype)\n\n for i, index in enumerate(indices):\n arr_samples[i] = arr[index]\n\n return arr_samples", "def index_to_selector(cls, idx):\n\n if isinstance(idx, pd.MultiIndex):\n return idx.tolist()\n else:\n return [(i,) for i in idx.tolist()]", "def serialize_index(index):\n writer = faiss.VectorIOWriter()\n faiss.write_index(index, writer)\n return faiss.vector_to_array(writer.data)", "def id_to_index(self, id):\n raise NotImplementedError", "def ones_like(data):\n return _make.ones_like(data)", "def dyad_to_index(j, k):\n return 2**j + k", "def convert_to_one_hot(a,max_val=None):\n N = a.size\n data = np.ones(N,dtype=int)\n sparse_out = sparse.coo_matrix((data,(np.arange(N),a.ravel())), shape=(N,max_val))\n return np.array(sparse_out.todense())", "def to_one_hot(arr, num_classes):\n arr = arr.data.astype(int)\n a = np.zeros((arr.shape[0], num_classes))\n a[np.arange(len(a)), arr] = 1\n return tensor.Tensor(a, requires_grad=True)", "def to_one_hot(arr, num_classes):\n arr = arr.data.astype(int)\n a = np.zeros((arr.shape[0], num_classes))\n a[np.arange(len(a)), arr] = 1\n return tensor.Tensor(a, requires_grad=True)" ]
[ "0.63026196", "0.6043091", "0.6035447", "0.5884787", "0.5873872", "0.58113074", "0.58112794", "0.5807854", "0.5798596", "0.5788235", "0.576793", "0.57582265", "0.5724153", "0.5716051", "0.57072705", "0.5707207", "0.57035434", "0.5701338", "0.56998104", "0.5698102", "0.5695544", "0.5695544", "0.5695544", "0.5695544", "0.5666648", "0.56541073", "0.56460404", "0.5638028", "0.5634038", "0.5627856", "0.5625826", "0.5624823", "0.5607967", "0.5597523", "0.5574427", "0.5543751", "0.55353105", "0.5522", "0.55111367", "0.5504637", "0.5480434", "0.5470063", "0.5450132", "0.5445474", "0.54304725", "0.54259187", "0.5376806", "0.5369785", "0.5366596", "0.53553724", "0.53512996", "0.5347587", "0.5346204", "0.53390646", "0.53389305", "0.5336596", "0.5324077", "0.5322626", "0.53104967", "0.5299145", "0.5290016", "0.5277242", "0.52708346", "0.52593356", "0.5258377", "0.5256176", "0.5255908", "0.52402866", "0.5237446", "0.5235315", "0.52312994", "0.52231514", "0.522141", "0.52188504", "0.5218413", "0.5203323", "0.5195495", "0.5194423", "0.51872164", "0.51860076", "0.5181139", "0.51768917", "0.5164936", "0.5161655", "0.5146903", "0.51364625", "0.5133534", "0.5131159", "0.5130133", "0.5120984", "0.5115907", "0.5112415", "0.511167", "0.5110878", "0.5109056", "0.51085144", "0.51029307", "0.5102416", "0.5101609", "0.5101609" ]
0.59991854
3
Converts a one dimensional index to 2d.
def two_dim_index(self, k):
    ind_x = k % self.nx
    ind_y = (k - ind_x) / self.nx
    return (int(ind_y), int(ind_x))
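For reference, a self-contained sketch of the same 1D-to-2D index conversion shown in the document above (the grid width nx and the row-major layout are assumptions carried over from that snippet; divmod stands in for the explicit modulo/division pair):

import numpy as np

def two_dim_index(k, nx):
    # Recover (row, col) from the flat index k of a row-major grid that is nx cells wide.
    ind_y, ind_x = divmod(k, nx)
    return int(ind_y), int(ind_x)

# Example: on a grid 4 cells wide, flat index 9 lands on row 2, column 1,
# matching numpy's own row-major unravelling.
assert two_dim_index(9, nx=4) == (2, 1)
assert two_dim_index(9, nx=4) == tuple(np.unravel_index(9, (3, 4)))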
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def index2d(src, idx):\n broadcast_to = P.BroadcastTo(idx.shape)\n offs = broadcast_to(P.range(Tensor(0, mindspore.int32),\n Tensor(idx.shape[0], mindspore.int32),\n Tensor(1, mindspore.int32))[:, None])\n idx = idx + (offs()) * idx.shape[1]\n\n return src.view(-1)[idx.view(-1)].view(idx.shpe)", "def make_2d(x):\n return x.reshape((1, len(x)))", "def reindex2d(self, index):\n ds_out = xr.Dataset(attrs=self._obj.attrs)\n for var in self.vars:\n ds_out[var] = self._obj[var].raster.reindex2d(index=index)\n return ds_out", "def ind2sub(index,dims):\n subs = []\n ii = 0\n for y in range(dims[1]):\n for x in range(dims[0]):\n if index==ii:\n subs = [x,y]\n ii +=1\n return subs", "def index_to_xy(index):\n x = index % columns\n y = index // columns\n return x, y", "def indXtoJ(indX):\n return np.unravel_index(indX % xx.size, xx.shape)", "def convert_index(idx, decomposition, mode='glb_to_loc'):\n if is_integer(idx) or isinstance(idx, slice):\n return decomposition(idx, mode=mode)\n elif isinstance(idx, (tuple, list)):\n return [decomposition(i, mode=mode) for i in idx]\n elif isinstance(idx, np.ndarray):\n return np.vectorize(lambda i: decomposition(i, mode=mode))(idx)\n else:\n raise ValueError(\"Cannot convert index of type `%s` \" % type(idx))", "def makeIndexMap(self):\n\t\tn = self.numRects\n\t\thalfList = [[(j,n-1-i+j) for j in range(i+1)] for i in range(n)]\n\t\tfullList = halfList + [[(j[1],j[0]) for j in i] for i in halfList[n-2::-1]]\n\t\treturn fullList", "def ind2coord(self, index):\n\n # assert (index >= 0)\n # assert(index < self.n - 1)\n\n col = index // self.rows\n row = index % self.rows\n\n return [row, col]", "def getIntArray2D(self) -> typing.List[typing.List[int]]:\n ...", "def one_dim_index(self, i, j):\n return int(i + j * self.nx)", "def _getitem2d(self, index):\n\n lovects = self._getlovects()\n hivects = self._gethivects()\n fields = self._getfields()\n\n ix = index[0]\n iz = index[1]\n\n if len(fields[0].shape) > self.dim:\n ncomps = fields[0].shape[-1]\n else:\n ncomps = 1\n\n if len(index) > self.dim:\n if ncomps > 1:\n ic = index[2]\n else:\n raise Exception('Too many indices given')\n else:\n ic = None\n\n nx = hivects[0,:].max() - self.nghosts\n nz = hivects[1,:].max() - self.nghosts\n\n if npes > 1:\n nx = comm_world.allreduce(nx, op=mpi.MAX)\n nz = comm_world.allreduce(nz, op=mpi.MAX)\n\n if isinstance(ix, slice):\n ixstart = max(ix.start or -self.nghosts, -self.nghosts)\n ixstop = min(ix.stop or nx + 1 + self.nghosts, nx + self.overlaps[0] + self.nghosts)\n else:\n ixstart = ix\n ixstop = ix + 1\n if isinstance(iz, slice):\n izstart = max(iz.start or -self.nghosts, -self.nghosts)\n izstop = min(iz.stop or nz + 1 + self.nghosts, nz + self.overlaps[1] + self.nghosts)\n else:\n izstart = iz\n izstop = iz + 1\n\n # --- Setup the size of the array to be returned and create it.\n # --- Space is added for multiple components if needed.\n sss = (max(0, ixstop - ixstart),\n max(0, izstop - izstart))\n if ncomps > 1 and ic is None:\n sss = tuple(list(sss) + [ncomps])\n resultglobal = np.zeros(sss, dtype=_libwarpx._numpy_real_dtype)\n\n datalist = []\n for i in range(len(fields)):\n\n # --- The ix1, 2 etc are relative to global indexing\n ix1 = max(ixstart, lovects[0,i])\n ix2 = min(ixstop, lovects[0,i] + fields[i].shape[0])\n iz1 = max(izstart, lovects[1,i])\n iz2 = min(izstop, lovects[1,i] + fields[i].shape[1])\n\n if ix1 < ix2 and iz1 < iz2:\n\n sss = (slice(ix1 - lovects[0,i], ix2 - lovects[0,i]),\n slice(iz1 - lovects[1,i], iz2 - lovects[1,i]))\n if ic is not 
None:\n sss = tuple(list(sss) + [ic])\n\n vslice = (slice(ix1 - ixstart, ix2 - ixstart),\n slice(iz1 - izstart, iz2 - izstart))\n\n datalist.append((vslice, fields[i][sss]))\n\n if npes == 1:\n all_datalist = [datalist]\n else:\n all_datalist = comm_world.allgather(datalist)\n\n for datalist in all_datalist:\n for vslice, ff in datalist:\n resultglobal[vslice] = ff\n\n # --- Now remove any of the reduced dimensions.\n sss = [slice(None), slice(None)]\n if not isinstance(ix, slice):\n sss[0] = 0\n if not isinstance(iz, slice):\n sss[1] = 0\n\n return resultglobal[tuple(sss)]", "def getShortArray2D(self) -> typing.List[typing.List[int]]:\n ...", "def _idxs_postformat_array(self):\n self.idxs = np.array(self.idxs)", "def state_from_id(index, dims_state_grid):\n\n entries = [index] * len(dims_state_grid)\n for i in range(1, len(dims_state_grid)):\n value = 1\n for j in range(i, len(dims_state_grid)):\n value *= dims_state_grid[j]\n for k in range(i - 1, len(dims_state_grid)):\n if k == i - 1:\n entries[k] //= value\n else:\n entries[k] %= value\n\n out = np.array(object=entries)\n\n return out", "def xy_to_index(x, y):\n index = y * columns + x\n return index", "def reconstruct_input(self, ix):", "def todense(self):\n d = np.zeros(self.shape)\n for index,value in zip(self.index, self.value):\n d[index] = value\n return d", "def sub2ind(self, ix, iy):\n idx = np.ravel_multi_index((ix, iy), self.shape)\n return idx", "def index_to_ijk(self, index):\n return self.indices_to_ijk_array([index])[0]", "def to_2d_array(self):\n return reshape_fns.to_2d(self._obj, raw=True)", "def aind(x):\n\treturn tuple(x.T)", "def encode_to_flat_array_index(row, column, matrix):\n return row * matrix.cols + column", "def convertFrom1Dto2D(coord, num_cols):\n y = int(np.floor(coord/num_cols))\n x = coord % num_cols\n return (y,x)", "def make_indices(dimensions):\n\n level = len(dimensions)\n\n if level == 1:\n return range(dimensions[0])\n\n indices = [[]]\n\n while level:\n\n _indices = []\n\n for j in range(dimensions[level - 1]):\n\n _indices += [[j] + i for i in indices]\n\n indices = _indices\n\n level -= 1\n\n try:\n return [tuple(i) for i in indices]\n except TypeError:\n return indices", "def _reindex2d(self, index, dst_nodata=np.nan):\n # create new DataArray for output\n dst_coords = {d: self._obj.coords[d] for d in self._obj.dims}\n ys, xs = index.raster.ycoords, index.raster.xcoords\n dst_coords.update({self.y_dim: ys, self.x_dim: xs})\n da_reproject = full(\n dst_coords,\n nodata=dst_nodata,\n dtype=self._obj.dtype,\n name=self._obj.name,\n attrs=self._obj.attrs,\n crs=index.raster.crs,\n shape=index.raster.shape\n if self.dim0 is None\n else (self._obj.shape[0], *index.raster.shape),\n dims=self.dims if self.dim0 is None else (self.dim0, *self.dims),\n )\n # reproject by indexing\n shape2d = (self._obj.shape[0] if self.dim0 else 1, self.size)\n src_data = self._obj.load().data.reshape(shape2d)\n idxs = index.values\n valid = idxs >= 0\n if self.dim0:\n da_reproject.data[:, valid] = src_data[:, idxs[valid]]\n else:\n da_reproject.data[valid] = src_data[:, idxs[valid]].squeeze()\n return da_reproject", "def _index(tensor_3d, tensor_2d):\n x, y, z = tensor_3d.size()\n t = tensor_3d.reshape(x * y, z)\n tt = tensor_2d.reshape(x * y)\n v = t[torch.arange(x * y), tt]\n v = v.reshape(x, y)\n return v", "def indexTranslate(idx,M):\n\tB, I, J, K, S, _ = idx.shape\n\t# each idx entries grid-index\n\tgrid_idx = torch.arange(0,I*J,device=idx.device).repeat_interleave(S*S).reshape(1,I,J,1,S,S).repeat_interleave(K, 
dim=3)\n\t# grid index row and column (inter-window)\n\tgi, gj = grid_idx//J, grid_idx%J\n\t# window index row and column (intra-window)\n\t#wi, wj = idx//S, idx%S\n\twi, wj = idx//M, idx%M\n\t# global index row and column\n\tm, n = wi+gi*S, wj+gj*S\n\t# global flattened index\n\tp = J*S*m + n\n\t# stack to tile (unstack requires float)\n\treturn unstack(p.float()).long()", "def f1to2(x):\n assert_equal(x.ndim, 1)\n return (x[::-1] * x[1:,None]).view(cls)", "def sub2ind( sizes, multi_index ):\r\n num_dims = sizes.shape[0]\r\n index = 0\r\n shift = 1\r\n for i in range( num_dims ):\r\n index += shift * multi_index[i]\r\n shift *= sizes[i]\r\n return index", "def xy2ind(x, y, xdim):\n if isinstance(x, np.ndarray):\n return x + (y * xdim)\n else:\n return int(x) + int(y) * xdim", "def flat_to_2d(data, det_width):\n return data.reshape((data.shape[0], data.shape[1], det_width, det_width))", "def index_col(self, i0, i1, j0, j1):\n edges = self.h5['indexes']['bin1_offset'][i0:i1 + 1]\n index = []\n for lo1, hi1 in zip(edges[:-1], edges[1:]):\n if hi1 - lo1 > 0:\n bin2 = self.h5['pixels']['bin2_id'][lo1:hi1]\n mask = (bin2 >= j0) & (bin2 < j1)\n index.append(lo1 + np.flatnonzero(mask))\n if not index:\n return np.array([], dtype=int)\n else:\n return np.concatenate(index, axis=0)", "def to_flat_index(self) -> Index: # type: ignore[override]\n return Index(self._values, tupleize_cols=False)", "def _simplify_index(indices, shape):\n # First clean up and check indices, unpacking ellipsis and boolean arrays\n indices = da.slicing.normalize_index(indices, shape)\n out = []\n axis = 0\n for index in indices:\n if index is not np.newaxis:\n length = shape[axis]\n axis += 1\n # If there is 1-D fancy index on this axis, try to convert to slice\n if isinstance(index, np.ndarray) and index.ndim == 1:\n try:\n index = _range_to_slice(index)\n except ValueError:\n pass\n else:\n index = da.slicing.normalize_slice(index, length)\n out.append(index)\n return tuple(out)", "def decode_to_matrix_cell(index, matrix):\n row = index / matrix.cols\n col = index - (matrix.cols * row)\n return row, col,", "def _setitem2d(self, index, value):\n ix = index[0]\n iz = index[2]\n\n lovects = self._getlovects()\n hivects = self._gethivects()\n fields = self._getfields()\n\n if len(fields[0].shape) > self.dim:\n ncomps = fields[0].shape[-1]\n else:\n ncomps = 1\n\n if len(index) > self.dim:\n if ncomps > 1:\n ic = index[2]\n else:\n raise Exception('Too many indices given')\n else:\n ic = None\n\n nx = hivects[0,:].max() - self.nghosts\n nz = hivects[2,:].max() - self.nghosts\n\n # --- Add extra dimensions so that the input has the same number of\n # --- dimensions as array.\n if isinstance(value, np.ndarray):\n value3d = np.array(value, copy=False)\n sss = list(value3d.shape)\n if not isinstance(ix, slice): sss[0:0] = [1]\n if not isinstance(iz, slice): sss[1:1] = [1]\n value3d.shape = sss\n\n if isinstance(ix, slice):\n ixstart = max(ix.start or -self.nghosts, -self.nghosts)\n ixstop = min(ix.stop or nx + 1 + self.nghosts, nx + self.overlaps[0] + self.nghosts)\n else:\n ixstart = ix\n ixstop = ix + 1\n if isinstance(iz, slice):\n izstart = max(iz.start or -self.nghosts, -self.nghosts)\n izstop = min(iz.stop or nz + 1 + self.nghosts, nz + self.overlaps[2] + self.nghosts)\n else:\n izstart = iz\n izstop = iz + 1\n\n for i in range(len(fields)):\n\n # --- The ix1, 2 etc are relative to global indexing\n ix1 = max(ixstart, lovects[0,i])\n ix2 = min(ixstop, lovects[0,i] + fields[i].shape[0])\n iz1 = max(izstart, lovects[2,i])\n 
iz2 = min(izstop, lovects[2,i] + fields[i].shape[2])\n\n if ix1 < ix2 and iz1 < iz2:\n\n sss = (slice(ix1 - lovects[0,i], ix2 - lovects[0,i]),\n slice(iz1 - lovects[2,i], iz2 - lovects[2,i]))\n if ic is not None:\n sss = tuple(list(sss) + [ic])\n\n if isinstance(value, np.ndarray):\n vslice = (slice(ix1 - ixstart, ix2 - ixstart),\n slice(iz1 - izstart, iz2 - izstart))\n fields[i][sss] = value3d[vslice]\n else:\n fields[i][sss] = value", "def unstacked_index(size, index):\n return index % size, index // size", "def flatten(self):\n xv, yv = np.meshgrid(self.columns, self.index, indexing='xy')\n return np.array([xv.ravel(), yv.ravel(), self.values.ravel()])", "def qindex2index(index):\n r = index.row()\n c = index.column()\n if c > 0x10:\n return (0x10 * r) + c - 0x11\n else:\n return (0x10 * r) + c", "def mapping(index: Union[int, List[int]]) -> Union[int, List[int]]:\n if isinstance(index, int):\n return indexMapping[index]\n else:\n mappedList = []\n for item in index:\n mappedList.append(indexMapping[item])\n return mappedList", "def meshup2d(self, ind='ij'):\r\n\r\n xv, yv, _ = self.vec()\r\n x_reg, y_reg = np.meshgrid(xv, yv, indexing=ind)\r\n\r\n return x_reg, y_reg", "def pndindex(*args):\r\n return np.ndindex(*args)", "def transform(self, x: Array2D) -> Array2D:", "def __getitem__(self, index):\n try:\n i, j = index\n except (AssertionError, TypeError):\n raise IndexError('invalid index')\n\n if not np.isscalar(i) and np.isscalar(j):\n warn('Indexing into a lil_matrix with multiple indices is slow. '\n 'Pre-converting to CSC or CSR beforehand is more efficient.',\n SparseEfficiencyWarning)\n\n if np.isscalar(i):\n if np.isscalar(j):\n return self._get1(i, j)\n if isinstance(j, slice):\n j = self._slicetoseq(j, self.shape[1])\n if issequence(j):\n return self.__class__([[self._get1(i, jj) for jj in j]])\n elif issequence(i) and issequence(j):\n return self.__class__([[self._get1(ii, jj) for (ii, jj) in zip(i, j)]])\n elif issequence(i) or isinstance(i, slice):\n if isinstance(i, slice):\n i = self._slicetoseq(i, self.shape[0])\n if np.isscalar(j):\n return self.__class__([[self._get1(ii, j)] for ii in i])\n if isinstance(j, slice):\n j = self._slicetoseq(j, self.shape[1])\n if issequence(j):\n return self.__class__([[self._get1(ii, jj) for jj in j] for ii in i])\n else:\n raise IndexError", "def to_mapping(self, dim):\n mim = cifti2.Cifti2MatrixIndicesMap([dim], 'CIFTI_INDEX_TYPE_SERIES')\n mim.series_exponent = 0\n mim.series_start = self.start\n mim.series_step = self.step\n mim.number_of_series_points = self.size\n mim.series_unit = self.unit\n return mim", "def from_1D_to_2D(constant):\n if isinstance(constant, np.ndarray) and constant.ndim == 1:\n return np.mat(constant).T\n else:\n return constant", "def getLongArray2D(self) -> typing.List[typing.List[int]]:\n ...", "def getArray2d(self):\n\t\treturn self.array2d", "def pndindex(*args):\n return np.ndindex(*args)", "def vector_indx_to_map_matrix_indx(index,senzory_map):\n xs = dict(zip(np.unique(senzory_map[:,0]), it.count()))\n ys = dict(zip(np.negative(np.unique(senzory_map[:,1])), it.count()))\n x, y = senzory_map[index]\n return ys[y],xs[x]", "def ind2sub( sizes, index, num_indices ):\r\n\r\n denom = num_indices\r\n num_dims = sizes.shape[0]\r\n multi_index = np.empty( ( num_dims ), np.int32 )\r\n for i in range( num_dims - 1, -1, -1 ):\r\n denom /= sizes[i]\r\n multi_index[i] = index / denom\r\n index = index % denom\r\n return multi_index", "def grid_to_index(mapdata, x, y):\n i = (y * mapdata.info.width) + x\n return int 
(i)", "def _getitem_2d(self, pos):\n if isinstance(pos, list):\n if isinstance(pos[0], (int, np.integer)): # single point\n return self[pos[0], pos[1]]\n else:\n return [self[p] for p in pos]\n elif isinstance(pos, np.ndarray):\n if isinstance(pos[0], (int, np.integer)): # single point\n return np.array(self[pos[0], pos[1]])\n else:\n return np.array([self[p] for p in pos])\n # compute coordinates from OneDimGrids of its master.\n x = self.master.x[pos[0]]\n y = self.master.y[pos[1]]\n # TODO: Refactor the following code to avoid the use of double for loops and list comprehensions.\n if (not isinstance(x, np.ndarray)) and (not isinstance(y, np.ndarray)): # x and y are scalars.\n return np.array([x, y])\n if not isinstance(x, np.ndarray): # x is a scalar.\n return np.array([np.array([x, _y]) for _y in y])\n elif not isinstance(y, np.ndarray): # y is a scalar.\n return np.array([np.array([_x, y]) for _x in x])\n else:\n xy = []\n for _x in x: # vectorize this operation.\n row = []\n for _y in y:\n row.append(np.array([_x, _y]))\n xy.append(np.array(row))\n return np.array(xy)", "def rev_index(idx, axis=-1):\n #Want an idx2 such that x[idx][idx2] == x\n #idx is position to value map\n #Populate every POSITION in idx2 with the POSITION in idx that\n #has the VALUE of the idx2 position\n #searchsorted on range?\n idx_out = numpy.empty_like(idx).ravel()\n idx_out[flatten_idx(idx, axis)] = axis_index(idx.shape, axis).ravel()\n return idx_out.reshape(idx.shape)", "def index_to_slices(index):\r\n\r\n #contruct the return structure\r\n ind = np.asarray(index,dtype=np.int64)\r\n ret = [[] for i in range(ind.max()+1)]\r\n\r\n #find the switchpoints\r\n ind_ = np.hstack((ind,ind[0]+ind[-1]+1))\r\n switchpoints = np.nonzero(ind_ - np.roll(ind_,+1))[0]\r\n\r\n [ret[ind_i].append(slice(*indexes_i)) for ind_i,indexes_i in zip(ind[switchpoints[:-1]],zip(switchpoints,switchpoints[1:]))]\r\n return ret", "def index_to_slices(index):\r\n\r\n #contruct the return structure\r\n ind = np.asarray(index,dtype=np.int64)\r\n ret = [[] for i in range(ind.max()+1)]\r\n\r\n #find the switchpoints\r\n ind_ = np.hstack((ind,ind[0]+ind[-1]+1))\r\n switchpoints = np.nonzero(ind_ - np.roll(ind_,+1))[0]\r\n\r\n [ret[ind_i].append(slice(*indexes_i)) for ind_i,indexes_i in zip(ind[switchpoints[:-1]],zip(switchpoints,switchpoints[1:]))]\r\n return ret", "def slice_data_to_2D(x, y):\n if(x.shape != y.shape):\n print(\"Error: Images and Labels do not have the same shape\")\n else:\n x = np.array([(x[i, :, :, z]) for i in range(x.shape[0]) for z in range(x.shape[3])])\n y = np.array([(y[i, :, :, z]) for i in range(y.shape[0]) for z in range(y.shape[3])])\n return x,y", "def normalize_index(x):\n if x.__class__ in native_types:\n return x\n elif x.__class__ in sequence_types:\n # Note that casting a tuple to a tuple is cheap (no copy, no\n # new object)\n x = tuple(x)\n elif hasattr(x, '__iter__') and isinstance(x, collections_Sequence):\n if isinstance(x, string_types):\n # This is very difficult to get to: it would require a user\n # creating a custom derived string type\n return x\n sequence_types.add(x.__class__)\n x = tuple(x)\n else:\n return x\n\n x_len = len(x)\n i = 0\n while i < x_len:\n _xi = x[i]\n _xi_class = _xi.__class__\n if _xi_class in native_types:\n i += 1\n elif _xi_class in sequence_types:\n x_len += len(x[i]) - 1\n # Note that casting a tuple to a tuple is cheap (no copy, no\n # new object)\n x = x[:i] + tuple(x[i]) + x[i + 1:]\n elif _xi_class is not tuple and isinstance(_xi, collections_Sequence):\n if 
isinstance(_xi, string_types):\n # This is very difficult to get to: it would require a\n # user creating a custom derived string type\n i += 1\n else:\n sequence_types.add(_xi_class)\n x_len += len(x[i]) - 1\n x = x[:i] + tuple(x[i]) + x[i + 1:]\n else:\n i += 1\n\n if x_len == 1:\n return x[0]\n return x", "def single_index_to_index(x, resolution):\n return x//resolution, x%resolution", "def flatten_idx(idx, axis=-1):\n idx = numpy.asanyarray(idx)\n if not idx.dtype.kind in ('i', 'u'):\n idx = idx.astype(int)\n preshape = idx.shape[:axis]\n postshape = idx.shape[axis:]\n stride = int(numpy.product(postshape[1:])) #1 if applied to empty\n #The index on this axis moves stride elements in flat\n outidx = idx.flatten() * stride #makes a copy\n #First add the offsets to get us to [..., idx @ axis = 0, 0...)\n outidx += numpy.repeat(\n numpy.arange(0, len(outidx), int(numpy.product(postshape)),\n dtype=idx.dtype),\n numpy.product(postshape))\n #Now offsets for non-zero on the trailing axes [0, 0, ... 0@axis, ...]\n outidx += numpy.tile(numpy.arange(0, stride, dtype=idx.dtype),\n int(numpy.product(preshape)) * idx.shape[axis])\n return outidx", "def make_idx2word():\n idx2word = {}\n d = train_data.shared['word2idx']\n for word, idx in d.items():\n print(word)\n idx2word[idx] = word\n if config.use_glove_for_unk:\n d2 = train_data.shared['new_word2idx']\n for word, idx in d2.items():\n print(word)\n idx2word[idx+len(d)] = word\n return idx2word", "def __getitem__(self, index):\n return (index, self.data_cube[0, index, :])", "def get_node_indices_and_levels(nd: np.ndarray):\n indices = []\n lvs = []\n for j in range(1, nd.shape[0]):\n if j == 1:\n indices = nd[j]\n lvs = nd[j + 1]\n elif j % 2 != 0 and j > 1:\n indices = np.append(indices, nd[j])\n elif j % 2 == 0 and j > 2:\n lvs = np.append(lvs, nd[j])\n return indices, lvs", "def index_to_selector(cls, idx):\n\n if isinstance(idx, pd.MultiIndex):\n return idx.tolist()\n else:\n return [(i,) for i in idx.tolist()]", "def get_lvl_index2id(self, ion):\n\n q_ion_lvls = self.session.query(Level.level_id.label(\"id\"),\n Level.level_index.label(\"index\")). 
\\\n filter(and_(Level.ion == ion,\n Level.data_source == self.data_source))\n\n lvl_index2id = list()\n for id, index in q_ion_lvls:\n lvl_index2id.append((index, id))\n\n lvl_index2id_dtype = [(\"index\", np.int), (\"id\", np.int)]\n lvl_index2id = np.array(lvl_index2id, dtype=lvl_index2id_dtype)\n lvl_index2id = pd.DataFrame.from_records(lvl_index2id, index=\"index\")\n\n return lvl_index2id", "def _getitem_2d(self, pos):\n # If pos contains multiple coordinates (or objects), convert recursively.\n if isinstance(pos, list):\n if isinstance(pos[0], (int, np.integer)): # It's actually a single coordinate.\n return self[pos[0], pos[1]]\n else:\n return [self[p] for p in pos]\n elif isinstance(pos, np.ndarray):\n if isinstance(pos[0], (int, np.integer)): # It's actually a single coordinate.\n return np.array(self[pos[0], pos[1]])\n else:\n return np.array([self[p] for p in pos])\n # If pos contains only one physical object, convert its bounding box to abstract coordinates\n if (pos.__class__.__name__ == 'PhysicalObject') or (issubclass(pos.__class__, laygo2.object.PhysicalObject)):\n return self.bbox(pos)\n # If pos contains only one coordinate, convert it to abstract grid.\n m = self.master.x == pos[0]\n n = self.master.y == pos[1]\n # refactor the following code to avoid the use of double for-loops and list comprehensions.\n if (not isinstance(m, np.ndarray)) and (not isinstance(n, np.ndarray)): # x and y are scalars.\n return np.array([m, n])\n if not isinstance(m, np.ndarray): # x is a scalar.\n return np.array([np.array([m, _n]) for _n in n])\n elif not isinstance(n, np.ndarray): # y is a scalar.\n return np.array([np.array([_m, n]) for _m in m])\n else:\n mn = []\n for _m in m: # vectorize this operation.\n row = []\n for _n in n:\n row.append(np.array([_m, _n]))\n mn.append(np.array(row))\n return np.array(mn)", "def sub2ind(sizes, multi_index):\n num_sets = len(sizes)\n scalar_index = 0\n shift = 1\n for ii in range(num_sets):\n scalar_index += shift * multi_index[ii]\n shift *= sizes[ii]\n return scalar_index", "def index(i, j):\n return i * N + j", "def index_object(idxs=None):", "def indxmap_diff(Nd): \n\n ndims = len(Nd)\n Ndprod = numpy.prod(Nd)\n mylist = numpy.arange(0, Ndprod).astype(numpy.int32)\n mylist = numpy.reshape(mylist, Nd)\n d_indx = []\n dt_indx = []\n for pp in range(0, ndims):\n d_indx = d_indx + [ numpy.reshape( numpy.roll( mylist, +1 , pp ), (Ndprod,) ,order='C').astype(numpy.int32) ,]\n dt_indx = dt_indx + [ numpy.reshape( numpy.roll( mylist, -1 , pp ) , (Ndprod,) ,order='C').astype(numpy.int32) ,]\n\n return d_indx, dt_indx", "def column_convertor(x):\n x.shape = (1, x.shape[0])\n return x", "def getByteArray2D(self) -> typing.List[typing.List[int]]:\n ...", "def xy2ind(self, x, y):\n return self.sub2ind(*self.xy2sub(x, y))", "def convert_gather_nd(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n index = g.get_node(op.input(\"Index\")[0])\n shape = infer_shape(index)\n perm = list(range(0, len(shape) - 1))\n perm.insert(0, len(shape) - 1)\n index = _op.transpose(index, axes=perm)\n out = _op.gather_nd(x, index, 0, shape[-1])\n g.add_node(op.output(\"Out\")[0], out)", "def idx_to_grid(n):\n\n x = n % MAX_Y\n y = int(n / MAX_X)\n return(x, y)", "def map_whole_index_to_train(train_idx, index_in_whole):\n if isinstance(index_in_whole, MultiLabelIndexCollection):\n ind_type = 2\n elif isinstance(index_in_whole, IndexCollection):\n ind_type = 1\n else:\n raise TypeError(\"index_in_whole must be one of {IndexCollection, MultiLabelIndexCollection} 
type.\")\n\n tr_ob = []\n for entry in index_in_whole:\n if ind_type == 2:\n assert entry[0] in train_idx\n ind_in_train = np.argwhere(train_idx == entry[0])[0][0]\n tr_ob.append((ind_in_train, entry[1]))\n else:\n assert entry in train_idx\n tr_ob.append(np.argwhere(train_idx == entry)[0][0])\n if ind_type == 2:\n return MultiLabelIndexCollection(tr_ob)\n else:\n return IndexCollection(tr_ob)", "def _to_flat_index(self, idx_in):\n idx_in = tuple([np.array(z, ndmin=1, copy=False) for z in idx_in])\n msk = np.all(np.stack([t < n for t, n in zip(idx_in, self.shape)]), axis=0)\n idx = np.ravel_multi_index(\n tuple([t[msk] for t in idx_in]), self.shape, mode=\"wrap\"\n )\n\n return idx, msk", "def ssk_from_indices( indices_l, indices_r ):\n return mat[ [[int(il)] for il in indices_l], [int(ir) for ir in indices_r] ]", "def one_hot(indices, depth):\n\n encoded_indicies = torch.zeros(indices.size() + torch.Size([depth]))\n if indices.is_cuda:\n encoded_indicies = encoded_indicies.cuda() \n index = indices.view(indices.size()+torch.Size([1]))\n encoded_indicies = encoded_indicies.scatter_(1,index,1)\n\n return encoded_indicies", "def indexreshape(n, m):\n if n < m:\n raise ValueError('m must be lower or equal to n')\n\n delta = (n % m) // 2\n end = n - (n % m)\n step = end // m\n r = tuple((i + delta, i + delta + step - 1) for i in range(0, end, step))\n return r", "def get_2d_train(self, jnts=14):\n\n to_select, to_sort = dataset_indices(self.dataset_name, jnts)\n\n return self._data_train['2d'][:, to_select, :][:, to_sort, :]", "def convert_index_select(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n index = g.get_node(op.input(\"Index\")[0])\n axis = op.attr(\"dim\")\n out = _op.transform.take(x, index, axis, mode=\"wrap\")\n g.add_node(op.output(\"Out\")[0], out)", "def __getitem__(self, index):\n if isinstance(index, (tuple, list)) and len(index) == 2:\n return self.cells[index[1]][index[0]]\n return self.cells[index]", "def to_categorical(index_label, num_classes):\n return index_label, np.eye(num_classes, dtype='uint8')[index_label]", "def to_sparse_representation(label, batch_idx):\n indices = []\n vals = []\n\n for i, idx in enumerate(batch_idx):\n for j, c in enumerate(label[idx]):\n indices.append([i, j])\n vals.append(c)\n\n shape = [len(batch_idx), np.max(indices, axis=0)[1] + 1]\n\n return np.array(indices), np.array(vals), np.array(shape)", "def to2D(geometry):\n\n return LineString(np.column_stack(geometry.xy))", "def space_2d(X, Y):\n return list(map(lambda x: Mapping.one_value_to_many(x, Y), X))", "def tree_idx2(treeG,k1,J1,J2):\n g = treeG[J1]['clusters'][k1]\n if(J1>J2+1):\n for j in np.arange(J2+1, J1)[::-1]:\n g1 = []\n for i in np.arange(0,len(g),1):\n g1 = np.array(np.append(g1,treeG[j]['clusters'][g[i]]), dtype = int)\n g = g1\n y = g\n return y", "def row_to_indices(row):\r\n return [(row, col) for col in range(0, 9)]", "def _ivector_index_to_node(i_components,_intermediate_node): \n return Vector(\n index= [\n _intermediate_node[uid_i]\n for uid_i in i_components.iterkeys()\n \n ],\n value=i_components._value\n )", "def idx2pixel(idx, image_size):\n\tassert idx < image_size**2, \"index {} too large for image size {}\".format(idx, image_size)\n\ttmp = np.zeros(image_size**2)\n\ttmp[idx] = 1\n\ttmp = tmp.reshape(image_size, image_size)\n\ti, j = np.where(tmp==1)\n\treturn i[0], j[0]", "def convert_index(index:int, total_columns:int)->Tuple[int,int]:\n\tindex_row, index_column = divmod(index, total_columns)\n\n\treturn index_row, index_column-1", "def 
to_2dnp_array(X):\r\n if isinstance(X, np.ndarray):\r\n if X.ndim == 1:\r\n return X.reshape((-1, 1))\r\n if X.ndim == 2:\r\n return X\r\n if isinstance(X, Number):\r\n X = [X]\r\n X = np.array(X)\r\n X = X.reshape([-1, np.prod(X.shape) // X.shape[0]])\r\n return X", "def _binary_2d_label_to_sparse_value(labels):\n indices = []\n values = []\n batch = 0\n for row in labels:\n label = 0\n xi = 0\n for x in row:\n if x == 1:\n indices.append([batch, xi])\n values.append(label)\n xi += 1\n else:\n assert x == 0\n label += 1\n batch += 1\n shape = [len(labels), len(labels[0])]\n return sparse_tensor.SparseTensorValue(\n np.array(indices, np.int64), np.array(values, np.int64),\n np.array(shape, np.int64))", "def _convert_col_index(self, index):\n if index is None or isinstance(index, int): return index\n if isinstance(index, str):\n find_index = self._varlist.index\n return [find_index(v) for v in self._find_vars(index)]\n if isinstance(index, collections.Iterable):\n new_index = []\n append = new_index.append\n find_vars = self._find_vars\n find_index = self._varlist.index\n for i in index:\n if isinstance(i, str):\n new_index += [find_index(i) for i in find_vars(i)]\n elif isinstance(i, int):\n append(i)\n else:\n msg = \"column iterable should contain only int or str\"\n raise TypeError(msg)\n if len(new_index) != len(set(new_index)):\n msg = \"columns cannot be repeated; use -clonevar- to copy\"\n raise ValueError(msg)\n return new_index\n if isinstance(index, slice):\n start, stop, step = index.start, index.stop, index.step\n if not isinstance(start, int) and start is not None:\n if isinstance(start, str):\n start = self._varlist.index(self._find_vars(start)[0])\n else:\n raise TypeError(\"column slice values must be str or int\")\n if not isinstance(stop, int) and stop is not None:\n if isinstance(stop, str):\n stop = self._varlist.index(self._find_vars(stop)[0])\n else:\n raise TypeError(\"column slice values must be str or int\")\n return slice(start, stop, step)\n msg = \"column should be index (int), name (str), slice, or iterable\"\n raise TypeError(msg)", "def transform(self, x):\n res = [x[i] for i in range(len(x))\n if i not in self.index_value_pairs]\n return res if isinstance(x, list) else np.asarray(res)", "def indices(self):\n return tuple([slice(*r) for r in self.location])", "def one_hot(indices, depth):\n # print(indices)\n encoded_indices = torch.zeros(indices.size() + torch.Size([depth])).cuda()\n index = indices.view(indices.size()+torch.Size([1]))\n encoded_indices = encoded_indices.scatter_(1,index,1)\n \n return encoded_indices", "def indexes_to_one_hot(indexes, n_dims=None):\n indexes = indexes.type(torch.int64).view(-1, 1)\n n_dims = n_dims if n_dims is not None else int(torch.max(indexes)) + 1\n one_hots = torch.zeros(indexes.size()[0], n_dims).scatter_(1, indexes, 1)\n one_hots = one_hots.view(*(indexes.shape-1))\n return one_hots.type(torch.LongTensor)" ]
[ "0.67064077", "0.6169856", "0.60942876", "0.60095567", "0.5960693", "0.5943522", "0.59234434", "0.5917022", "0.58836246", "0.58741325", "0.5864474", "0.58585775", "0.58431876", "0.58389384", "0.5818341", "0.5752824", "0.5739784", "0.5734386", "0.57313216", "0.57089096", "0.56932235", "0.567575", "0.56754905", "0.5648429", "0.56029564", "0.5602028", "0.5595475", "0.55895764", "0.55862206", "0.55594796", "0.5555942", "0.5550801", "0.55251753", "0.55185753", "0.55156636", "0.55027044", "0.5494473", "0.5493161", "0.5483294", "0.5461814", "0.54594934", "0.545685", "0.54286134", "0.5406565", "0.54004985", "0.53918034", "0.5390087", "0.5383695", "0.5381656", "0.5375533", "0.53721553", "0.53663266", "0.5357537", "0.5356517", "0.5349179", "0.53480786", "0.53480786", "0.5346625", "0.53332955", "0.5313154", "0.53093266", "0.5295047", "0.5291952", "0.529167", "0.5261882", "0.5251068", "0.52463675", "0.52439195", "0.5240694", "0.5235157", "0.52273786", "0.5220612", "0.52174646", "0.5217108", "0.52149075", "0.5211256", "0.5210246", "0.51840645", "0.51828575", "0.51785374", "0.51773924", "0.5173305", "0.51645446", "0.5162249", "0.51620334", "0.5160024", "0.51598626", "0.5155351", "0.5149629", "0.5147133", "0.5146407", "0.51352656", "0.513207", "0.5126706", "0.5123949", "0.51149786", "0.51142555", "0.51137054", "0.51127756", "0.51088876" ]
0.550443
35
Return a list of all the cells in the grid. We start increasing x first, i.e. the 0th cell is the first cell, the 1st cell is the one with the next x in the list and y unchanged, and so on. Returns an array of size n_cells x n_dims.
def cells_list(self):
    xx, yy = np.meshgrid(self.x_spacings, self.y_spacings)
    return np.vstack([yy.ravel(), xx.ravel()]).transpose()
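For reference, a standalone sketch of the cell-enumeration idea in the document above (x_spacings and y_spacings stand in for the grid's per-axis coordinate vectors, an assumption carried over from that snippet; meshgrid's default 'xy' indexing makes x vary fastest, matching the ordering described in the query):

import numpy as np

def cells_list(x_spacings, y_spacings):
    # Enumerate every (y, x) cell; with 'xy' indexing, x advances first while y stays fixed.
    xx, yy = np.meshgrid(x_spacings, y_spacings)
    return np.vstack([yy.ravel(), xx.ravel()]).T  # shape: (n_cells, n_dims)

cells = cells_list(np.array([0.0, 1.0, 2.0]), np.array([0.0, 1.0]))
# cells[0] -> [0., 0.], cells[1] -> [0., 1.]: the x coordinate advanced while y stayed unchanged.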
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_cells_from_dims(num_verts_x: int, num_verts_y: int):\n num_cells_x = num_verts_x - 1\n num_cells_y = num_verts_y - 1\n num_cells = num_cells_x*num_cells_y\n cell_array = np.zeros((num_cells, 4), dtype=int)\n cell_num = 0\n\n # I am sure this could be done in a more efficient way.\n for y_cell in range(num_cells_y):\n for x_cell in range(num_cells_x):\n cell_array[cell_num, 0] = x_cell + num_verts_x*y_cell\n cell_array[cell_num, 1] = cell_array[cell_num, 0] + 1\n cell_array[cell_num, 2] = cell_array[cell_num, 0] + num_verts_x + 1\n cell_array[cell_num, 3] = cell_array[cell_num, 0] + num_verts_x\n cell_num += 1\n\n return cell_array", "def cells(self):\n \"\"\"\n Note that we use the convention that the first cell is (1,1)\n \"\"\"\n part = Partition(list(self))\n coordinates = part.cells()\n coordinates = [(x+1, y+1) for x, y in coordinates]\n return coordinates", "def get_cells(self):\n self.list = [self.origin]\n\n for i in range(1, self.size):\n if(self.direction ==self.direction.RIGHT):\n self.list.append((self.origin[0], self.origin[1]+i))\n elif(self.direction ==self.direction.DOWN):\n self.list.append((self.origin[0]-i, self.origin[1]))\n\n return self.list", "def cell_list(self):\n lst_of_idx = []\n height = self.__height\n width = self.__width\n for i in range(width):\n for j in range(height):\n lst_of_idx.append((i,j))\n lst_of_idx.append((3,7))\n return lst_of_idx", "def all_cells(self):\n \"\"\"\n Note that we use the convention that the first cell is (1,1)\n \"\"\"\n spart_star = self.circle_star()\n part = Partition(list(spart_star))\n coordinates = part.cells()\n coordinates = [(x+1, y+1) for x, y in coordinates]\n return coordinates", "def get_cells(self):\n cell_list = []\n for cell_row in self.board:\n for current_cell in cell_row:\n if current_cell is not None:\n cell_list.append(current_cell)\n return cell_list", "def makeStartingGrid(self):\n return util.make2DArray(self.xN, self.yN, False)", "def cells(self) -> List[Tuple[int, int]]:\n return self._cells", "def traverse_grid(self, start_cell, direction, num_steps):\n elements = []\n\n for step in range(num_steps):\n row = start_cell[0] + step * direction[0]\n col = start_cell[1] + step * direction[1]\n elements.append(self._grid[row][col])\n\n return elements", "def get_neighbours(self, cell):\n\t\tx,y = cell.find_id()\n\t\tlength = self.space.shape[1]\n\t\twidth = self.space.shape[0]\n\t\tif (length == 0 or width == 0 or x < 0 or x >= length or y < 0 or y >= width):\n\t\t\treturn []\n\t\tneighs = [(i,j) for i in range(y-1,y+2) if 0<=i<width for j in range(x-1,x+2) if 0<=j<length]\n\t\tneighbours = []\n\t\tfor neigh in neighs:\n\t\t\tneighbours.append(self.space[neigh[0],neigh[1]])\n\t\treturn neighbours", "def get_neighbours(self, cell: Position) -> Iterable[Position]:\n x, y = cell\n\n return [\n (x - 1, y - 1), (x, y - 1), (x + 1, y - 1),\n (x - 1, y), (x + 1, y),\n (x - 1, y + 1), (x, y + 1), (x + 1, y + 1),\n ]", "def cells(self):\n \"\"\"\n Note that we use the convention that the first cell is (1,1)\n \"\"\"\n spart_star = self.star()\n part = Partition(list(spart_star))\n coordinates = part.cells()\n coordinates = [(x+1, y+1) for x, y in coordinates]\n return coordinates", "def get_adjcells(self,cell):\n adj_cells = []\n cells_xy = []\n if cell.x > 0:\n adj_cells.append(self.cell_array.item((cell.x-1,cell.y)))\n if cell.x < self.grid_size - 1:\n adj_cells.append(self.cell_array.item((cell.x+1,cell.y)))\n if cell.y > 0:\n adj_cells.append(self.cell_array.item((cell.x,cell.y-1)))\n if cell.y < 
self.grid_size - 1:\n adj_cells.append(self.cell_array.item((cell.x,cell.y+1)))\n return adj_cells", "def get_neighbors(grid, x, y):\n out = []\n if x > 0:\n out.append(grid[x-1, y])\n if y > 0:\n out.append(grid[x, y-1])\n if y < grid.shape[1] - 1:\n out.append(grid[x, y+1])\n if x < grid.shape[0] - 1:\n out.append(grid[x+1, y])\n return out", "def get_cells(self):\r\n return \\\r\n (self.nx-1 if self.nx>1 else 1)* \\\r\n (self.ny-1 if self.ny>1 else 1)* \\\r\n (self.nz-1 if self.nz>1 else 1)", "def grid_coords(self):\n return [(x, y) for y in range(self.height) for x in range(self.width)]", "def cells(self):\n return copy.deepcopy(self._cells)", "def GLDAS025Cellgrid():\n return GLDAS025Grids(only_land=False)", "def generate_cells(self,n_c):\n self.n_c = n_c\n self.cells = [Cell() for i in range(n_c)]\n return self.cells", "def generate_grid(nrows, ncols, ndots):\n\n # Validation\n if nrows * ncols < ndots:\n raise Exception(\"ndots must be <= than grid size\")\n\n rows = np.arange(1, nrows + 1)\n cols = np.arange(1, ncols + 1)\n\n # Create empty matrix\n grid = np.empty((len(rows), len(cols), 2), dtype=np.intp)\n grid[..., 0] = rows[:, None]\n grid[..., 1] = cols \n\n return grid.reshape(nrows * ncols, -1)[:ndots]", "def cells(self):\n return ((row, col) for row in self.rows for col in self.cols)", "def get_start_grid(cols=4, rows=4):\n grid = [[0]*cols for i in range(rows)]\n for i in range(2):\n empties = get_empty_cells(grid)\n y,x = random.choice(empties)\n grid[y][x] = 2 if random.random() < 0.9 else 4\n return grid", "def get_cells(pts, inv_cell_width, Ny, Nx, log=sys.stdout):\n lib = _initlib()\n p = require(pts, dtype=float64, requirements=['C']) \n npts = p.shape[0]\n assert(p.shape ==(npts,3))\n out = empty(npts, dtype=int64)\n\n res = lib.find_lattice(p, npts, inv_cell_width, Ny, Nx, out)\n return out", "def get_visible_cells(self):\r\n ux, uy = self.GetScrollPixelsPerUnit()\r\n sx, sy = self.GetViewStart()\r\n w, h = self.GetGridWindow().GetClientSize().Get()\r\n sx *= ux\r\n sy *= uy\r\n start_col = self.XToCol(sx)\r\n start_row = self.YToRow(sy)\r\n end_col = self.XToCol(sx + w, True)\r\n end_row = self.YToRow(sy + h, True)\r\n return start_row, end_row, start_col, end_col", "def regex_grid(n):\n cx = 2 ** (n - 1)\n cy = 2 ** (n - 1)\n grid = [[grid_numbering(n, i , j, cx, cy) for i in range(2 ** n)] for j in range(2 ** n)]\n \n return grid", "def getCellRange(self, cellx, celly, size):\n y = int(celly - ((size -1) / 2))\n x = int(cellx - ((size -1) / 2))\n _y = int(celly + ((size -1) / 2))\n _x = int(cellx + ((size -1) / 2))\n return list(product(range(x, _x+1), range(y,_y+1)))", "def make_grid(self):\n\n\t\tinit_grid = (self.grid_width//2, self.grid_height//2)\n\t\tgrid_list = []\n\n\t\tfor i in range(self.canv_width//self.grid_width):\n\t\t\tfor j in range(self.canv_height//self.grid_height):\n\t\t\t\tif j == 0 or j%2 ==0:\n\t\t\t\t\tgrid_list.append((init_grid[0]+i*self.grid_width, init_grid[1]+j*self.grid_height))\n\t\t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\tgrid_list.append((grid_list[-1][0]+(self.grid_width//2), init_grid[1]+j*self.grid_height))\n\n\t\treturn grid_list", "def _create_grid_with_cells(self, width, height):\n grid = []\n for row in range(height):\n grid.append([])\n for column in range(width):\n if column % 2 == 1 and row % 2 == 1:\n grid[row].append(TILE_EMPTY)\n elif (\n column == 0 or row == 0 or column == width - 1 or row == height - 1\n ):\n grid[row].append(TILE_CRATE)\n else:\n grid[row].append(TILE_CRATE)\n grid[-2][-3] = TILE_EMPTY\n grid[1][0] = 
TILE_EMPTY\n return grid", "def get_start_grid(cols=4, rows=4):\n\tgrid = [[\"\"]*cols for i in range(rows)]\n\tfor i in range(2):\n\t\tempties = get_empty_cells(grid)\n\t\ty,x = random.choice(empties)\n\t\tgrid[y][x] = 2 if random.random() < prob_2 else 4\n\treturn grid", "def create_grid(self):\n return [[0] * self.width for _ in range(self.height)]", "def tensor_grid(x):\n\treturn np.vstack(np.meshgrid(*x, indexing = 'ij')).reshape((len(x), -1)).T", "def cells(self):\n return self.container['cells']", "def calc_grid(self):\n return int(self._posn.x / cell_size), int(self._posn.y / cell_size)", "def cells(self):\r\n return Cells(self)", "def get_cells(self):\n raise NotImplementedError", "def get_all_numbered_cells(self):\r\n num_rows = len(self.mine_field)\r\n num_cols = len(self.mine_field[0])\r\n numbered_cells = [(row, col)\r\n for row in range(num_rows)\r\n for col in range(num_cols)\r\n if self.mine_field[row][col] != '?' and \\\r\n self.mine_field[row][col] != 'x']\r\n return numbered_cells", "def grids(self):\n x = self.xvalues\n if self.ndim == 1:\n return x\n if self.ndim == 2:\n return x[None, :], x[:, None]\n if self.ndim == 3:\n return x[None, :, None], x[:, None, None], x[None, None, :]", "def get_all_spawnable_cells(self):\n spawnable_positions = []\n\n for i in range(self.grid.width):\n for j in range(self.grid.height):\n n_list = self.grid.get_cell_list_contents([(i, j)])\n\n if len(n_list) <= 0:\n spawnable_positions.append((i, j))\n elif len(n_list) > 0:\n n = n_list[0]\n if not any(map(lambda t: isinstance(n, t), self.not_spawnable_objects)):\n spawnable_positions.append((i, j))\n\n return spawnable_positions", "def generate_nearby_cells(self):\n for y in range(len(self.island_map)):\n for x in range(len(self.island_map[y])):\n list_of_nearby_cells = []\n\n if y != 0:\n self.generate_cell_above(x, y, list_of_nearby_cells)\n\n if x != 0:\n self.generate_cell_left(x, y, list_of_nearby_cells)\n\n if y != len(self.island_map)-1:\n self.generate_cell_below(x, y, list_of_nearby_cells)\n\n if x != len(self.island_map[y])-1:\n self.generate_cell_right(x, y, list_of_nearby_cells)\n\n self.island_map[y][x].nearby_cells = list_of_nearby_cells", "def get_xy_grid(nx, ny):\n\tfor n in [nx, ny]:\n\t\tif not isodd(n):\n\t\t\traise Exception(\"[get_xy_grid] only accept odd number\")\n\n\tx, y = np.mgrid[-(nx-1)/2:(nx+1)/2, -(ny-1)/2:(ny+1)/2]\n\n\treturn x, y", "def initialize_cells(self):\n for loc in np.ndindex(*self.shape): # TODO: see if nested for loop is faster than this\n c = Cell(loc, self)\n self.cells.append(c)", "def get_live_cell_coordinates(self):\n\n return np.array(np.where(self.board == 1)).transpose().tolist()", "def _build_grid(self):\n n = self.params['n']\n\n x_min, x_max = min(self.node[:, 0]), max(self.node[:, 0])\n y_min, y_max = min(self.node[:, 1]), max(self.node[:, 1])\n xv = np.linspace(x_min, x_max, num=n, endpoint=True)\n yv = np.linspace(y_min, y_max, num=n, endpoint=True)\n xg, yg = np.meshgrid(xv, yv, sparse=False, indexing='xy')\n\n return xg, yg", "def gridToList(grid):\n gridList =[]\n for i in range(grid.width):\n for j in range(grid.height):\n if grid[i][j] == True:\n gridList.append((i,j))\n return gridList", "def ij_coordinates(self):\n\n x = np.arange(self.nx)\n y = np.arange(self.ny)\n return np.meshgrid(x, y)", "def get_cells(self):\n return [\n cell for column in self.children for cell in column.get_cells()]", "def empty_cells(state):\r\n cells = []\r\n for x, row in enumerate(state):\r\n for y, cell in enumerate(row):\r\n if cell == 0:\r\n 
cells.append([x, y])\r\n\r\n return cells", "def _generate_cells(self) -> None:\n for i in range(15):\n for j in range(15):\n c = Cell(x=i, y=j)\n c.answer = self.puzzle.solution[j*self.width+i]\n self.cells[(j, i)] = c # row, col", "def make_grid(X,Y): \r\n grid = []\r\n for j in range(Y):\r\n row = []\r\n for i in range(X):\r\n row.append( block((i,j)) )\r\n grid.append(row)\r\n return grid", "def createGrid(nx, ny, include_center = False):\n direction = 0\n positions = []\n if (nx > 1) or (ny > 1):\n half_x = int(nx/2)\n half_y = int(ny/2)\n for i in range(-half_y, half_y+1):\n for j in range(-half_x, half_x+1):\n if ((i==0) and (j==0)) and not include_center:\n continue\n else:\n if ((direction%2)==0):\n positions.append([j,i])\n else:\n positions.append([-j,i])\n direction += 1\n return positions", "def get_neighbours(coords):\n\n dxdy = [(-1,-2),(0,-2),(1,-2),(-2,-1),(-1,-1),(0,-1),(1,-1),(2,-1),\n (-2,0),(-1,0),(1,0),(2,0),(-2,1),(-1,1),(0,1),(1,1),(2,1),\n (-1,2),(0,2),(1,2),(0,0)]\n neighbours = []\n for dx, dy in dxdy:\n neighbour_coords = coords[0] + dx, coords[1] + dy\n if not (0 <= neighbour_coords[0] < nx and\n 0 <= neighbour_coords[1] < ny):\n # We're off the grid: no neighbours here.\n continue\n neighbour_cell = cells[neighbour_coords]\n if neighbour_cell is not None:\n # This cell is occupied: store this index of the contained point.\n neighbours.append(neighbour_cell)\n return neighbours", "def get_empty_cells(state):\n cells = []\n for row_index, row in enumerate(state.board):\n for col_index, cell in enumerate(row):\n if cell == 0:\n cells.append([row_index, col_index])\n return cells", "def iter_cells(self):\n\t\treturn iter(self._cells)", "def gridToList(self, grid):\n gridList =[]\n for i in range(grid.width):\n for j in range(grid.height):\n if grid[i][j] == True:\n # print \"grid[i][j]\", grid[i][j]\n gridList.append((i,j))\n return gridList", "def init_grid(self):\n grid = []\n for i in range(self.settings['grid_size']):\n grid.append([])\n for j in range(self.settings['grid_size']):\n if [j, i] in self.settings['walls']:\n grid[i].append(g.WALL)\n else:\n grid[i].append(g.EMPTY)\n return grid", "def fill_grid(self):\n\n for row_margin, row in enumerate(range(self.rows)):\n self.grid.append([])\n\n for col_margin, col in enumerate(range(self.cols)):\n x = col*self.cell_size + col_margin\n y = row*self.cell_size + row_margin\n\n rect = pygame.Rect(x, y, self.cell_size, self.cell_size)\n\n cell = Cell(row, col, rect)\n\n if row == 7 and col == 3:\n cell.root = True\n self.root = cell\n elif row == 7 and col == 16:\n cell.goal = True\n self.goal = cell\n\n self.grid[row].append(cell)", "def gen_grid(grid_width, grid_height):\n\n grid = []\n for x in range(0, grid_width):\n grid.append([])\n for y in range(0, grid_height):\n grid[x].append(False)\n return grid", "def initialize_grid(self):\n self.grid = np.zeros([self.N, self.N, self.N])\n return self.grid", "def randomGrid(N):\n grid = np.zeros((N,N), dtype=int)\n for i in range(N): \n for j in range(N): \n if np.random.uniform() < 0.2:\n # cell alive\n grid[i,j] = int(np.random.uniform(low=1, high=(256*256*256)-1))\n return grid", "def list_neighbors(current_row, current_col, grid_size):\n neighbors = []\n for row_offset, col_offset in [(-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1),\n (1, -1), (1, 0), (1, 1)]:\n new_row = current_row + row_offset\n new_col = current_col + col_offset\n if (new_row >= 0 and new_row < grid_size and new_col >= 0\n and new_col < grid_size):\n neighbors.append((new_row, new_col))\n return 
neighbors", "def get_neighbours(self, coords):\n\n\t dxdy = [(-1,-2),(0,-2),(1,-2),(-2,-1),(-1,-1),(0,-1),(1,-1),(2,-1),\n\t (-2,0),(-1,0),(1,0),(2,0),(-2,1),(-1,1),(0,1),(1,1),(2,1),\n\t (-1,2),(0,2),(1,2),(0,0)]\n\t neighbours = []\n\t for dx, dy in dxdy:\n\t neighbour_coords = coords[0] + dx, coords[1] + dy\n\t if not (0 <= neighbour_coords[0] < self.nx and\n\t 0 <= neighbour_coords[1] < self.ny):\n\t # We're off the grid: no neighbours here.\n\t continue\n\t neighbour_cell = self.cells[neighbour_coords]\n\t if neighbour_cell is not None:\n\t # This cell is occupied: store this index of the contained point.\n\t neighbours.append(neighbour_cell)\n\t return neighbours", "def make_grid(self, nx, ny):\n nx_vec = np.arange(nx)\n ny_vec = np.arange(ny)\n yv, xv = np.meshgrid(ny_vec, nx_vec)\n grid = np.stack((yv, xv), axis=2)\n grid = grid.reshape(1, 1, ny, nx, 2)\n return grid", "def getGrid(x,y,w,h,x_step=1, y_step=1):\n X,Y = np.mgrid[x:x+w:x_step, y:y+h:y_step]\n return np.array(np.vstack((X.flatten(),Y.flatten())).transpose(), dtype=np.float32)", "def grid(x, y):\n return product(xrange(1, x+1), xrange(1, y+1))", "def get_all_neighbors(self):\n m, n = self.board.shape\n return as_strided(self.expanded_board,\n shape = (m,n,3,3), \n strides = self.expanded_board.strides + self.expanded_board.strides)", "def get_empty_cells(grid):\n empty = []\n for j,row in enumerate(grid):\n for i,val in enumerate(row):\n if not val:\n empty.append((j,i))\n return empty", "def getNewGrid(self, _grid_size):\n grid_ = []\n for _ in range(_grid_size[0]):\n grid_ += [[ None for _ in range(_grid_size[1]) ]]\n return grid_", "def fill_grid_np(self):\n\n self.grid_np = [None for i in range(GRID_HEIGHT*GRID_HEIGHT*MAX_CELL_SIZE)]\n grid = self.grid_np\n # cell_size = self.cell_size\n for obj in self.levels[self.curient_level].objects:\n obj.position_grid[X], obj.position_grid[Y] = get_grid_xy(obj.position_np, ZOMBIE_SIZE)\n x, y = obj.position_grid[X], obj.position_grid[Y]\n grid[y*GRID_WIDTH + x] = obj\n # if cell_size[y*GRID_WIDTH + x] < MAX_CELL_SIZE:\n # cell_size[y*GRID_WIDTH + x] += 1", "def create_grid(size_x, size_y, default=None):\n return [[default for _x in range(size_y)] for _y in range(size_x)]", "def init_cells(self):\n state = list()\n width = WIDTH / CELL_SIZE\n height = HEIGHT / CELL_SIZE\n\n for index in range(0, width * height):\n if randint(1, 100) >= 100 - CELL_DENSITY:\n # Live cell.\n status = NORMAL\n state.append(1)\n else:\n # Dead cell.\n status = HIDDEN\n state.append(0)\n\n cell = self.canvas.create_rectangle((index % width) * CELL_SIZE, (index / width) * CELL_SIZE,\n ((index % width) + 1) * CELL_SIZE, ((index / width) + 1) * CELL_SIZE,\n fill=\"black\", state=status, outline=\"white\")\n self.cells.append(cell)\n\n return state", "def _build_point_grid(n_per_side: int) -> np.ndarray:\n offset = 1 / (2 * n_per_side)\n points_one_side = np.linspace(offset, 1 - offset, n_per_side)\n points_x = np.tile(points_one_side[None, :], (n_per_side, 1))\n points_y = np.tile(points_one_side[:, None], (1, n_per_side))\n points = np.stack([points_x, points_y], axis=-1).reshape(-1, 2)\n return points", "def create_subgrid(self)->list:\n return [subgrid.Subgrid(i) for i in range(0, 9)]", "def get_grid_coords(self, count, boundry_x, boundry_y, grid_size):\n\n coords = []\n\n boundry_x = int(boundry_x/10)\n boundry_y = int(boundry_y/10)\n\n while len(coords) < count:\n seed()\n\n\n x = randint(-boundry_x, boundry_x)\n y = randint(-boundry_y, boundry_y)\n\n if len(coords) == 0:\n 
coords.append((x*grid_size, y*grid_size))\n else:\n for coord in coords:\n if (x not in range(coord[0]-buffer*grid_size, coord[0]+buffer*grid_size)) and (y not in range(coord[1]-buffer, coord[1]+buffer)):\n pass\n else:\n break", "def grid_maker(width, height):\n grid = [['.' for i in range(width)] for j in range(height)]\n return grid", "def grid_image(output):\n grid = []\n for data in output:\n grid += [make_grid(data, nrow=5, normalize=True)]\n return grid", "def make_game_grid(self):\n return numpy.array([[random.choice(string.ascii_uppercase) for breath in range(self.grid_size)] for depth in\n range(self.grid_size)])", "def get_neighbours(self, grid):\n\t\tfor diff in ((-1, 0), (1, 0), (0, -1), (0, 1)):\n\t\t\tres = Vector((self.row, self.col)) + diff\n\t\t\tif res[0] >= 0 and res[1] >= 0 and res[0] < len(grid) and res[1] < len(grid[0]):\n\t\t\t\tyield grid[res[0]][res[1]]", "def get_empty_cells(grid):\n\tempty = []\n\tfor j,row in enumerate(grid):\n\t\tfor i,val in enumerate(row):\n\t\t\tif not val:\n\t\t\t\tempty.append((j,i))\n\treturn empty", "def create_grid(grid):\r\n for i in range(4):\r\n grid.append([0]*4)\r\n return grid", "def create_grid(grid):\r\n for i in range (4):\r\n grid.append ([])\r\n for j in range (4):\r\n grid[i].append (0)", "def reset(self):\n self._cells = [[0 for dummy_col in range(self._grid_width)] for dummy_row in range(self._grid_height)]\n self.new_tile()\n self.new_tile()\n #return self._cells", "def cells_z(self):\n if self.is_depth:\n return list(reversed(self._cells[2]))\n return self._cells[2]", "def create_grid(size):\n grid = []\n for i in range(size):\n row = ['0']*size\n grid.append(row)\n\n return grid", "def getNeighbors(cell, all_living_cells, test=False, test_color=8):\n neighbors = []\n NeighborCellGrid = [ # all possible neighbor positions\n [cell.x - 1, cell.y - 1], # top left\n [cell.x, cell.y - 1], # top\n [cell.x + 1, cell.y - 1], # top right\n [cell.x - 1, cell.y], # left\n [cell.x + 1, cell.y], # right\n [cell.x - 1, cell.y + 1], # bottom left\n [cell.x, cell.y + 1], # bottom\n [cell.x + 1, cell.y + 1] # bottom right\n ]\n count = 0\n for i in all_living_cells:\n count+=1\n if i.id != cell.id and i.alive == True: # not self and pixel is alive\n if [i.x, i.y] in NeighborCellGrid: # next to\n neighbors.append(i)\n if test:\n for i in NeighborCellGrid:\n g = simCell(i[0], i[1], color=test_color)\n test_cells.append(g)\n return neighbors", "def create_grid(grid):\r\n inner = [0]*4\r\n for i in range(4):\r\n grid.append(inner[:])", "def generate_grid(height, width):\n return [[random.randint(0, 9) for _ in range(width)] for _ in range(height)]", "def neighbour_cells(id, Nx):\n r = cell_coord(id, Nx)\n neighs = []\n tmp = np.arange(3) - 1\n for p in itertools.product(tmp, tmp, tmp):\n neigh = (r + p) % Nx\n neighs.append(neigh)\n return [id_from_coord(neigh, Nx) for neigh in neighs]", "def nd_grid(*xg):\n grid_shape = [np.shape(xg1d)[0] for xg1d in xg] # shape of the grid\n d = np.size(grid_shape)\n N = np.product(grid_shape)\n X_mesh = np.empty(d, dtype=object)\n for i, xg1d in enumerate(xg): # for each 1d component\n if np.ndim(xg1d) > 1:\n assert np.shape(xg1d)[1] == 1, \"only currently support each grid dimension being 1d\"\n n = np.shape(xg1d)[0] # number of points along dimension of grid\n slice_shape = np.ones(d, dtype=int); slice_shape[i] = n # shape of the slice where xg1d fits\n stack_shape = np.copy(grid_shape); stack_shape[i] = 1 # shape of how the slice should be tiled\n X_mesh[i] = np.tile(xg1d.reshape(slice_shape), 
stack_shape) # this is the single dimension on the full grid\n return X_mesh", "def cells(self):\n return chain.from_iterable(self.cols)", "def get_cells(self, row, col):\r\n surrounding_cells = self.get_surrounding_cells(row, col)\r\n closed_cells = self.filter_cells(surrounding_cells, '?')\r\n mine_cells = self.filter_cells(surrounding_cells, 'x')\r\n numbered_cells = list(set(surrounding_cells).difference(closed_cells))\r\n numbered_cells = list(set(numbered_cells).difference(mine_cells))\r\n return surrounding_cells, closed_cells, mine_cells, numbered_cells", "def inner_cells(w, h):\n a = create_board(w, h)\n\n for row in range(h):\n for col in range(w):\n if 0 < row < h - 1 and 0 < col < w - 1:\n a[row][col] = 1\n else:\n a[row][col] = 0\n\n return a", "def grid(self) -> aa.Grid2D:\r\n return self.analysis.dataset.grid", "def stripToGrid(pixelCount, columnCount):\n rowCount = int(pixelCount/columnCount)\n grid = [[0 for x in range(rowCount)] for y in range(columnCount)]\n\n pixel = 0\n for y in range(rowCount):\n for x in range(columnCount): \n column = x if y%2 == 0 else columnCount-1-x\n grid[column][y] = pixel \n pixel += 1 \n\n return grid", "def create_sudoku(self)->list:\n grid = [[None for x in range(9)] for row in range(9)]\n for row in range(0,9):\n for column in range(0,9):\n if row <= 2 and column <=2:\n grid[row][column] = cell.Cell(0)\n elif row <= 2 and 3 <= column <= 5:\n grid[row][column] = cell.Cell(1)\n elif row <= 2 and 6 <= column <= 8:\n grid[row][column] = cell.Cell(2)\n elif 3 <= row <= 5 and column <= 2:\n grid[row][column] = cell.Cell(3)\n elif 3 <= row <= 5 and 3 <= column <= 5:\n grid[row][column] = cell.Cell(4)\n elif 3 <= row <= 5 and 6 <= column <= 8:\n grid[row][column] = cell.Cell(5)\n elif 6 <= row <= 8 and column <= 2:\n grid[row][column] = cell.Cell(6)\n elif 6 <= row <= 8 and 3 <= column <= 5:\n grid[row][column] = cell.Cell(7)\n elif 6 <= row <= 8 and 6 <= column <= 8:\n grid[row][column] = cell.Cell(8)\n return grid", "def cell_coord(id, Nx):\n nx = id // (Nx**2)\n ny = (id - nx * Nx**2) // Nx\n nz = id - nx * Nx**2 - ny * Nx\n return np.array([nx, ny, nz])", "def empty_cells(self) -> List[Cell]:\n return list(ob.pos[0] for ob in self.new_obs())", "def GetCells(self):\n if not self.VTKObject.GetCells():\n return None\n return vtkDataArrayToVTKArray(\n self.VTKObject.GetCells().GetData(), self)", "def create_grid(height, width):\n grid = []\n \n for r in range(height):\n row = [0] * width # a row containing width 0s\n grid += [row]\n\n return grid", "def idx_to_grid(n):\n\n x = n % MAX_Y\n y = int(n / MAX_X)\n return(x, y)", "def empty_cells(state):\n cells = []\n\n for i, row in enumerate(state):\n for j, col in enumerate(row):\n if state[i][j] == 0:\n cells.append([i, j])\n\n return cells" ]
[ "0.7313377", "0.70912474", "0.70496106", "0.70136446", "0.70017177", "0.69983876", "0.6967043", "0.6942949", "0.69047695", "0.6899912", "0.6888806", "0.6857896", "0.68333375", "0.6820522", "0.6802178", "0.6766183", "0.66904247", "0.66788596", "0.6637702", "0.66127455", "0.6606392", "0.66014403", "0.659337", "0.65744764", "0.65483505", "0.6543926", "0.65347534", "0.6508396", "0.64976", "0.64827985", "0.6466761", "0.64660096", "0.6465397", "0.6464061", "0.6459656", "0.64539254", "0.6438207", "0.6424352", "0.64161617", "0.639448", "0.6362108", "0.635874", "0.6340923", "0.6330599", "0.63146085", "0.6273749", "0.62603015", "0.6259235", "0.62533385", "0.6252939", "0.62378454", "0.62330645", "0.62305456", "0.6230099", "0.622841", "0.6226047", "0.62210244", "0.62202287", "0.6217314", "0.62156206", "0.62050575", "0.62046427", "0.62025785", "0.6172052", "0.616648", "0.61393684", "0.6138189", "0.6135529", "0.6133282", "0.61293995", "0.6128043", "0.6125903", "0.6124246", "0.6117766", "0.6102929", "0.61004955", "0.609598", "0.6091757", "0.60865957", "0.60780036", "0.60774404", "0.6076851", "0.6072343", "0.6071143", "0.6060529", "0.604847", "0.60373515", "0.6033714", "0.60335135", "0.60332626", "0.60207313", "0.6013867", "0.601314", "0.60120636", "0.5997871", "0.5996022", "0.5990635", "0.5990486", "0.5988287", "0.5984943" ]
0.77503717
0
Calculates silence threshold per sound interval for chunking.
def get_silence_threshold(sound, lower_quantile): soundint = sound.to_intensity() max_intensity = call(soundint, 'Get quantile', 0.0, 0.0, 1) sil_intensity = call(soundint, 'Get quantile', 0.0, 0.0, lower_quantile) return sil_intensity - max_intensity
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def determine_silence_threshold(self):\n loudest_sound_cohort_size = 0.2 # Top 20% are counted in the loudest sound group.\n silence_threshold_multiplier = 1.6 # Sounds must be at least 1.6x as loud as the loudest silence\n\n rospy.loginfo(\"Getting intensity values from mic.\")\n self.open_stream()\n tss = self.total_silence_samples\n values = [math.sqrt(abs(audioop.avg(self.stream.read(self.chunk_size), self.audio_format_width)))\n for _ in range(tss)]\n values = sorted(values, reverse=True)\n sum_of_loudest_sounds = sum(values[:int(tss * loudest_sound_cohort_size)])\n total_samples_in_cohort = int(tss * loudest_sound_cohort_size)\n average_of_loudest_sounds = sum_of_loudest_sounds / total_samples_in_cohort\n rospy.loginfo(\"Average audio intensity is %d\" % average_of_loudest_sounds)\n self.silence_threshold = average_of_loudest_sounds * silence_threshold_multiplier\n rospy.loginfo(\"Silence threshold set to %d \" % self.silence_threshold)\n self.close_stream()", "def split_on_silence_threshold(wav_file, dest_dir):\n # Read the file\n audioSegment = AudioSegment.from_wav(wav_file)\n # Calculating the silence threshold\n # Normalizing the audio file belfore finding the threshold\n full_audio_wav = normalize(audioSegment)\n loudness_ms_list = [] # Save the audio levels of all the chunks\n for ms_chunk in full_audio_wav:\n loudness_ms_list.append(round(ms_chunk.dBFS))\n print(\"Audio levels are recorded\", file=sys.stderr)\n # Using pandas df for easier manipulation\n df = pd.DataFrame(loudness_ms_list)\n df[0] = df[df[0] != float(\"-inf\")] # Remove the very low levels\n st = df[0].mean()\n st = st if st < -16 else -16 # Because -16db is default\n # Splits the audio if silence duration is MSL long\n MSL = 500 # minimum silence length in ms\n chunks = split_on_silence(\n full_audio_wav, \n # split on silences longer than 500ms (500ms)\n min_silence_len=MSL, \n # anything under -16 dBFS is considered silence\n silence_thresh=st, \n # keep 200 ms of leading/trailing silence\n keep_silence=200, \n )\n # Saving all the chunks\n print(\"Writing all the files, this may take some time!\", file=sys.stderr)\n for index, chunk in enumerate(chunks):\n chunk_file_name = os.path.join(dest_dir, \"sample_{}.wav\".format(str(index).zfill(10)))\n print(\"Saving the file to \" + chunk_file_name, file=sys.stderr)\n # You can export as mp3 etc, note that it has dependency on ffmpeg\n chunk.export(chunk_file_name, format=\"wav\")", "def split( self, rSilenceTresholdPercent = 0.1, rSilenceMinDuration = 0.3, nExtractJustFirsts = -1 ):\n nLimit = int( self.getSampleMaxValue() * rSilenceTresholdPercent / 100 ) \n print( \"INF: sound.Wav.split: splitting a sound of %5.3fs, using silence limits at %d for %5.3fs\" % (self.rDuration, nLimit, rSilenceMinDuration) ) \n aSplitted = []\n \n precalcWavIsNotSilence = np.abs(self.data)>nLimit\n\n #~ print self\n \n nCurrentPos = 0 # in data index (not sample)\n nSilenceMinLenData = rSilenceMinDuration * self.nAvgBytesPerSec * 8 / self.nNbrBitsPerSample\n while( nCurrentPos < len(self.data) ):\n \n # first find the beginning of a sound \n nFirstNonSilenceIndex = findFirstTrueValue( precalcWavIsNotSilence[nCurrentPos:] )\n #~ print( \"nFirstNonSilenceIndex (brut): %d\" % nFirstNonSilenceIndex )\n if( nFirstNonSilenceIndex == -1 ):\n # all remaining sound are silence!\n break\n nFirstNonSilenceIndex += nCurrentPos\n nNumFirstSample = nFirstNonSilenceIndex/self.nNbrChannel\n print( \"INF: sound.Wav.split: found a sound at sample %d\" % nNumFirstSample )\n nCurrentPos = 
nFirstNonSilenceIndex # so at the end, we're stopping\n \n # then find end\n nEndOfSilence = nNumFirstSample*self.nNbrChannel # init of the loop\n while( nEndOfSilence < len(self.data) ):\n #nFirstSilenceIndex = np.argmax( np.abs(self.data[nEndOfSilence:])<=nLimit )\n nFirstSilenceIndex = findFirstFalseValue( precalcWavIsNotSilence[nEndOfSilence:] ) \n #~ print( \"nFirstSilenceIndex (brut): %d (from %d)\" % (nFirstSilenceIndex, nEndOfSilence) )\n if( nFirstSilenceIndex == -1 ):\n break\n nFirstSilenceIndex += nEndOfSilence\n # ensure there's enough silence\n nEndOfSilence = findFirstTrueValue( precalcWavIsNotSilence[nFirstSilenceIndex:] )\n #~ print( \"nEndOfSilence (brut): %d (data: %d) (offset: %d)\" % (nEndOfSilence, self.data[nFirstSilenceIndex+nEndOfSilence],nEndOfSilence + nFirstSilenceIndex) )\n # positionnate onto the end of the silence for next time\n if( nEndOfSilence == -1 ):\n nCurrentPos = len(self.data)\n else:\n nCurrentPos = nEndOfSilence + nFirstSilenceIndex\n \n if( nEndOfSilence > nSilenceMinLenData or nEndOfSilence == -1 ):\n break\n nEndOfSilence += nFirstSilenceIndex\n # while - end\n \n # each time we're out, we've got a silence or we're at the end => new split\n if( nFirstSilenceIndex == -1 ):\n break\n nNumLastSample = nFirstSilenceIndex/self.nNbrChannel\n print( \"INF: sound.Wav.split: found the end of that sound at sample %d\" % nNumLastSample )\n if( nNumLastSample - nNumFirstSample > 4000 ):\n w = Wav()\n w.copyHeader( self )\n w.data = np.copy(self.data[nNumFirstSample*self.nNbrChannel:nNumLastSample*self.nNbrChannel])\n nPeakMax = max( max( w.data ), -min( w.data ) )\n if( nPeakMax > self.getSampleMaxValue() / 8 ): # remove glitch sound\n w.updateHeaderSizeFromDataLength()\n print( \"INF: sound.Wav.split: new split of %5.2fs\" % w.rDuration )\n aSplitted.append( w )\n #~ print( \"nCurLocalVs: %s\" % nCurLocalVs )\n if( nExtractJustFirsts != -1 and nExtractJustFirsts == len(aSplitted) ):\n print( \"WRN: sound.Wav.split: got enough split (%d), leaving...\" % len(aSplitted) )\n break\n # while - end\n print( \"INF: sound.Wav.split: created %d wav(s)\" % len( aSplitted ) )\n return aSplitted", "def silence_intervals(file_path,file_name):\r\n nsil_start_time=[]\r\n nsil_end_time=[]\r\n sil_start_time=[]\r\n sil_end_time=[]\r\n #read file \r\n audio, sample_rate = librosa.load(os.path.join(file_path,file_name))\r\n \r\n #silence extraction using librosa\r\n nsil_intv=librosa.effects.split(audio, top_db=30).astype('float32') / sample_rate\r\n \r\n #silence extraction using pyAudioanalysis\r\n # [Fs, x] = aIO.readAudioFile(os.path.join(file_path,file_name))\r\n # nsil_intv = np.array(aS.silenceRemoval(x, Fs, 0.020, 0.020, smoothWindow = 0.7, Weight = 0.3, plot = False))\r\n # print \"non-sil segments=\"+str(nsil_intv)\r\n\r\n #silence detection using webrtcvad (voice activity detection)\r\n #nsil_intv=np.array(vad_webrtcvad(file_path,file_name))\r\n\r\n\r\n dur=librosa.get_duration(y=audio, sr=sample_rate)\r\n print nsil_intv\r\n print dur\r\n print sample_rate\r\n curr_sil_start=0.0\r\n curr_sil_end=0.0\r\n for i in range(nsil_intv.shape[0]):\r\n nsil_start_time.append(nsil_intv[i][0])\r\n #sil_start_time=list(np.array(sil_start_time)/sample_rate)\r\n\r\n nsil_end_time.append(nsil_intv[i][1])\r\n #sil_end_time=list(np.array(sil_end_time)/sample_rate)\r\n\r\n for i in range(len(nsil_start_time)):\r\n curr_sil_end=nsil_start_time[i]\r\n sil_start_time.append(str(curr_sil_start))\r\n sil_end_time.append(str(curr_sil_end))\r\n curr_sil_start=nsil_end_time[i]\r\n\r\n 
print sil_start_time\r\n print sil_end_time\r\n return sil_start_time,sil_end_time", "def trim_silence(audio, noise_threshold=150):\n start = None\n end = None\n\n for idx, point in enumerate(audio):\n if abs(point) > noise_threshold:\n start = idx\n break\n\n # Reverse the array for trimming the end\n for idx, point in enumerate(audio[::-1]):\n if abs(point) > noise_threshold:\n end = len(audio) - idx\n break\n\n return audio[start:end]", "def __thresholdInput(self,samples):\n absSamples = np.abs(samples) # 1 ms\n thresh = self.peakThresholdScale*np.mean(absSamples) # 0.2 ms\n i = np.where(absSamples>thresh)[0] # 1e-5 s\n samples[i] = thresh * (samples[i]/absSamples[i]) # 8e-5 s\n # Do it again in case the spikes were really loud\n absSamples[i] = np.abs(samples[i])\n thresh = self.peakThresholdScale*np.mean(absSamples)\n i = np.where(absSamples>thresh)[0]\n self.clippedPeakIPure = i # All peaks that are clipped at first round are clipped again. Requires that the peaks in first round are not set to 0\n samples[i] = thresh * (samples[i]/absSamples[i])\n # Mark peaks close to each other\n if len(self.clippedPeakIPure)>0:\n # t = time.time()\n # Mark peaks close to each other as continuous\n diffPeaks = np.diff(self.clippedPeakIPure)\n gapsAll = np.where(diffPeaks>1)[0]\n self.peakMinGap = 100\n gaps = np.where(diffPeaks[gapsAll] < self.peakMinGap)[0] # find gaps smaller than 100\n gapsLen = diffPeaks[gapsAll[gaps]] # length of the gaps\n gapsIdx = gapsAll[gaps] # Index of all gaps\n\n\n # fill the gaps smaller than self.peakMinGap\n pp = np.zeros(self.Nfft,dtype=np.int8)\n pp[self.clippedPeakIPure] = 1\n for i in range(len(gapsLen)):\n pp[self.clippedPeakIPure[gapsIdx[i]]:self.clippedPeakIPure[gapsIdx[i]]+gapsLen[i]] = 1\n\n self.clippedPeakI = np.where(pp==1)[0]\n else:\n self.clippedPeakI = self.clippedPeakIPure.copy()\n if log.level == logging.DEBUG:\n log.debug('clipped peaks ' + str(len(self.clippedPeakIPure)))", "def silence_handler(wav, sr, fl=320, fs=80, \n max_thres_below=30, \n min_thres=-55, \n shortest_len_in_ms=50,\n flag_output=0):\n assert fs < fl, \"Frame shift should be smaller than frame length\"\n \n frames = buffering(wav, fl, fl - fs, 'nodelay')\n windowed_frames = windowing(frames)\n \n frame_energy = 20*np.log10(np.std(frames, axis=1)+np.finfo(np.float32).eps)\n frame_energy_max = np.max(frame_energy)\n \n frame_tag = np.bitwise_and(\n (frame_energy > (frame_energy_max - max_thres_below)),\n frame_energy > min_thres)\n frame_tag = np.asarray(frame_tag, dtype=np.int)\n \n seg_len_thres = shortest_len_in_ms * sr / 1000 / fs\n \n \n def ignore_short_seg(frame_tag, seg_len_thres):\n frame_tag_new = np.zeros_like(frame_tag) + frame_tag\n # boundary of each segment\n seg_bound = np.diff(np.concatenate(([0], frame_tag, [0])))\n # start of each segment\n seg_start = np.argwhere(seg_bound == 1)[:, 0]\n # end of each segment\n seg_end = np.argwhere(seg_bound == -1)[:, 0]\n assert seg_start.shape[0] == seg_end.shape[0], \\\n \"Fail to extract segment boundaries\"\n \n # length of segment\n seg_len = seg_end - seg_start\n seg_short_ids = np.argwhere(seg_len < seg_len_thres)[:, 0]\n for idx in seg_short_ids:\n start_frame_idx = seg_start[idx]\n end_frame_idx = seg_end[idx]\n frame_tag_new[start_frame_idx:end_frame_idx] = 0\n return frame_tag_new\n \n # work on non-speech, 1-frame_tag indicates non-speech frames\n frame_process_sil = ignore_short_seg(1-frame_tag, seg_len_thres)\n # reverse the sign\n frame_process_sil = 1 - frame_process_sil\n \n # work on speech\n 
frame_process_all = ignore_short_seg(frame_process_sil, seg_len_thres)\n \n # separate non-speech and speech segments\n # do overlap and add\n frame_tag = frame_process_all\n # buffer for speech segments\n spe_buf = np.zeros([np.sum(frame_tag) * fs + fl], dtype=wav.dtype)\n # buffer for non-speech segments\n sil_buf = np.zeros([np.sum(1-frame_tag) * fs + fl], dtype=wav.dtype)\n spe_fr_pt = 0\n non_fr_pt = 0\n for frame_idx, flag_speech in enumerate(frame_tag):\n if flag_speech:\n spe_buf[spe_fr_pt*fs:spe_fr_pt*fs+fl] += windowed_frames[frame_idx]\n spe_fr_pt += 1\n else:\n sil_buf[non_fr_pt*fs:non_fr_pt*fs+fl] += windowed_frames[frame_idx]\n non_fr_pt += 1\n \n if flag_output == 1: \n return spe_buf\n elif flag_output == 2:\n return sil_buf\n else:\n return spe_buf, sil_buf, frame_tag", "def trim_silence(T, hz, signal):\n N = T * hz\n extra = len(signal) - N\n c = np.abs(signal).cumsum()\n c = c[-extra:] - c[:extra]\n i = np.argmax(c)\n print(f'Keeping {T:.2g} of {len(signal)/hz:.2g} seconds'\n f' starting at +{i/hz:.2f} seconds')\n return signal[i:i+N]", "def remove_silence(y, threshold=-50, nb_sample=4096): \r\n from scipy.ndimage.filters import maximum_filter1d \r\n \r\n if np.max(y) != 1.0:\r\n raise ValueError(\"Input signal is expected to be normalised to 1\")\r\n \r\n # Ignore log(0) warnings\r\n np.seterr(divide = 'ignore') \r\n y_db = 20 * np.log10(np.abs(y))\r\n np.seterr(divide = 'warn') \r\n \r\n y_envelope = maximum_filter1d(y_db, nb_sample) \r\n mask = y_envelope >= threshold\r\n y_out = y[mask]\r\n \r\n return(y_out)", "def remove_silence_audio() -> None:\n # Read the wav file and get rate and list of data\n rate, data = scipy.io.wavfile.read('Test.wav')\n\n # Create list for data of amended wav file\n data2 = []\n\n # Loop through data of original file and add data that doesn't meed condition: values >= -10 and <= 10\n for i in range(len(data)):\n if data[i][0] >= -10 and data[i][0] <= 10:\n pass\n else:\n data2.append(data[i])\n\n # Create NumPy array from revised data\n data2 = np.asarray(data2, dtype=np.int16)\n\n # Write new data to wav file\n scipy.io.wavfile.write('Test.wav', rate, data2)\n\n return None", "def get_silence(self, duration):\n nsamples = int(self.sample_rate * duration)\n return \"\".join([wave.struct.pack('h', 0) for i in range(0, nsamples)])", "def majorityVoteSilence(y_Raw, amps, silenceClassNum):\n y_raw = y_Raw.copy()\n silenceThreshold = 1000\n majVotWindowLength = 2.0 #in seconds\n windowLength = 0.032\n frameLengthFloat = math.ceil(majVotWindowLength/windowLength)\n\n frameLength = int(frameLengthFloat)\n\n resArray = np.empty(y_raw.shape)\n\n n_frames = int(math.ceil(y_raw.shape[0]/frameLengthFloat))\n\n for i in range(n_frames):\n\n if ((i+1) * frameLength) < y_raw.shape[0]:\n\n tmpAmps = amps[(i * frameLength):(((i+1) * frameLength))]\n \n if tmpAmps.max() >= silenceThreshold:\n #if True:\n tmpArray = y_raw[(i * frameLength):(((i+1) * frameLength))]\n \n \"\"\" Get most frequent number in that frames: \"\"\"\n count = np.bincount(tmpArray)\n tmpMostFrequent = np.argmax(count)\n\n \"\"\" Fill all elements with most frequent number: \"\"\"\n tmpArray.fill(tmpMostFrequent)\n\n \"\"\" Write it into our result array: \"\"\"\n resArray[(i * frameLength):(((i+1) * frameLength))] = tmpArray\n \n else:\n \"\"\"If all amplitudes are below threshold, the \n sample is considered silent:\"\"\" \n resArray[(i * frameLength):(((i+1) * frameLength))] = silenceClassNum\n else:\n\n tmpAmps = amps[(i * frameLength):y_raw.shape[0]]\n\n\n if tmpAmps.max() >= 
silenceThreshold: \n #if True:\n tmpArray = y_raw[(i * frameLength):y_raw.shape[0]]\n \"\"\" Get most frequent number in that frames and fill \n all elements in the frame with it: \"\"\"\n count = np.bincount(tmpArray)\n tmpMostFrequent = np.argmax(count)\n\n \"\"\" Fill all elements with most frequent number: \"\"\"\n tmpArray.fill(tmpMostFrequent)\n\n \"\"\" Write it into our result array: \"\"\"\n resArray[(i * frameLength):y_raw.shape[0]] = tmpArray\n \n else:\n \"\"\"If all amplitudes are below threshold, the \n sample is considered silent:\"\"\" \n resArray[(i * frameLength):y_raw.shape[0]] = silenceClassNum\n\n return resArray", "def _synthesize_silence(self, duration_in_msec):\n if duration_in_msec > 0.0:\n sample_count = int(float(self.sample_rate) * duration_in_msec * 0.001);\n # There are two bytes per 16-bit sample.\n byte_count = sample_count + sample_count\n tmp_buffer = bytearray(byte_count)\n # Loop and create the audio samples.\n index = 0\n for i in range(0, byte_count):\n tmp_buffer[i] = 0\n return tmp_buffer", "def _compute_sampling_threshold(global_step, k):\n return k / (k + math.exp(global_step / k))", "def detect_silences(sound, sil_threshold, sil_duration):\n textgrid = call(sound, 'To TextGrid (silences)', 100, 0.0, sil_threshold, sil_duration, 0.1, 'silence', 'speech')\n\n return textgrid", "def trim_silence_file(file_path, noise_threshold=150):\n rate, audio = scipy.io.wavfile.read(file_path)\n trimmed_audio = trim_silence(audio, noise_threshold=noise_threshold)\n print()\n scipy.io.wavfile.write(file_path, rate, trimmed_audio)", "def silence(score0, score1):\n return silence", "def split_vad(silence_probs: List[float], p_silence_threshold: float, len_threshold: int) -> List[Tuple[int, int]]:\n segments = []\n\n start = None\n i = 0\n n = len(silence_probs)\n\n while i < len(silence_probs) and silence_probs[i] > p_silence_threshold:\n i += 1\n # supported invariants: `start` points to the frame where speech starts, i >= start\n start = i\n\n while i < n:\n # scroll until first silence frame\n if silence_probs[i] < p_silence_threshold:\n i += 1\n continue\n\n # now i points to the first silence frame\n # look ahead: do we have at least len_threshold silence frames?\n all_silence = True\n for j in range(i + 1, min(i + len_threshold, n)):\n all_silence = all_silence and silence_probs[j] > p_silence_threshold\n if not all_silence:\n break\n\n if not all_silence:\n # no we don't: disregard the silence, go further\n # starting from the first non-silence frame\n i = j\n else:\n # we do have enough silence for a split\n if i - start > len_threshold:\n segments.append((start, i))\n\n while i < n and silence_probs[i] > p_silence_threshold:\n i += 1\n start = i\n i += 1\n\n if i - start > len_threshold and start < n:\n segments.append((start, i))\n\n return segments", "def split_multiple_recordings(audio, min_silence_duration=0.25, noise_threshold=150, sample_rate_hz=8e3):\n # A list of tuples (start, stop)\n min_silence_frame = sample_rate_hz * min_silence_duration\n silence_zones = []\n\n zone_start = None\n zone_end = None\n\n for idx, point in enumerate(audio):\n if abs(point) < noise_threshold and zone_start is None:\n zone_start = idx\n\n if abs(point) > noise_threshold and zone_start is not None:\n zone_end = idx\n\n # If we are in a silent zone and we come to the end point\n if zone_start is not None and zone_end and abs(point) > noise_threshold:\n if (zone_end - zone_start) > min_silence_frame:\n silence_zones.append((zone_start, zone_end))\n\n zone_start = None\n 
zone_end = None\n\n # Split the recording by the zones\n split_recordings = []\n for idx, zone in enumerate(silence_zones):\n if idx == 0:\n start = 0\n else:\n start = silence_zones[idx - 1][1]\n\n end = zone[0]\n split_recordings.append(audio[start:end])\n\n return split_recordings", "def record():\n p = pyaudio.PyAudio()\n stream = p.open(format=FORMAT, input_device_index=0, channels=1, rate=RATE, input=True, output=True, frames_per_buffer=CHUNK_SIZE)\n num_silent = 0\n snd_started = False\n\n r = array('h')\n while 1:\n snd_data = array('h', stream.read(CHUNK_SIZE, exception_on_overflow = False))\n if byteorder == 'big':\n snd_data.byteswap()\n r.extend(snd_data)\n\n silent = is_silent(snd_data)\n if silent and snd_started:\n num_silent += 1\n elif not silent and not snd_started:\n print(\"Sound started.\")\n snd_started = True\n\n if snd_started and num_silent> 10:\n break\n\n sample_width = p.get_sample_size(FORMAT)\n stream.stop_stream()\n stream.close()\n p.terminate()\n\n r = normalize(r)\n #r = trim(r)\n #r = add_silence(r, 0.5)\n return sample_width, r", "def loudness(chunk):\n data = numpy.array(chunk, dtype=float) / 32768.0\n ms = math.sqrt(numpy.sum(data ** 2.0) / len(data))\n if ms < 10e-8: ms = 10e-8\n return 10.0 * math.log(ms, 10.0)", "def append_silence(duration_milliseconds=500):\r\n num_samples = duration_milliseconds * (sample_rate / 1000.0)\r\n\r\n for x in range(int(num_samples)): \r\n audio.append(0.0)\r\n\r\n return", "def record():\n p = pyaudio.PyAudio()\n stream = p.open(format=FORMAT, channels=1, rate=RATE,\n input=True, output=True,\n frames_per_buffer=CHUNK_SIZE)\n\n num_silent = 0\n snd_started = False\n\n r = array('h')\n\n while 1:\n # little endian, signed short\n snd_data = array('h', stream.read(CHUNK_SIZE))\n if byteorder == 'big':\n snd_data.byteswap()\n r.extend(snd_data)\n\n silent = is_silent(snd_data)\n\n if silent and snd_started:\n num_silent += 1\n elif not silent and not snd_started:\n snd_started = True\n\n if snd_started and num_silent > 30:\n break\n\n sample_width = p.get_sample_size(FORMAT)\n stream.stop_stream()\n stream.close()\n p.terminate()\n\n r = normalize(r)\n r = trim(r)\n r = add_silence(r, 0.5)\n return sample_width, r", "def get_waveform_halfwidth(waveform, sampling_rate=30000.):\n w = resample(waveform,200)#upsample to smooth the data\n time = np.linspace(0,len(waveform)/sampling_rate,200)\n trough = np.where(w==np.min(w))[0][0]\n peak = np.where(w==np.max(w))[0][0]\n \n #dur = time[trough:][np.where(w[trough:]==np.max(w[trough:]))[0][0]] - time[trough]\n if w[peak] > np.abs(w[trough]):\n dur = time[peak:][np.where(w[peak:]>=0.5*np.min(w[peak:]))[0][0]] - time[peak] \n else:\n dur = time[trough:][np.where(w[trough:]<=0.5*np.max(w[trough:]))[0][0]] - time[trough] \n if peak<trough:\n dur=-dur\n return dur", "def weighted_loudness(wavetable: np.ndarray, mult_freq: float = 1.):\n ps = np.abs(np.fft.fft(wavetable))\n\n time_step = 1 / 44100\n freqs = np.fft.fftfreq(wavetable.size, time_step)\n idx = np.argsort(freqs)\n\n weighted_sum = 0\n for i in idx:\n freq = freqs[i]\n if freq > 0:\n weighted_sum += perceptual_amplitude_dbb(freq * mult_freq) * ps[i]\n\n return weighted_sum", "def record():\n p = pyaudio.PyAudio()\n stream = p.open(format=FORMAT, channels=1, rate=RATE,\n input=True, output=True,\n frames_per_buffer=CHUNK_SIZE)\n\n num_silent = 0\n snd_started = False\n\n r = array('h')\n\n while 1:\n # little endian, signed short\n snd_data = array('h', stream.read(CHUNK_SIZE))\n if byteorder == 'big':\n snd_data.byteswap()\n 
r.extend(snd_data)\n\n silent = is_silent(snd_data)\n\n if silent and snd_started:\n num_silent += 1\n elif not silent and not snd_started:\n snd_started = True\n\n if snd_started and num_silent > SILENCE:\n break\n\n sample_width = p.get_sample_size(FORMAT)\n stream.stop_stream()\n stream.close()\n p.terminate()\n\n r = normalize(r)\n r = trim(r)\n r = add_silence(r, 0.5)\n return sample_width, r", "def removeGlitch( self, rGlitchMaxTresholdPercent = 5., rGlitchMaxDurationSec = 0.01, rSilenceTresholdPercent = 1., rSilenceMinDurationSec = 0.020 ):\n timeBegin = time.time()\n nGlitchLimit = int( self.getSampleMaxValue() * rGlitchMaxTresholdPercent / 100 )\n nSilenceLimit = int( self.getSampleMaxValue() * rSilenceTresholdPercent / 100 )\n \n nGlitchNumSampleMaxDuration = int( rGlitchMaxDurationSec * self.nSamplingRate )\n nSilenceNumSampleMinDuration = int( rSilenceMinDurationSec * self.nSamplingRate )\n \n rMarginAroundSilenceBlanking = 0.1 # in sec\n nSilenceAroundSilenceBlanking = int( rMarginAroundSilenceBlanking * self.nSamplingRate )\n \n logging.debug( \"nSilenceLimit: %d, nGlitchLimit: %d, nGlitchNumSampleMaxDuration: %d, nSilenceNumSampleMinDuration: %d\" % ( nSilenceLimit, nGlitchLimit, nGlitchNumSampleMaxDuration, nSilenceNumSampleMinDuration ) )\n \n aPosGlitchBegin = [0]*self.nNbrChannel # for each channel, the position of beginning glitch\n aPosSilenceBegin = [0]*self.nNbrChannel # for each channel, the position of beginning silence\n aPosLastSoundEnd = [0]*self.nNbrChannel # for each channel, the last time with sound\n anState = [0]*self.nNbrChannel # for each channel: the nature of current sound: 0: real silence, 1: glitch, 2: sound, 3: short silence after glitch, 4: short silence after sound\n\n nNbrGlitch = 0\n nNumSample = 0\n nNbrSampleReplace = 0\n while( True ):\n for nNumChannel in range( self.nNbrChannel ):\n val = self.data[(nNumSample*self.nNbrChannel)+nNumChannel]\n val = abs(val)\n nCurrentState = anState[nNumChannel]\n newState = nCurrentState\n \n if( nCurrentState == 0 ):\n if( val > nGlitchLimit ):\n newState = 2\n elif( val > nSilenceLimit ):\n newState = 1\n aPosGlitchBegin[nNumChannel] = nNumSample\n elif( nCurrentState == 1 ):\n if( val > nGlitchLimit ):\n newState = 2\n elif( val < nSilenceLimit ):\n newState = 3\n aPosSilenceBegin[nNumChannel] = nNumSample\n elif( nNumSample - aPosGlitchBegin[nNumChannel] >= nGlitchNumSampleMaxDuration ):\n # too long => sound\n newState = 2\n elif( nCurrentState == 2 ):\n if( val < nSilenceLimit ):\n newState = 4\n aPosSilenceBegin[nNumChannel] = nNumSample\n aPosLastSoundEnd[nNumChannel] = nNumSample\n elif( nCurrentState == 3 ):\n if( val > nGlitchLimit ):\n newState = 2\n elif( val > nSilenceLimit ):\n newState = 1\n elif( nNumSample - aPosSilenceBegin[nNumChannel] >= nSilenceNumSampleMinDuration ):\n newState = 0\n # erase this glitch\n logging.info( \"Channel%d: Erasing glitch between %s (%5.3fs) and %s (%5.3fs)\" % (nNumChannel, aPosGlitchBegin[nNumChannel],aPosGlitchBegin[nNumChannel]/float(self.nSamplingRate), nNumSample, nNumSample/float(self.nSamplingRate) ) )\n nNbrGlitch += 1\n self.data[ (aPosGlitchBegin[nNumChannel]*self.nNbrChannel)+nNumChannel:(nNumSample*self.nNbrChannel)+nNumChannel:self.nNbrChannel]=[0]*(nNumSample-aPosGlitchBegin[nNumChannel])\n elif( nCurrentState == 4 ):\n if( val > nSilenceLimit ):\n newState = 2\n elif( nNumSample - aPosSilenceBegin[nNumChannel] >= nSilenceNumSampleMinDuration ):\n newState = 0\n # nothing to do!\n \n if( newState != nCurrentState ):\n if( nNumSample < 300000 
):\n logging.debug( \"Channel%d: sample: %d (%5.3fs), new state: %d, data: %d\" % (nNumChannel,nNumSample,nNumSample/float(self.nSamplingRate), newState,val) )\n anState[nNumChannel] = newState\n if( newState == 2 ):\n # we add a small respiration to leave sound trail and attacks\n if( aPosLastSoundEnd[nNumChannel] == 0 ):\n nBegin = 0\n else:\n nBegin = aPosLastSoundEnd[nNumChannel] + nSilenceAroundSilenceBlanking\n nEnd = nNumSample - nSilenceAroundSilenceBlanking\n if( nBegin < nEnd ):\n logging.debug( \"Channel%d: Blanking silence between %s (%5.3fs) and %s (%5.3fs)\" % ( nNumChannel, nBegin, nBegin/float(self.nSamplingRate), nEnd, nEnd/float(self.nSamplingRate) ) )\n self.data[ (nBegin*self.nNbrChannel)+nNumChannel:(nEnd*self.nNbrChannel)+nNumChannel:self.nNbrChannel]=[0]*(nEnd-nBegin)\n \n # for each chan - end\n nNumSample += 1\n if( nNumSample % 10000 == 0 ):\n #TODO: unpack to be able to modify just a bit of the chain OR look how to remove a bit of the chain without compy everything (super long)\n logging.debug( \"nNumSample: %d (state[0]: %d)\" % (nNumSample, anState[0]) ) \n \n if( nNumSample >= self.nNbrSample ):\n break\n # while - end\n \n rDuration = time.time()-timeBegin\n \n logging.info( \"removeGlitch: nNbrGlitch: %d, (time taken: %5.3fs)\" % (nNbrGlitch, rDuration ) )\n \n return True", "def trackThresholdOptical(threshold, ants=0) :\n SPEED_OF_LIGHT = 299792458.0 # m/s\n antlist = helpers.makeList(ants)\n if antlist[0] == 0: antlist = currentAntennaNumbers()\n flo = lofreq()\n t = 0\n for ant in antlist:\n antmp = \"control.antenna%d\" %ant\n d = queryDouble(antmp + \".diameter\")\n t = queryDouble(antmp + \".trackTolerance\")\n tbw = threshold*math.pi/(180*60*60)*flo*1e9*d/SPEED_OF_LIGHT\n #print \"Threshold in bw:\", tbw\n trackThreshold(tbw, ant)\n return t", "def peaks_and_thresh(self):\n # split histograms at threshold then get mean and stdev:\n counts = np.array(self.stats['Counts'])\n bg = counts[counts < self.thresh] # background\n signal = counts[counts > self.thresh] # signal above threshold\n try:\n 1//np.size(bg) # raises ZeroDivisionError if size == 0\n 1//(np.size(bg)-1) # need > 1 images to get std dev\n 1//np.size(signal)\n 1//(np.size(signal)-1)\n self.peak_heights = [1, 1]\n self.peak_centre = [np.mean(bg), np.mean(signal)]\n self.peak_widths = [np.std(bg, ddof=1), np.std(signal, ddof=1)]\n self.thresh = self.peak_centre[0] + 5*self.peak_widths[0] # update threshold\n except ZeroDivisionError: pass", "def filter_audio(audio):\n\n # Calculate voice energy for every 123 ms block\n apower = lr.amplitude_to_db(np.abs(lr.stft(audio, n_fft=2048)), ref=np.max)\n\n # Summarize energy of every rate, normalize\n apsums = np.sum(apower, axis=0) ** 2\n apsums -= np.min(apsums)\n apsums /= np.max(apsums)\n\n # Smooth the graph for saving short spaces and pauses, remove sharpness\n apsums = np.convolve(apsums, np.ones((9,)), 'same')\n # Normalize again\n apsums -= np.min(apsums)\n apsums /= np.max(apsums)\n\n # Set noise limit to 35% over voice\n apsums = np.array(apsums > 0.35, dtype=bool)\n\n # Extend the blocks every on 125ms\n # before separated samples (2048 at block)\n apsums = np.repeat(apsums, np.ceil(len(audio) / len(apsums)))[:len(audio)]\n\n return audio[apsums]", "def fine_tune(self, duration = 2):\n\n with sr.Microphone() as source:\n self.recorder.adjust_for_ambient_noise(source, duration=duration)\n return self.recorder.energy_threshold", "def strip_silence(self):\n start_idx = 0\n end_idx = -1\n # class position\n for i, tone in 
enumerate(self.tone_list):\n if not 'silence' in tone:\n start_idx = i\n break\n\n for i, tone in reversed(list(enumerate(self.tone_list))):\n if not 'silence' in tone:\n end_idx = i - 1\n break\n\n self.tone_list = self.tone_list[start_idx:end_idx]", "def calculate_wavelength_metric(wavelength_min, wavelength_max):\n length_max = np.log(550) * 2\n wavelength = np.abs(wavelength_max + wavelength_min) / 2\n log_wl = np.log(wavelength)\n default_met = np.array(log_wl / length_max)\n scaled_met = 1.75 * (default_met - 0.5) + 0.5\n if wavelength == 0:\n return 0\n else:\n return scaled_met.clip(min=10e-11, max=1)", "def _cache_silence_sample_data(self):\n self.silence_4_sample_buffer = self._synthesize_silence(4.0 * self.dot_time_in_msec)\n self.silence_2_sample_buffer = self._synthesize_silence(2.0 * self.dot_time_in_msec)\n self.silence_1_sample_buffer = self._synthesize_silence(self.dot_time_in_msec)", "def analyzeWAV(inputFile):\n data, fs, nbits = audiolab.wavread(inputFile)\n samplingRate = fs\n return [data, samplingRate]", "def custom_sound(type_of, attack, decay, cutoff, coef, time, freq):\n dzw = np.zeros(time*44100)\n l=0\n for i in type_of:\n if i==\"sin\":\n dzw+= coef[l]*sin_custom(freq,time,attack[l],decay[l])\n if i==\"sq\":\n dzw+= coef[l]*sq_custom(freq,time,attack[l],decay[l])\n if i==\"saw\":\n dzw+= coef[l]*saw_custom(freq,time,attack[l],decay[l])\n l+=1 \n dzw[(1-cutoff)*time*44100 -1:]==0\n dzw = np.repeat(dzw,2).reshape(len(dzw),2)\n dzw = dzw/np.amax(dzw)\n return(dzw)", "def lmin(self):\n cond = (self.transmit / self.transmit.max()) > 1./100\n return min(self.wavelength[cond])", "def wave_samples(self):\n return self._quantized_subsamples", "def mask(self):\n\n mask = self.freqs >= self.minimum_threshold\n mask = mask.astype(int)\n self.freqs = self.freqs * mask\n self.sums = self.sums * mask", "def noise(self, freq: int, /) -> None:", "def timbral_brightness(fname, threshold=0, crossover=3000, stepSize=1024, blockSize=2048, n_oct=2, minFreq=20):\n # use pysoundfile instead\n audio_samples, fs = sf.read(fname, always_2d=False)\n\n num_channels = np.shape(audio_samples)\n if len(num_channels) > 1:\n # take just the left channel\n audio_samples = audio_samples[:, 0]\n\n # initialise default settings\n centroid_list = []\n crossover_idx = 0\n mag_hi_list = []\n mag_all_list = []\n updated = False\n window = np.hamming(blockSize)\n\n i = 0\n # split the audio into blocks of audio (ignore last block like matlab\n while (i + blockSize) < len(audio_samples):\n eval_audio = audio_samples[i:i + blockSize]\n complex_spectrum = np.fft.fft(eval_audio * window)\n magnitude_spectrum = np.absolute(complex_spectrum[0:1 + int(len(complex_spectrum) / 2)])\n\n if sum(magnitude_spectrum) > 0:\n\n if not updated:\n fls = calcFrequencyScale(len(magnitude_spectrum), fs, blockSize)\n crossover_idx = calcCrossoverIDX(fls, crossover)\n minIDX = calcMinIDX(fls, minFreq)\n maxIDX = calcMaxIDX(fls, n_oct)\n if n_oct > 0:\n f_upper = calcUpperFrequencyLimit(fls, n_oct, maxIDX)\n f_lower = calcLowFrequencyLimit(fls, n_oct, maxIDX)\n\n updated = True\n\n if n_oct > 0:\n smoothed_spectrum = spectralSmoothing(magnitude_spectrum, f_upper, f_lower)\n else:\n smoothed_spectrum = magnitude_spectrum\n\n tpower = sum(smoothed_spectrum[minIDX:])\n\n # calculate the spectral centroid\n if tpower > threshold:\n upper_spectrum = smoothed_spectrum[crossover_idx:]\n upper_fls = fls[crossover_idx:(crossover_idx + len(upper_spectrum))]\n upper_power = sum(upper_spectrum)\n centroid = sum(upper_spectrum * upper_fls) 
/ upper_power\n\n centroid_list.append(centroid)\n mag_all_list.append(tpower)\n mag_hi_list.append(upper_power)\n\n else:\n centroid_list.append(0)\n mag_all_list.append(0)\n mag_hi_list.append(0)\n\n i += stepSize\n\n if sum(mag_all_list) == 0:\n return 0\n\n mean_centroid = np.mean(np.array(centroid_list))\n mean_mag_hi = np.mean(np.array(mag_hi_list))\n mean_mag_all = np.mean(np.array(mag_all_list))\n\n # float required for float division in Python 2.7\n ratio = mean_mag_hi / float(mean_mag_all)\n\n # equation taken directly from Pearce [2016]\n bright = -25.8699 + (64.0127 * (np.log10(ratio) + (0.44 * np.log10(mean_centroid))))\n\n return bright", "def pumpThreshold(self):\n EsatL, tauL, etaP = self.EsatL, self.tauL, self.etaP\n loss, DR = self.loss, self.DR\n return(EsatL / tauL * (loss + DR) / etaP)", "def perfectrefl(wavelength):\n return 1.0", "def arg_threshold(data: tensor, threshold: float = 0.8, offset: int = 50) -> (int, int):\n\n assert 0 < data.shape[0] <= 2, \"Expected mono/stereo audio data to be of form (channels, smaples).\"\n\n start = 0\n end = len(data)\n for start, d in enumerate(data[0]):\n if abs(d) > threshold:\n print(\"Starts at: %i with volume %d.\" % (start, float(d)))\n break\n\n for i, d in enumerate(np.flipud(data[0])):\n if abs(d) > threshold:\n end = len(data[0]) - i\n print(\"Ends at: %i with volume %d.\" % (end, float(d)))\n break\n\n return start + offset, end - offset", "def _compute_sampling_threshold(global_step, k):\n return tf.cast(k / (k + tf.exp(global_step / k)), tf.float32)", "def __determina_media_confidence(self):\n media = 0\n nr = 0\n for el in self.__results['conf']:\n media += int(el)\n nr += 1\n media /= nr\n return media", "def get_fft_threshold(self) -> float:\n return float(self.query(':measure:fft:threshold?'))", "def snip_audio(data, snip_length=4, cutoff=0.25, min_snips=None, max_snips=None,\n num_jitters=None, jitter=0.25,\n rate=44100, log=False):\n if max_snips is None:\n if min_snips is None:\n min_snips = 1\n max_snips = max(min_snips, int((float(len(data))/rate)/3.0))\n # Pad data with (snip_length * rate / 2) zeros.\n chop = np.lib.pad(data, int(snip_length*rate/2), 'constant')\n if log:\n logging.info(\"Data padded with %.1f s of zeros.\" %\n (float(snip_length)/2))\n snips = []\n logs = []\n max_sum = 0\n count = 0\n sum_ratio = 1\n\n while True:\n current_sum, start_idx, end_idx = find_loudest_subset(chop, snip_length,\n rate=rate)\n max_sum = max(max_sum, current_sum)\n sum_ratio = float(current_sum) / max_sum\n if sum_ratio < cutoff:\n break\n collection = []\n if num_jitters is None:\n collection.append(np.copy(chop[start_idx : end_idx]))\n else:\n for j in xrange(num_jitters):\n offset = int(jitter * rate * random.uniform(-1, 1))\n try:\n collection.append(np.copy(chop[start_idx+offset : end_idx+offset]))\n except IndexError:\n collection.append(np.copy(chop[start_idx : end_idx]))\n logs.append((sum_ratio, max_sum, start_idx, end_idx))\n chop[start_idx : end_idx] = 0\n snips.append(collection)\n count += 1\n if count >= max_snips:\n break\n return snips, logs", "def band_penalty(self):\n fc_ix = np.argmin(np.abs(self.f - self.fc)) # Index to frequency array closes to center frequency\n # Number of indexes on each side of center frequency, not extending outside, only up to 10 kHz\n n = min(fc_ix, self.ix10k - fc_ix)\n if n == 0:\n return 0.0\n return np.mean(np.square(self.fr[fc_ix - n:fc_ix] - (self.gain - self.fr[fc_ix + n - 1:fc_ix - 1:-1])))", "def find_start_end_rests(audio_data, sr, hop_length=HOP_LENGTH, 
n_fft=N_FFT):\r\n \r\n # Compute the 3rd percentile of the envelope and \r\n # deem anything below this value as silence \r\n envelope = frame(audio_data, hop_length=hop_length, frame_length=n_fft).max(axis=0)\r\n lower_bound = np.percentile(envelope, 5.0)\r\n \r\n # Implement the search as loop, this should be faster than vectorisation\r\n k = 0\r\n while envelope[k] <= lower_bound:\r\n k += 1\r\n \r\n # Return 0 if there is no start rest\r\n if k == 0:\r\n time_start = 0.0\r\n else:\r\n # The first value of the output of the frame function correspond to the time of\r\n # n_fft, then the times are spaced according to hop_length \r\n time_start = ((k-1)*hop_length + n_fft)/float(sr)\r\n \r\n j = len(envelope)-1\r\n while envelope[j] <= lower_bound:\r\n j -= 1\r\n \r\n # Return the length of the track if the is no end rest\r\n if j == len(envelope)-1:\r\n time_end = len(audio_data)/float(sr)\r\n else:\r\n time_end = ((j-1)*hop_length + n_fft)/float(sr)\r\n \r\n times_start_end_rests = [time_start, time_end]\r\n \r\n return(times_start_end_rests)", "def limit(filename,threshold,makeup,wout=True,plot=False):\n start=time.time()\n n, data, data_dB,sr,ch=inputwav(filename)\n dataL,dataL_bit=compress(filename,threshold,1000.0,makeup,1.0,500.0,wout=False,plot=plot)\n if wout==True:\n print('Exporting...')\n sf.write(filename[0:len(filename)-4]+'_limit.wav',dataL_bit,44100,'PCM_16')\n end=time.time()\n elapsed=int(1000*(end-start))\n print('Done!')\n print('...............................')\n print('Completed in '+str(elapsed)+' milliseconds.') \n return dataL,dataL_bit", "def trim_silence(infile, outfile):\n subprocess.call([\n 'sox',\n infile,\n outfile,\n 'silence',\n '1',\n '0.1',\n '1%'\n ])\n return outfile", "def timbral_warmth(fname, dev_output=False, phase_correction=False, clip_output=False, max_FFT_frame_size=8192,\n max_WR = 12000, fs=0):\n '''\n Read input\n '''\n audio_samples, fs = timbral_util.file_read(fname, fs, phase_correction=phase_correction)\n\n # get the weighted high frequency content\n mean_wr, _, _, weighted_hf = warm_region_cal(audio_samples, fs)\n\n # calculate the onsets\n envelope = timbral_util.sample_and_hold_envelope_calculation(audio_samples, fs, decay_time=0.1)\n envelope_time = np.arange(len(envelope)) / float(fs)\n\n # calculate the onsets\n nperseg = 4096\n original_onsets = timbral_util.calculate_onsets(audio_samples, envelope, fs, nperseg=nperseg)\n # If onsets don't exist, set it to time zero\n if not original_onsets:\n original_onsets = [0]\n # set to start of file in the case where there is only one onset\n if len(original_onsets) == 1:\n original_onsets = [0]\n '''\n Initialise lists for storing features\n '''\n # set defaults for holding\n all_rms = []\n all_ratio = []\n all_SC = []\n all_WR_Ratio = []\n all_decay_score = []\n\n\n # calculate metrics for each onset\n for idx, onset in enumerate(original_onsets):\n if onset == original_onsets[-1]:\n # this is the last onset\n segment = audio_samples[onset:]\n else:\n segment = audio_samples[onset:original_onsets[idx+1]]\n\n segment_rms = np.sqrt(np.mean(segment * segment))\n all_rms.append(segment_rms)\n\n # get FFT of signal\n segment_length = len(segment)\n if segment_length < max_FFT_frame_size:\n freq, time, spec = spectrogram(segment, fs, nperseg=segment_length, nfft=max_FFT_frame_size)\n else:\n freq, time, spec = spectrogram(segment, fs, nperseg=max_FFT_frame_size, nfft=max_FFT_frame_size)\n\n # flatten the audio to 1 dimension. 
Catches some strange errors that cause crashes\n if spec.shape[1] > 1:\n spec = np.sum(spec, axis=1)\n spec = spec.flatten()\n\n # normalise for this onset\n spec = np.array(list(spec)).flatten()\n this_shape = spec.shape\n spec /= max(abs(spec))\n\n '''\n Estimate of fundamental frequency\n '''\n # peak picking algorithm\n peak_idx, peak_value, peak_x = timbral_util.detect_peaks(spec, freq=freq, fs=fs)\n # find lowest peak\n fundamental = np.min(peak_x)\n fundamental_idx = np.min(peak_idx)\n\n '''\n Warmth region calculation\n '''\n # estimate the Warmth region\n WR_upper_f_limit = fundamental * 3.5\n if WR_upper_f_limit > max_WR:\n WR_upper_f_limit = 12000\n tpower = np.sum(spec)\n WR_upper_f_limit_idx = int(np.where(freq > WR_upper_f_limit)[0][0])\n\n if fundamental < 260:\n # find frequency bin closest to 260Hz\n top_level_idx = int(np.where(freq > 260)[0][0])\n # sum energy up to this bin\n low_energy = np.sum(spec[fundamental_idx:top_level_idx])\n # sum all energy\n tpower = np.sum(spec)\n # take ratio\n ratio = low_energy / float(tpower)\n else:\n # make exception where fundamental is greater than\n ratio = 0\n\n all_ratio.append(ratio)\n\n '''\n Spectral centroid of the segment\n '''\n # spectral centroid\n top = np.sum(freq * spec)\n bottom = float(np.sum(spec))\n SC = np.sum(freq * spec) / float(np.sum(spec))\n all_SC.append(SC)\n\n '''\n HF decay\n - linear regression of the values above the warmth region\n '''\n above_WR_spec = np.log10(spec[WR_upper_f_limit_idx:])\n above_WR_freq = np.log10(freq[WR_upper_f_limit_idx:])\n np.ones_like(above_WR_freq)\n metrics = np.array([above_WR_freq, np.ones_like(above_WR_freq)])\n\n # create a linear regression model\n model = linear_model.LinearRegression(fit_intercept=False)\n model.fit(metrics.transpose(), above_WR_spec)\n decay_score = model.score(metrics.transpose(), above_WR_spec)\n all_decay_score.append(decay_score)\n\n\n '''\n get mean values\n '''\n mean_SC = np.log10(np.mean(all_SC))\n mean_decay_score = np.mean(all_decay_score)\n weighted_mean_ratio = np.average(all_ratio, weights=all_rms)\n\n if dev_output:\n return mean_SC, weighted_hf, mean_wr, mean_decay_score, weighted_mean_ratio\n else:\n\n '''\n Apply regression model\n '''\n all_metrics = np.ones(6)\n all_metrics[0] = mean_SC\n all_metrics[1] = weighted_hf\n all_metrics[2] = mean_wr\n all_metrics[3] = mean_decay_score\n all_metrics[4] = weighted_mean_ratio\n\n coefficients = np.array([-4.464258317026696,\n -0.08819320850778556,\n 0.29156539973575546,\n 17.274733561081554,\n 8.403340066029507,\n 45.21212125085579])\n\n warmth = np.sum(all_metrics * coefficients)\n\n # clip output between 0 and 100\n if clip_output:\n warmth = timbral_util.output_clip(warmth)\n\n return warmth", "def test_signal_threshold(df_phys, signal, threshold):\n df_signal = df_phys[df_phys[\"Signal\"] == signal][\"Physical Value\"]\n\n stats = df_signal.agg([\"count\", \"min\", \"max\", \"mean\", \"std\"])\n delta = stats[\"max\"] - stats[\"min\"]\n\n if delta > threshold:\n print(f\"{signal} exhibits a 'max - min' delta of {delta} exceeding threshold of {threshold}\")", "def _getWavelet(self, ch='dos1rate', thresh=0.1, maxWidth=1, SIGNIF_LEVEL=0.25):\n # Feed the counts into the wavelet microburst finder\n validDataIdt = np.where(self.d[ch] != -1E31)[0]\n waveletAnalysis.WaveletDetector.__init__(self, self.d[ch][validDataIdt], \n self.d['dateTime'][validDataIdt], 0.1, mother='DOG', siglvl=0.95)\n self.waveletTransform() # Get wavelet space\n self.waveletFilter(self.s0, maxWidth, 
SIGNIF_LEVEL=SIGNIF_LEVEL) # Do a band pass and significance filter.\n self.degenerateInvWaveletTransform() # Inverse transform filtered data.\n # Indicies where the error-filetered data is greater than thresh\n self.burstIdt = np.where(self.dataFlt > thresh)[0] \n self._getPeaks(ch, validDataIdt) # Find peaks\n return", "def checkfrequency(inputgiven):\n data_size = 40000\n wav_file = wave.open(inputgiven, 'r')\n data = wav_file.readframes(data_size)\n wav_file.close()\n data = struct.unpack('{n}h'.format(n=data_size), data)\n print max(data)", "def num_samples(self):\n with audioread.audio_open(self.path) as f:\n return int(f.duration * f.samplerate)", "def window_step(self):\n if self.window_samples and self.overlap_samples and self.audio_sample_rate:\n return (\n float(self.window_samples - self.overlap_samples)\n / self.audio_sample_rate\n )\n return None", "def get_UL_bw(self,wavelength):\n sec_before = self.wavelengths[wavelength]['sec_UL_timestamp']\n if self.env.now > (sec_before + 2):\n # IF its 2 seconds ahead of last timestamp, it means no more pkts arrived since then\n return 0\n return self.wavelengths[wavelength]['last_UL_bps']", "def wav2(bins, weis):\n\n mean = np.average(bins, weights=weis)\n var = np.average((bins-mean)**2, weights=weis)\n ##\n sum1 = weis.sum()\n sum2 = (weis**2).sum()\n rms = np.sqrt(sum1*var/(sum1-sum2/sum1))\n\n return mean, rms", "def calculateMaxAmplitude(sampleWidth: int) -> int:\n return 2 ** (sampleWidth * NUM_BITS_IN_A_BYTE - 1) - 1", "def get_variance_threshold(variances, whisker_percent, whisker_length):\n low_percentiles, high_percentiles = np.percentile(variances, (whisker_percent, 100-whisker_percent))\n threshold = high_percentiles + (high_percentiles - low_percentiles) * whisker_length\n\n return threshold", "def split_multiple_recordings_file(file_path, min_silence_duration=0.25, noise_threshold=150):\n print(file_path)\n rate, audio = scipy.io.wavfile.read(file_path)\n split_recordings = split_multiple_recordings(audio, min_silence_duration=min_silence_duration,\n noise_threshold=noise_threshold, sample_rate_hz=rate)\n\n if file_path.count('.') != 1:\n raise Exception('File_path must contain exactly one period, usually in extension. 
IE: /home/test.wav')\n\n for idx, recording in enumerate(split_recordings):\n print(\"spliting \" + file_path)\n new_file_path = file_path.split('.')[0] + '_' + str(idx) + \".wav\"\n scipy.io.wavfile.write(new_file_path, rate, recording)", "def silence_removal(signal, sampling_rate, st_win, st_step, smooth_window=0.5,\n weight=0.5, plot=False):\n\n if weight >= 1:\n weight = 0.99\n if weight <= 0:\n weight = 0.01\n\n # Step 1: feature extraction\n signal = audioBasicIO.stereo_to_mono(signal)\n st_feats, _ = stf.feature_extraction(signal, sampling_rate,\n st_win * sampling_rate,\n st_step * sampling_rate)\n\n # Step 2: train binary svm classifier of low vs high energy frames\n # keep only the energy short-term sequence (2nd feature)\n st_energy = st_feats[1, :]\n en = np.sort(st_energy)\n # number of 10% of the total short-term windows\n st_windows_fraction = int(len(en) / 10)\n\n # compute \"lower\" 10% energy threshold\n low_threshold = np.mean(en[0:st_windows_fraction]) + 1e-15\n\n # compute \"higher\" 10% energy threshold\n high_threshold = np.mean(en[-st_windows_fraction:-1]) + 1e-15\n\n # get all features that correspond to low energy\n low_energy = st_feats[:, np.where(st_energy <= low_threshold)[0]]\n\n # get all features that correspond to high energy\n high_energy = st_feats[:, np.where(st_energy >= high_threshold)[0]]\n\n # form the binary classification task and ...\n features = [low_energy.T, high_energy.T]\n # normalize and train the respective svm probabilistic model\n\n # (ONSET vs SILENCE)\n features_norm, mean, std = at.normalize_features(features)\n svm = at.train_svm(features_norm, 1.0)\n\n # Step 3: compute onset probability based on the trained svm\n prob_on_set = []\n for index in range(st_feats.shape[1]):\n # for each frame\n cur_fv = (st_feats[:, index] - mean) / std\n # get svm probability (that it belongs to the ONSET class)\n prob_on_set.append(svm.predict_proba(cur_fv.reshape(1, -1))[0][1])\n prob_on_set = np.array(prob_on_set)\n\n # smooth probability:\n prob_on_set = smooth_moving_avg(prob_on_set, smooth_window / st_step)\n\n # Step 4A: detect onset frame indices:\n prog_on_set_sort = np.sort(prob_on_set)\n\n # find probability Threshold as a weighted average\n # of top 10% and lower 10% of the values\n nt = int(prog_on_set_sort.shape[0] / 10)\n threshold = (np.mean((1 - weight) * prog_on_set_sort[0:nt]) +\n weight * np.mean(prog_on_set_sort[-nt::]))\n\n max_indices = np.where(prob_on_set > threshold)[0]\n # get the indices of the frames that satisfy the thresholding\n index = 0\n seg_limits = []\n time_clusters = []\n\n # Step 4B: group frame indices to onset segments\n while index < len(max_indices):\n # for each of the detected onset indices\n cur_cluster = [max_indices[index]]\n if index == len(max_indices)-1:\n break\n while max_indices[index+1] - cur_cluster[-1] <= 2:\n cur_cluster.append(max_indices[index+1])\n index += 1\n if index == len(max_indices)-1:\n break\n index += 1\n time_clusters.append(cur_cluster)\n seg_limits.append([cur_cluster[0] * st_step,\n cur_cluster[-1] * st_step])\n\n # Step 5: Post process: remove very small segments:\n min_duration = 0.2\n seg_limits_2 = []\n for s_lim in seg_limits:\n if s_lim[1] - s_lim[0] > min_duration:\n seg_limits_2.append(s_lim)\n seg_limits = seg_limits_2\n\n if plot:\n time_x = np.arange(0, signal.shape[0] / float(sampling_rate), 1.0 /\n sampling_rate)\n\n plt.subplot(2, 1, 1)\n plt.plot(time_x, signal)\n for s_lim in seg_limits:\n plt.axvline(x=s_lim[0], color='red')\n plt.axvline(x=s_lim[1], 
color='red')\n plt.subplot(2, 1, 2)\n plt.plot(np.arange(0, prob_on_set.shape[0] * st_step, st_step), \n prob_on_set)\n plt.title('Signal')\n for s_lim in seg_limits:\n plt.axvline(x=s_lim[0], color='red')\n plt.axvline(x=s_lim[1], color='red')\n plt.title('svm Probability')\n plt.show()\n\n return seg_limits", "def measure_wavelength(self, t_measure):\n overlap = self.overlap(t_measure)\n \n # set all overlap entries below 0.1% of the maximum overlap to zero\n overlap[overlap<0.001*np.max(overlap)] = 0.0\n nonzero = np.nonzero(overlap)\n \n # find connected section of chain with nonzero overlap\n consecutives = np.split(nonzero, np.where(np.diff(nonzero) != 1)[0]+1)\n if len(consecutives) != 1:\n warnings.warn('Wavelength could not be determined unambiguously.')\n return np.nan\n else:\n # add 1 since overlap involves two beads each\n wavelength = len(consecutives[0][0]) + 1\n return wavelength", "def threshold(self) -> float:\n return pulumi.get(self, \"threshold\")", "def band_penalty(self):\n fc_ix = np.argmin(np.abs(self.f - self.fc)) # Index to frequency array closes to center frequency\n # Number of indexes on each side of center frequency, not extending outside, only up to 10 kHz\n n = min(fc_ix, self.ix10k - fc_ix)\n if n == 0:\n return 0.0\n return np.mean(np.square(self.fr[fc_ix - n:fc_ix] - self.fr[fc_ix + n - 1:fc_ix - 1:-1]))", "def get_large_audio_transcription(path):\n # open the audio file using pydub\n r = sr.Recognizer()\n sound = AudioSegment.from_mp3(path)\n sound.export(\"tmp.wav\", format=\"wav\")\n sound = AudioSegment.from_wav('tmp.wav')\n # split audio sound where silence is 700 miliseconds or more and get chunks\n chunks = split_on_silence(sound,\n # experiment with this value for your target audio file\n min_silence_len = 500,\n # adjust this per requirement\n silence_thresh = sound.dBFS-14,\n # keep the silence for 1 second, adjustable as well\n keep_silence=500,\n )\n folder_name = \"audio-chunks\"\n # create a directory to store the audio chunks\n if not os.path.isdir(folder_name):\n os.mkdir(folder_name)\n whole_text = \"\"\n\n chapter=(str(path.split('/')[-1])).split('_')[3]\n # if chapter == '01':\n # target=2\n # else:\n # target=1\n target=2\n # process each chunk\n for i, audio_chunk in enumerate(chunks, start=1):\n # export audio chunk and save it in\n # the `folder_name` directory.\n if i==1:\n chunk_filename = os.path.join(folder_name, f\"chunk{i}.wav\")\n audio_chunk.export(chunk_filename, format=\"wav\")\n # recognize the chunk\n with sr.AudioFile(chunk_filename) as source:\n audio_listened = r.record(source)\n # try converting it to text\n try:\n text = r.recognize_google(audio_listened,language=\"en-US\")\n\n except sr.UnknownValueError as e:\n print(\"Error:\", str(e))\n else:\n #text = f\"{text.capitalize()}. \"\n #print(chunk_filename, \":\", text)\n whole_text += text\n # return the text for all chunks detected\n else:\n chunk_filename = os.path.join(folder_name, f\"chunk{i}.wav\")\n audio_chunk.export(chunk_filename, format=\"wav\")\n # recognize the chunk\n with sr.AudioFile(chunk_filename) as source:\n audio_listened = r.record(source)\n # try converting it to text\n try:\n text = r.recognize_google(audio_listened, language=\"en-US\")\n\n except sr.UnknownValueError as e:\n print(\"Error:\", str(e))\n else:\n #text = f\"{text.capitalize()}. 
\"\n # print(chunk_filename, \":\", text)\n if chapter == '01':\n whole_text += ' ' +text\n if str(text).isalnum():\n if str(text).split(' ')[0]==' ':\n whole_text += text\n else: whole_text += ' '+text\n # return the text for all chunks detected\n\n if i==target:\n break\n if os.path.isfile('tmp.wav') :os.remove('tmp.wav')\n subprocess.run([\"rm\", \"-rf\", folder_name])\n return whole_text", "def sample_interval(self):\n\n if self.sample_rate != 0:\n return 1.0 / self.sample_rate\n return 0.0", "def silencing_constraint(self, x0: devices.PrimaryWeights) -> float:\n contrast = self.get_photoreceptor_contrasts(x0)\n return sum(pow(contrast[self.silence].values, 2))", "def get_data(self, wave):\n data = np.array([lfilter(self.b, self.a, wave[i]) for i in range(self.n_channels)])\n self.min_threshold = np.min(data)\n self.max_threshold = np.max(data)\n return data", "def checkendsilence(inputgiven):\n output = getlastslice(inputgiven)\n wave_file = wave.open(output, \"r\")\n for i in range(wave_file.getnframes()):\n current_frame = wave_file.readframes(1)\n unpacked_signed_value = struct.unpack(\"<h\", current_frame)\n if abs(unpacked_signed_value[0]) > 500:\n return False\n return True", "def listen_for_speech(threshold=THRESHOLD):\n\n #Open stream\n p = pyaudio.PyAudio()\n\n stream = p.open(format=FORMAT,\n channels=CHANNELS,\n rate=RATE,\n input=True,\n frames_per_buffer=CHUNK)\n\n print(\"* recording\")\n\n frames = []\n\n for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):\n\tdata = stream.read(CHUNK)\n frames.append(data)\n\n print(\"* done recording\")\n\n stream.stop_stream()\n stream.close()\n p.terminate()\n\n wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')\n wf.setnchannels(CHANNELS)\n wf.setsampwidth(p.get_sample_size(FORMAT))\n wf.setframerate(RATE)\n wf.writeframes(b''.join(frames))\n wf.close()\n \n response = stt_google_wav(WAVE_OUTPUT_FILENAME)\n\n return response", "def micsample(listentime):\n frames, sampling_rate = record_audio(listentime)\n samples = np.hstack([np.frombuffer(i, np.int16) for i in frames])\n times = np.arange(samples.size) / sampling_rate\n return samples, times", "def separate_silence_and_speech(signal, sample_rate, time_segments):\n\n silence = signal\n speech = signal\n\n for segment in reversed(time_segments):\n\n start = int(segment['start'] * sample_rate)\n stop = int(segment['stop'] * sample_rate)\n\n if segment['is_speech']:\n silence = np.delete(silence, np.s_[start:stop], axis=0)\n else:\n speech = np.delete(speech, np.s_[start:stop], axis=0)\n\n return silence, speech", "def detect_freqs(self):\n n_fft_bins = self._config[\"audio_config\"][\"N_FFT_BINS\"]\n channel_avgs = []\n differences = []\n \n for i in range(n_fft_bins):\n channel_avgs.append(sum(self.freq_channels[i])/len(self.freq_channels[i]))\n differences.append(((self.freq_channels[i][0]-channel_avgs[i])*100)//channel_avgs[i])\n for i in [\"beat\", \"low\", \"mid\", \"high\"]:\n if any(differences[j] >= self.min_percent_diff[i]\\\n and self.freq_channels[j][0] >= self.min_detect_amplitude[i]\\\n for j in range(*self.detection_ranges[i]))\\\n and (time.time() - self.prev_freq_detects[i] > 0.2)\\\n and len(self.freq_channels[0]) == self.freq_channel_history:\n self.prev_freq_detects[i] = time.time()\n self.current_freq_detects[i] = True\n else:\n self.current_freq_detects[i] = False", "def check_audio_pre_trial(data, audio=None, **_):\n if audio is None:\n _log.warning(\"No BNC2 input in function call, retuning None\")\n return None\n s = audio[\"times\"][~np.isnan(audio[\"times\"])] # 
Audio TTLs with NaNs removed\n metric = np.array([], dtype=np.int8)\n for i, c in zip(data[\"intervals\"][:, 0], data[\"goCue_times\"]):\n metric = np.append(metric, sum(s[s > i] < (c - 0.02)))\n passed = metric == 0\n assert data[\"intervals\"].shape[0] == len(metric) == len(passed)\n return metric, passed", "def set_new_threshold(wavform, thr, n_pre=N_PRE_PT, rng=SEARCH_RNG, i_chg=20):\n wav = np.array(wavform)\n sgn = np.sign(thr)\n if np.max(wav[rng[0]:rng[1]] * sgn) < np.abs(thr): return None # reject\n\n \"\"\" NOT USED -- GIVES IMPRECISE RESULT\n # -- align: find the steepest point having the same sign as `sgn`\n df = np.diff(wav)\n si = np.argsort(-sgn * df) # reverse sorted\n for i in si:\n if np.sign(wav[i]) == sgn: break\n \"\"\"\n # -- align: find the point where waveform crosses `thr`\n n = len(wav)\n for i in range(n - 1):\n if sgn * wav[i] <= sgn * thr and sgn * thr <= sgn * wav[i + 1]:\n break\n if i == n - 2:\n # although i could be n - 2, it's highly likely an artifact\n return None\n n_shift = n_pre - i - 1 # > 0: right shift, < 0: left shift\n if n_shift == 0:\n return wav\n\n wavnew = np.empty(wav.shape)\n wavnew[n_shift:] = wav[:-n_shift] # PBC shifting\n wavnew[:n_shift] = wav[-n_shift:]\n\n # -- done: but if the spike doesn't change its sign\n # within `i_chg`, reject.\n if np.max(-sgn * wavnew[n_pre:i_chg]) < 0:\n return None\n\n \"\"\" DEBUG\n if np.abs(n_shift) > 3:\n print '!!!', n_shift, '/', i, '/', n\n print '---', np.max(-sgn * wavnew[n_pre:i_chg])\n print list(wav)\n print list(wavnew)\n \"\"\"\n\n return wavnew", "def lmax(self):\n cond = (self.transmit / self.transmit.max()) > 1./100\n return max(self.wavelength[cond])", "def audioEpochFeats(cur,uid,timestamp):\n\tuidA = uid +'audio'\n\n\tvar_stats = []\n\tstd_stats = []\n\tnoise = []\n\tvoiceToSilenceRatio = []\n\n\tfor i in range(1,24):\n\t\ths_timestamp = timestamp-86400+(i-1)*hour\n\t\the_timestamp = timestamp-86400+i*hour\n\t\t# Determining if start/end time of given hour is in the night\n\t\t# If yes, proceed with feature calculation, if not skip\n\t\ts_epoch = epochCalc(hs_timestamp)\n\t\te_epoch = epochCalc(he_timestamp)\n\n\t\tif s_epoch[0][0]=='night' or e_epoch[0][0]=='night':\n\t\t\tcur.execute('SELECT audio FROM {0} WHERE time_stamp >= {1} AND time_stamp<= {2}'\n\t\t\t\t.format(uidA,timestamp-86400+(i-1)*hour,timestamp-86400+i*hour))\n\t\t\trecords = cur.fetchall()\n\n\t\t\tvar_stats.append(np.var(records))\n\t\t\tstd_stats.append(np.std(records))\n\n\t\t\t# Calculating number of silence and voice/noise occurences\n\t\t\tsilence = len([item for item in records if item==0])\n\t\t\tvoice = len([item for item in records if item==1 or item==2])\n\t\t\tnoise.append(len([item for item in records if item==3]))\n\t\t\tif silence>0:\n\t\t\t\tvoiceToSilenceRatio.append(float(voice) / silence)\n\t\t\telse:\n\t\t\t\tvoiceToSilenceRatio.append(0)\n\treturn(np.nan_to_num(np.hstack((voiceToSilenceRatio,var_stats,std_stats,noise))))\n\t\"\"\"\ndef main():\n\tcon = psycopg2.connect(database='dataset', user='tabrianos')\n\tcur = con.cursor()\n\t#warnings.simplefilter(\"error\")\n\t#centers = np.load('visualizations/clustercenters.npy')\n\n# ------------TEST CASE-----------------------------\n\tfor loso in uids1:\n\t\tytest=[]\n\t\taccuracies =[]\n\t\tacc=0\n\t\tmaxminAcc =[]\n\t\tXbig = np.zeros([1,132])\t\n\t\tYbig = np.zeros([1])\n\t\tlabels=[]\n\t\tlabels.append(19)\n\t\t# loso means leave one student out: forest is trained on other users data\n\t\t# then tests are run on 'loso' student 
\n\t\tuids2.remove(loso)\n\t\tuids2.append(loso)\n\t\tprint('LOSO: {0}'.format(loso))\n\t\tfor testUser in uids2:\n\t\t\tprint(testUser)\n\t\t\t# lists that temporary store features before concatenation\n\t\t\t\n\t\t\tcolocationList =[]\n\t\t\tconversationList =[]\n\t\t\tactivityList=[]\n\t\t\taudioList = []\n\n\t\t\t# loading stress labels from database (currently on 0-5 scale)\n\t\t\trecords = loadSleepLabels(cur,testUser) \n\t\t\n\n\t\t\t\n\t\t\t#X,Y store initially the dataset and the labels accordingly\n\t\t\tY = np.zeros(len(records))\n\t\t\tX = np.array(records)\n\n\t\n\n\n\t\t\tfor i in range(0,len(records)):\n\t\t\t\tcolocationList.append( colocationEpochFeats(cur,testUser,X[i][0]))\n\t\t\t\tconversationList.append( convEpochFeats(cur,testUser,X[i][0]))\n\t\t\t\tactivityList.append(activityEpochFeats(cur,testUser,X[i][0]))\n\t\t\t#\tScreenList.append( screenStatFeatures(cur,testUser,X[i][0],day) )\n\t\t\t\taudioList.append(audioEpochFeats(cur,testUser,X[i][0]))\n\t\t\n\t\t\t\tif testUser==loso:\n\t\t\t\t\tytest.append(X[i][1])\n\t\t\t\t#labels list holds user ids to be used in LeaveOneOut pipeline\n\t\t\t\tlabels.append(testUser[-2:])\n\t\t\t\tY[i] = X[i][2]\n\n\t\t\t\n\t\t\t#concatenating features in one array \n\n\t\t\tXtt = np.concatenate((np.array(activityList),np.array(conversationList),np.array(colocationList),np.array(audioList)),axis=1)\n\t\t\tprint(Xtt.shape)\n\n\t\t\t#initiating and training forest, n_jobs indicates threads, -1 means all available\n\t\t\t# while the test student is not reached, training data are merged into one big matrix\n\t\t\tXbig = np.concatenate((Xbig,Xtt),axis=0)\n\t\t\tYbig = np.concatenate((Ybig,Y),axis=0)\n\n\t\t\tdel colocationList[:]\n\t\t\tdel conversationList[:]\n\t\t\tdel activityList[:]\n\t\t\tdel audioList[:]\n\n\n\n\t\t\tif testUser!=loso:\n\t\t\t\tXbig = Xbig.astype(np.float64)\n\t\t\t\tprint(Xbig.dtype)\n\t\t\t\t\n\n\t\t\t# when loso, tests are run\n\t\t\telif testUser==loso:\n\t\t\t\t#Xbig = preprocessing.scale(Xbig)\n\t\t\t\tnp.save('numdata/withgps/sleephourlyX.npy',Xbig)\n\t\t\t\tnp.save('numdata/withgps/sleephourlyY.npy',Ybig)\n\t\t\t\tnp.save('numdata/withgps/sleephourlyLOO.npy',np.array(labels))\n\t\t\t\tprint(Xbig.shape[0],Ybig.shape[0],len(labels))\n\t\t\t\tprint('train matrix saved')\n\t\t\t\ta = raw_input()\n\t\t\t\tforest = RandomForestClassifier(n_estimators=100, n_jobs = -1)\n\t\t\t\tforest.fit(Xbig,Ybig)\n\t\t\t\tef = forest.score(Xtt,ytest)\n\t\t\t\tprint(ef*100)\n\n\t\t\t\toutput = np.array(forest.predict(Xtt))\n\t\t\t\tscored = output - np.array(ytest)\n\n\t\t\t\t# Counting as correct predictions the ones which fall in +/-1, not only exact\n\t\t\t\t# I call it the 'Tolerance technique'\n\t\t\t\tcorrect=0\n\t\t\t\tc = Counter(scored)\n\t\t\t\tfor k in c.keys():\n\t\t\t\t\tif k<2 and k>-2:\n\t\t\t\t\t\tcorrect += c[k]\n\t\t\t\t\n\t\t\t\tscore = float(correct)/len(scored)\n\t\t\t\tprint(score*100)\n\n\n\n\t\tprint(Xbig.shape)\n\t\n\t\t\n\n\n\nif __name__ == '__main__':\n\tmain()\n\n\n\n\t\"\"\"", "def get_wavelength(self):\n E = -self.E0*(1.0/self.n_low**2 - 1.0/self.n_high**2)\n return SI['hc']*1e12/(E*SI['keV'])", "def _compute_noise_level(self, data):\n noise = max(data)\n noise_min = 2600\n noise_max = 4095\n ratio = (noise - noise_min)/(noise_max - noise_min)\n return int(ratio*100)", "def get_sampled_timesteps(self) -> int:\n return self.sampled_timesteps", "def speedx(self, sound_array, factor): # http://zulko.github.io/blog/2014/03/29/soundstretching-and-pitch-shifting-in-python/\n indices = np.round(np.arange(0, 
len(sound_array), factor))\n indices = indices[indices < len(sound_array)].astype(int)\n return sound_array[indices.astype(int)]", "def calc_spectra(stream, data_type):\n \n import numpy as np\n from mtspec import mtspec\n from scipy import interpolate\n from scipy.stats import binned_statistic \n\n # Read in file \n tr = stream[0]\n data = tr.data\n delta = tr.stats.delta\n samprate = tr.stats.sampling_rate\n npts = tr.stats.npts\n \n # Determine nyquist frequency\n nyquist = 0.5 * samprate\n \n\n # Calc spectra amplitudes and frequencies \n # Switched number of tapers from 7 to 5. Decreases computation time and\n # results are similar\n amp_squared, freq = mtspec(data, delta=delta, time_bandwidth=4, \n number_of_tapers=5, nfft=npts, quadratic=True)\n \n # Convert from power spectra to amplitude spectra\n amp = np.sqrt(amp_squared)\n \n # Use scipy interpolate function to fill in data in missing bins\n f = interpolate.interp1d(freq, amp)\n freq_new = np.arange(np.min(freq), np.max(freq), 0.0001)\n amp_new = f(freq_new)\n\n # Remove certain frequencies that are too low or high. \n indexes = []\n \n for i, val in enumerate(freq_new):\n \n # Remove frequencies below 1/2 length of record\n if val <= 1/(delta*npts*0.5) :\n indexes.append(i)\n \n # Remove frequencies above 10 Hz for sm data because of the way it was processed \n elif val > 10 and data_type == 'sm':\n indexes.append(i)\n\n # Remove frequencies above nyquist frequency for disp data\n # (it's already removed in the previous step for sm data)\n elif val > nyquist and data_type == 'disp': \n indexes.append(i)\n \n # Remove any duplicate indexes\n indexes = np.unique(indexes)\n freq_new = np.delete(freq_new,indexes)\n amp_new = np.delete(amp_new,indexes) \n \n # Set up bins\n if data_type == 'sm':\n # Starting bins at 0.004 Hz (that is about equal to half the length\n # of the record for the synthetic and observed data) and ending at\n # 10 Hz because after that the sm data is unusable due to how it was\n # processed. 
\n bins = np.logspace(np.log10(0.004), np.log10(10), num=21)\n \n elif data_type == 'disp':\n # Starting bins at 0.004 Hz (that is about equal to half the length\n # of the record for the synthetic and observed data) and ending at\n # 0.5 Hz because that is the nyquist frequency .\n bins = np.logspace(np.log10(0.004), np.log10(0.5), num=21)\n \n bin_means, bin_edges, binnumber = binned_statistic(freq_new,\n amp_new,\n statistic='mean',\n bins=bins)\n \n # for i in range(len(bin_means)):\n # bin_means[i] = 10**bin_means[i]\n \n \n return(bin_means, freq, amp)", "def _frequency_to_wavelength(freq):\n return ifc.SPEED_OF_LIGHT_METRES_PER_SECOND / freq", "def standardize_samples_for_speakers(source_df, speakers_duration, n_samples_min, verbose=True):\n speakers_duration = speakers_duration.loc[speakers_duration.n_samples >= n_samples_min]\n if verbose:\n print(f\"{len(speakers_duration)} speakers remain\")\n\n # get the samples > n_seconds for the remaining speakers\n samples_remaining_speakers = pd.merge(source_df, speakers_duration, on=\"speaker_id\")\n # sort the samples by speakers and duration\n samples_remaining_speakers = samples_remaining_speakers.sort_values(['speaker_id', 'seconds_x'], ascending=[True, True])\n if verbose:\n print(f\"{len(samples_remaining_speakers.loc[samples_remaining_speakers['dataset_name_x'] == 'LibriSpeech'])} samples remain in LibriSpeech\")\n print(f\"{len(samples_remaining_speakers.loc[samples_remaining_speakers['dataset_name_x'] == 'sitw'])} samples remain in sitw\")\n print(f\"{len(samples_remaining_speakers.loc[samples_remaining_speakers['dataset_name_x'] == 'CommonVoice'])} samples remain in CommonVoice\")\n print(f\"{len(samples_remaining_speakers.loc[samples_remaining_speakers['dataset_name_x'] == 'TCOF'])} samples remain in TCOF\")\n return samples_remaining_speakers, speakers_duration", "def audio_resample(self, data):\n\n data = np.asarray(data)\n if data.ndim <= 1:\n logging.log_first_n(logging.INFO,\n 'Converting %s sound from shape %s to 2-D' %\n (self._name, data.shape), 5)\n data = np.reshape(data, (-1, 1))\n if data.shape[1] > data.shape[0]:\n logging.log_first_n(logging.INFO,\n 'Transposing %s sound from shape %s' %\n (self._name, data.shape), 5)\n data = np.transpose(data)\n\n # Get half window size in seconds.\n half_window_size = 0.5 * self._window / self._fs_out\n\n # Concatenate and update buffer.\n if self._buff is not None:\n data = np.concatenate((self._buff, data), axis=0)\n tau = self._buff.shape[0]\n else:\n tau = 0\n self._buff = data[-int(self._fs_in * half_window_size):, :]\n\n # Get i/o data dimensions.\n frames_in = data.shape[0]\n frames_out = int(round((frames_in - tau) / self._fs_in * self._fs_out))\n\n # Resample data via moving average.\n data_out = np.zeros((frames_out, data.shape[1]))\n if self._fs_out < self._fs_in or self._window > 1:\n for i in range(frames_out):\n t = float(i) / self._fs_out # center of window in seconds\n t1 = int(max(0, round(self._fs_in * (t - half_window_size)) + tau))\n t2 = int(min(frames_in,\n round(self._fs_in * (t + half_window_size)) + tau))\n data_out[i, :] = np.mean(data[t1:t2, :], axis=0)\n\n else:\n\n data_out = data\n\n return data_out", "def warm_region_cal(audio_samples, fs):\n #window the audio\n windowed_samples = timbral_util.window_audio(audio_samples)\n\n # need to define a function for the roughness stimuli, emphasising the 20 - 40 region (of the bark scale)\n min_bark_band = 10\n max_bark_band = 40\n mean_bark_band = (min_bark_band + max_bark_band) / 2.0\n array = 
np.arange(min_bark_band, max_bark_band)\n x = timbral_util.normal_dist(array, theta=0.01, mean=mean_bark_band)\n x -= np.min(x)\n x /= np.max(x)\n\n wr_array = np.zeros(240)\n wr_array[min_bark_band:max_bark_band] = x\n\n # need to define a second array emphasising the 20 - 40 region (of the bark scale)\n min_bark_band = 80\n max_bark_band = 240\n mean_bark_band = (min_bark_band + max_bark_band) / 2.0\n array = np.arange(min_bark_band, max_bark_band)\n x = timbral_util.normal_dist(array, theta=0.01, mean=mean_bark_band)\n x -= np.min(x)\n x /= np.max(x)\n\n hf_array = np.zeros(240)\n hf_array[min_bark_band:max_bark_band] = x\n\n windowed_loud_spec = []\n windowed_rms = []\n\n wr_vals = []\n hf_vals = []\n\n for i in range(windowed_samples.shape[0]):\n samples = windowed_samples[i, :]\n N_entire, N_single = timbral_util.specific_loudness(samples, Pref=100.0, fs=fs, Mod=0)\n\n # append the loudness spec\n windowed_loud_spec.append(N_single)\n windowed_rms.append(np.sqrt(np.mean(samples * samples)))\n\n wr_vals.append(np.sum(wr_array * N_single))\n hf_vals.append(np.sum(hf_array * N_single))\n\n mean_wr = np.mean(wr_vals)\n mean_hf = np.mean(hf_vals)\n weighted_wr = np.average(wr_vals, weights=windowed_rms)\n weighted_hf = np.average(hf_vals, weights=windowed_rms)\n\n return mean_wr, weighted_wr, mean_hf, weighted_hf", "def filesample(filename):\n sampling_rate, samples = wavfile.read(filename)\n times = np.arange(len(samples)) / sampling_rate\n return samples, sampling_rate", "def measure_wavelength_avg(self, from_time, to_time, print_n=False):\n from_idx = self._find_index_for_time(from_time)\n to_idx = self._find_index_for_time(to_time)\n \n # print number of measurements\n if print_n:\n n_measurements = to_idx - from_idx + 1\n print 'n measurements:', n_measurements\n \n # calculate overlap\n overlap = self.overlap()[from_idx:to_idx+1,:]\n \n # intitialize wavelength storage\n wavelengths = np.zeros(overlap.shape[0])\n \n for i in range(overlap.shape[0]):\n this_overlap = overlap[i,:]\n \n # set all overlap entries below 0.1% of the maximum overlap to zero\n this_overlap[this_overlap<0.001*np.max(this_overlap)] = 0.0\n \n nonzero = np.nonzero(this_overlap)\n \n # find connected section of chain with nonzero overlap\n consecutives = np.split(nonzero, np.where(np.diff(nonzero) != 1)[0]+1)\n \n if len(consecutives) != 1:\n warnings.warn('Wavelength could not be determined unambiguously.')\n return np.nan\n else:\n # add 1 since overlap involves two beads each\n wavelengths[i] = float( len(consecutives[0][0]) ) + 1\n \n return np.mean( wavelengths )", "def get_samples_per_signal(self):\n return np.array([self.samples_in_file(chn) for chn in range(self.signals_in_file)])", "def silence(self):\n return self._replace(velocity=0.)", "def peakRecognition(y, sg_window, threshold):\n\n corrected_sg2 = savgol_filter(\n y, window_length=sg_window, polyorder=3, deriv=2)\n\n peaks_all = []\n\n for row in corrected_sg2:\n peaks = argrelmin(row)[0]\n peaks = [peak for peak in peaks if row[peak] < -threshold] # Remove peaks below threshold\n\n # Combine peaks w/o positive 2nd derivative between them\n peak_condensing = []\n peaks_condensed = []\n for j in range(len(row)):\n if j in peaks:\n peak_condensing.append(j)\n if row[j] > 0 and len(peak_condensing) > 0:\n peaks_condensed.append(int(np.mean(peak_condensing)))\n peak_condensing = []\n if len(peak_condensing) > 0:\n peaks_condensed.append(int(np.mean(peak_condensing)))\n\n peaks_all.append(peaks_condensed)\n bar3.update(bar3.value + 1)\n\n return 
peaks_all", "def calc_dur(mld, threshold):\n dur = 0\n durmax = 0\n for i in range(len(mld)):\n if np.abs(mld[i]) < threshold:\n dur+=1\n else:\n if dur>durmax:\n durmax=dur \n dur=0\n return np.max([dur,durmax])", "def clip(wavelength, spectra, threshold, substitute=None):\n\n if substitute == None: # remove threshold violations\n mask = np.any(spectra > threshold, axis=1)\n spectra = spectra[~mask, :]\n wavelength = wavelength[~mask]\n else: # substitute threshold violations with a value\n spectra[spectra > threshold] = substitute\n return wavelength, spectra\n\n return wavelength, spectra", "def duration(self):\n return self.sound.nframes", "def estimate_pulse_time(self, f=0.75):\n idxs = np.abs(self.flux) > f * self.max_flux\n return np.mean(self.time[idxs])", "def sp_audio_pipeline(wav):\n sig = sb.dataio.dataio.read_audio(wav)\n sig = sig.unsqueeze(0)\n sig = hparams[\"speed_perturb\"](sig)\n sig = sig.squeeze(0)\n return sig", "def mix_in_audio_sample(track_data, track_offset, sample_data, sample_offset,\n clip_duration, sample_volume, ramp_in, ramp_out):\n ramp_out_index = clip_duration - ramp_out\n track_end = min(track_offset + clip_duration, track_data.shape[0])\n track_end = min(track_end,\n track_offset + (sample_data.shape[0] - sample_offset))\n sample_range = track_end - track_offset\n for i in range(sample_range):\n if i < ramp_in:\n envelope_scale = i / ramp_in\n elif i > ramp_out_index:\n envelope_scale = (clip_duration - i) / ramp_out\n else:\n envelope_scale = 1\n sample_input = sample_data[sample_offset + i]\n track_data[track_offset\n + i] += sample_input * envelope_scale * sample_volume" ]
[ "0.7811607", "0.7448463", "0.6895357", "0.6664065", "0.6417837", "0.6375742", "0.6263973", "0.61554515", "0.6127289", "0.60519105", "0.60171616", "0.5937067", "0.5910318", "0.5892728", "0.58905584", "0.5843424", "0.58425045", "0.5808453", "0.57665783", "0.5759342", "0.57378775", "0.5722452", "0.56557935", "0.56487143", "0.56326514", "0.5627053", "0.5608545", "0.5589628", "0.55816114", "0.55515337", "0.5523653", "0.5475721", "0.547328", "0.54496133", "0.54458165", "0.5423756", "0.54192334", "0.54108334", "0.54083383", "0.536651", "0.5366032", "0.5354383", "0.5347404", "0.53341174", "0.5330366", "0.532764", "0.53185964", "0.5312305", "0.530155", "0.5290813", "0.52704334", "0.5266224", "0.52633613", "0.5260763", "0.52565277", "0.5253922", "0.52490526", "0.52474356", "0.523403", "0.5226085", "0.5218949", "0.52038807", "0.5199998", "0.51988626", "0.5190761", "0.5183309", "0.51816547", "0.51796705", "0.51718724", "0.51641685", "0.5163859", "0.5160769", "0.51586246", "0.5149331", "0.51469415", "0.51443344", "0.51400673", "0.5135204", "0.51339835", "0.5133169", "0.5123317", "0.5106125", "0.51058215", "0.5105051", "0.510151", "0.5094277", "0.5087108", "0.5086567", "0.50858825", "0.50837976", "0.50825393", "0.50751734", "0.5060701", "0.50599056", "0.50523597", "0.5051391", "0.50512284", "0.50410557", "0.5038423", "0.5034951" ]
0.6908193
2
Wrapper to run Praat 'To TextGrid (silences)' function.
def detect_silences(sound, sil_threshold, sil_duration):
    textgrid = call(sound, 'To TextGrid (silences)', 100, 0.0, sil_threshold, sil_duration, 0.1, 'silence', 'speech')
    return textgrid
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__ (self,win,text='Press a key to continue',**kwargs):\n\n self.win = win\n \n self.text = visual.TextStim(win,text=text,**kwargs)", "def __init__ (self,win,text='Press a key to continue',**kwargs):\n\n self.win = win\n \n self.text = visual.TextStim(win,text=text,**kwargs)", "def make_silence_phones_txt(self):\n raise NotImplementedError", "def preprocess(text):\n text = remove_space(text)\n text = clean_special_punctuations(text)\n text = handle_emojis(text)\n text = clean_number(text)\n text = spacing_punctuation(text)\n text = clean_repeat_words(text)\n text = remove_space(text)\n #text = stop(text)# if changing this, then chnage the dims \n #(not to be done yet as its effecting the embeddings..,we might be\n #loosing words)...\n return text", "def wrapper(scr, text: list):\n\n display = Display(scr, text)\n display.run()", "async def translate(context, arguments, style):\n large_font_spaces = 5\n small_font_spaces = int(27 / 40 * large_font_spaces)\n large_font = True\n\n text = arguments[0]\n runes = \"\"\n\n rune_count = 0\n for i, char in enumerate(text.lower()):\n if char in default_values.RUNES[style]:\n runes += default_values.RUNES[style][char]\n rune_count += 1\n elif char in default_values.RUNES[\"archaic\"]:\n runes += default_values.RUNES[\"archaic\"][char]\n rune_count += 1\n else:\n runes += text[i]\n if char != \" \":\n large_font = False\n\n if not context.desktop_ui or not large_font or rune_count > 27:\n runes = runes.replace(\" \", \" \" * small_font_spaces)\n else:\n runes = runes.replace(\" \", \" \" * large_font_spaces)\n\n await context.message.channel.send(runes)\n return True", "def do(text):\n return freeling_stemming(text)", "def cut_item_texts(self, arrays=None):\n if not arrays: arrays = self.masks()\n for a in arrays:\n for item in self.sources(a):\n i = self._meta['columns'][item]\n for tk in self.valid_tks:\n text = self.text(item, True, tk)\n if text: i['text'][tk] = text\n for ed in ['x', 'y']:\n if i['text'].get('{} edits'.format(ed)):\n for tk in self.valid_tks:\n text = self.text(item, True, tk, ed)\n if text: i['text']['{} edits'.format(ed)][tk] = text\n return None", "def apply(self, text):", "def text(text = 'abcd', size = 10, justify = 'left', layer = 0, font = \"DEPLOF\"):\n t = Device('text')\n xoffset = 0\n yoffset = 0\n\n face = font\n if face == \"DEPLOF\":\n scaling = size/1000\n\n for line in text.split('\\n'):\n l = Device(name = 'textline')\n for c in line:\n ascii_val = ord(c)\n if c == ' ':\n xoffset += 500*scaling\n elif (33 <= ascii_val <= 126) or (ascii_val == 181):\n for poly in _glyph[ascii_val]:\n xpts = np.array(poly)[:, 0]*scaling\n ypts = np.array(poly)[:, 1]*scaling\n l.add_polygon([xpts + xoffset, ypts + yoffset],\n layer = layer)\n xoffset += (_width[ascii_val] + _indent[ascii_val])*scaling\n else:\n valid_chars = '!\"#$%&\\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\\\]^_`abcdefghijklmnopqrstuvwxyz{|}~µ'\n warnings.warn('[PHIDL] text(): Warning, some characters ignored, no geometry for character \"%s\" with ascii value %s. 
' \\\n 'Valid characters: %s' % (chr(ascii_val), ascii_val, valid_chars))\n t.add_ref(l)\n yoffset -= 1500*scaling\n xoffset = 0\n else:\n from .font import _get_font_by_name, _get_font_by_file, _get_glyph\n\n # Load the font\n # If we've passed a valid file, try to load that, otherwise search system fonts\n font = None\n if (face.endswith(\".otf\") or face.endswith(\".ttf\")) and os.path.exists(face):\n font = _get_font_by_file(face)\n else:\n try:\n font = _get_font_by_name(face)\n except ValueError:\n pass\n if font is None:\n raise ValueError(('[PHIDL] Failed to find font: \"%s\". ' +\n 'Try specifying the exact (full) path to the .ttf or .otf file. ' +\n 'Otherwise, it might be resolved by rebuilding the matplotlib font cache') % (face))\n\n # Render each character\n for line in text.split('\\n'):\n l = Device('textline')\n xoffset = 0\n for letter in line:\n letter_dev = Device(\"letter\")\n letter_template, advance_x = _get_glyph(font, letter)\n for poly in letter_template.polygons:\n letter_dev.add_polygon(poly.polygons, layer=layer)\n ref = l.add_ref(letter_dev)\n ref.move(destination=(xoffset, 0))\n ref.magnification = size\n xoffset += size*advance_x\n\n ref = t.add_ref(l)\n ref.move(destination=(0, yoffset))\n yoffset -= size\n\n justify = justify.lower()\n for l in t.references:\n if justify == 'left': pass\n if justify == 'right': l.xmax = 0\n if justify == 'center': l.move(origin = l.center,\n destination = (0, 0), axis = 'x')\n\n t.flatten()\n return t", "def obtain_text():\n pass", "async def aesthetic(self, ctx, *, text):\n out = \"\"\n for char in text:\n out += utils.fullwidth_transform.get(char, char)\n await ctx.send(out)", "def FlashyText(win,center,text,timing):\n winner = Text(center,text)\n winner.setFace(\"arial\")\n winner.setFill(\"black\")\n winner.setSize(30)\n for i in range(1,6):\n time.sleep(timing)\n if i % 2 == 0:\n winner.draw(win)\n else:\n winner.undraw()", "async def outline_text(draw_surface, coords, draw_text, font):\n draw = partial(draw_surface.text, text=draw_text, font=font,\n fill=\"black\")\n for offset_pair in product(range(-1, 2), repeat=2):\n draw((coords[0]+offset_pair[0], coords[1]+offset_pair[1]))\n draw(coords, fill=\"white\")", "def args_batch_to_text(args_batch: ArgsBatch) -> Text:\n lines = []\n for args in args_batch:\n lines.append('; '.join(str(a) for a in args))\n return '\\n'.join(lines)", "def setText(*args):", "def draw_text(self, text, i, j, **params):", "def preprocess(self, text):\r\n return text", "def rich(text):\n return full(text, False)", "def mytext(x,y,s,**kwargs):\n # we take care of this one\n model = kwargs.pop('model', None)\n if model:\n th = text(x,y,model,**kwargs)\n draw()\n x0,y0,w,h = th.get_window_extent().bounds\n gca().texts.remove(th)\n x = x0\n y = y0\n kwargs['transform'] = matplotlib.transforms.IdentityTransform()\n kwargs['horizontalalignment'] = 'left'\n kwargs['verticalalignment'] = 'baseline'\n# print x,y,kwargs\n return text(x,y,s,**kwargs)", "def get_text(downgrade_titles=False):", "def text_grid(self, text, clear_screen=True, x=0, y=0, text_color='black', font=None):\n\n assert 0 <= x < Display.GRID_COLUMNS,\\\n \"grid columns must be between 0 and %d, %d was requested\" %\\\n ((Display.GRID_COLUMNS - 1, x))\n\n assert 0 <= y < Display.GRID_ROWS,\\\n \"grid rows must be between 0 and %d, %d was requested\" %\\\n ((Display.GRID_ROWS - 1), y)\n\n return self.text_pixels(text, clear_screen, x * Display.GRID_COLUMN_PIXELS, y * Display.GRID_ROW_PIXELS,\n text_color, font)", "def format_text(text: 
TTextType) -> typing.Iterator[TViewLine]:\n for line in text.splitlines():\n yield [(\"text\", line)]", "def preprocess_text(self, seq):\n if self.text_preprocess_fn:\n seq = list(map(self.text_preprocess_fn, seq))\n return seq", "def textile(text, **kwargs):\n from django.contrib.markup.templatetags.markup import textile\n return textile(text)", "def basic(text):\n lines = text.split(\"\\n\")\n result = []\n\n for line in lines:\n result.append(_inline(line))\n\n return \"\\n\".join(result)", "def getKernels(indices):\n\n\t\t\ti = indices[0]\n\t\t\tj = indices[1]\n\n\t\t\th = i - self.scope\n\t\t\tk = j + self.scope\n\n\t\t\tif h < 0: h = 0\n\t\t\tif k > len(text): k = len(text)-1\n\n\t\t\treturn text[h:i].replace(\"\\n\", \"__\").replace(\"\\t\", \" \"), text[i:j].replace(\"\\n\", \"__\").replace(\"\\t\", \" \"), text[j:k].replace(\"\\n\", \"__\").replace(\"\\t\", \" \")\n\t\t\t#return \"|\"+text[h:i].replace(\"\\n\", \"__\").replace(\"\\t\", \" \")+\"|\", text[i:j].replace(\"\\n\", \"__\").replace(\"\\t\", \" \"), \"|\"+text[j:k].replace(\"\\n\", \"__\").replace(\"\\t\", \" \")+\"|\"", "def textCurves(*args, font: AnyStr=\"\", name: AnyStr=\"\", object: bool=True, text: AnyStr=\"\",\n q=True, query=True, e=True, edit=True, **kwargs)->Union[List[AnyStr], Any]:\n pass", "def generate(self, path, language, textgrid):\n activations = []\n self.model.eval()\n iterator = tokenize(path, language, path_like=True, train=False)\n if self.generation == 'bucket':\n # Here, we give as input the text line by line.\n for line in iterator:\n line = line.strip() # Remove trailing characters\n\n line = '[CLS] ' + line + ' [SEP]'\n tokenized_text = self.tokenizer.wordpiece_tokenizer.tokenize(line)\n indexed_tokens = self.tokenizer.convert_tokens_to_ids(tokenized_text)\n segment_ids = [1 for x in tokenized_text]\n mapping = utils.match_tokenized_to_untokenized(tokenized_text, line)\n\n # Convert inputs to PyTorch tensors\n tokens_tensor = torch.tensor([indexed_tokens])\n segments_tensors = torch.tensor([segment_ids])\n\n with torch.no_grad():\n encoded_layers = self.model(tokens_tensor, segments_tensors) # last_hidden_state, pooled_last_hidden_states, all_hidden_states\n # filtration\n encoded_layers = np.vstack(encoded_layers[2][1:]) # retrieve all the hidden states (dimension = layer_count * len(tokenized_text) * feature_count)\n encoded_layers = encoded_layers[self.loi, :, :]\n activations += utils.extract_activations_from_tokenized(encoded_layers, mapping)\n elif self.generation == 'sequential':\n # Here we give as input the sentence up to the actual word, incrementing by one at each step.\n for line in iterator:\n for index in range(1, len(line.split())):\n tmp_line = \" \".join(line.split()[:index])\n tmp_line = tmp_line.strip() # Remove trailing characters\n\n tmp_line = '[CLS] ' + tmp_line + ' [SEP]'\n tokenized_text = self.tokenizer.wordpiece_tokenizer.tokenize(tmp_line)\n indexed_tokens = self.tokenizer.convert_tokens_to_ids(tokenized_text)\n segment_ids = [1 for x in tokenized_text]\n mapping = utils.match_tokenized_to_untokenized(tokenized_text, line)\n\n # Convert inputs to PyTorch tensors\n tokens_tensor = torch.tensor([indexed_tokens])\n segments_tensors = torch.tensor([segment_ids])\n\n with torch.no_grad():\n encoded_layers = self.model(tokens_tensor, segments_tensors) # dimension = layer_count * len(tokenized_text) * feature_count\n # filtration\n encoded_layers = np.vstack(encoded_layers[2][1:])\n encoded_layers = encoded_layers[self.loi, :, :]\n 
activations.append(utils.extract_activations_from_tokenized(encoded_layers, mapping)[-1])\n result = pd.DataFrame(np.vstack(activations), columns=['layer-{}-{}'.format(layer, index) for layer in self.loi for index in range(self.FEATURE_COUNT)])\n return result", "def tokenize(self, text, **kwargs):\n if self.opt['tracker'] == 'babi6':\n text = babi6_dirty_fix(text)\n text = text.replace('<SILENCE>', '_SILENCE_')\n\n return [t.text for t in NLP.tokenizer(text)]", "def draw(text: list):\n\n curses.wrapper(wrapper, text)", "def text_plot(self):\n if self.stext is not None:\n # Create text object :\n self.stextmesh = visu.Text(text=self.stext, color=self.stextcolor,\n font_size=self.stextsize, pos=self.xyz,\n bold=True, name='SourcesText')\n\n # Set text texture :\n self.stextmesh.set_gl_state('translucent', depth_test=True)\n\n # Apply a transformation to text elements to not cover sources :\n self.stextmesh.transform = vist.STTransform(\n translate=self.stextshift)\n else:\n self.stextmesh = visu.Text(name='NoneText')", "def graphtextdetextor(image_path):\n img=cv2.imread(image_path)\n\n #img=image_filter.rotate_anticlockwise(img)\n\n\n custom_config_number=r'--oem 3 --psm 6 outputbase digits'\n custom_config=r'--oem 3 --psm 6'\n\n custom_config1=r'--oem 3 --psm 1'\n\n custom_config2=r'--oem 3 --psm 4'\n\n text=pytesseract.image_to_string(img,config=custom_config)\n text2=pytesseract.image_to_string(img,config=custom_config1)\n text3=pytesseract.image_to_string(img,config=custom_config2)\n\n\n\n d=pytesseract.image_to_data(img,config=custom_config,output_type=Output.DICT)\n\n #print(text3)\n return [text,text2,text3]", "def Execute_text( canvas, Fill, column, line, high, wide, linebuffer ):\n global textitem # cache\n # new text over writing old\n if(column==2 and len(linebuffer)>1):\n key = \"T {}.{}\".format(3,line)\n try:\n item = textitem[key]\n canvas.delete(item)\n del textitem[key]\n # pdb.set_trace()\n except KeyError:\n pass\n \n (x00,y00) = (10,10)\n x0 = column * (6,12)[wide]\n key = \"T {}.{}\".format(column,line)\n remark=\"\"\n try:\n item = textitem[key] \n if canvas.itemcget(item,'text')==linebuffer :\n remark = \"repeat\"\n pass # ignore exact repetitions\n else:\n canvas.itemconfigure( item, text=linebuffer )\n remark = f\"at {key} old item {item} update text '{linebuffer}'\"\n except KeyError:\n textitem[key] = canvas.create_text(\n (x00+ x0)*2,\n (y00+line)*2,\n text=linebuffer,\n font=DDD_Font[(1,2)[high]],\n fill=Fill,\n anchor=\"nw\",\n tags = canvas.tag )\n remark = f\"at {key} NEW text item '{linebuffer}'\"\n if remark: print(\" \"*13 + remark )", "def show_text(text, args):\n return expyriment.stimuli.TextLine(text,\n text_font=args[\"--text-font\"],\n text_size=args[\"--text-size\"],\n text_colour=args[\"stimuli_color\"],\n background_colour=args[\"bg_color\"])", "def _setText(self, text):\n self.text = \"\"\n for ch in text:\n char, vertices, glyph = self._extractGlyph(ch)\n if not vertices is None and self.glyphs in (\n GlyphTypes.BASE, GlyphTypes.LAYER_COLOR):\n vertices['rgba'] = glm.vec4(self.color)\n if not self.text:\n off, kern = self._updateMetric(0, char)\n if char in self.NO_GLYPH_CHARS:\n self.colors.append([char, None])\n else:\n vertices['vtx'] += off + glyph['offset']\n self.allVertices = np.hstack(vertices)\n self.allIndices = self._baseInd\n self.colors.append([char, self.color])\n self.text += char\n else:\n pos = len(self.text)\n nonGlyph = countInSet(self.text, self.NO_GLYPH_CHARS)\n # Set the metric\n off, kern = self._updateMetric(pos, char)\n 
if char in self.NO_GLYPH_CHARS:\n self.colors.append([char, None])\n else:\n vertices['vtx'] += off + kern + glyph['offset']\n if self.allVertices is None:\n self.allVertices = np.hstack(vertices)\n else:\n self.allVertices = np.append(self.allVertices,\n vertices)\n if self.allIndices is None:\n self.allIndices = self._baseInd\n else:\n self.allIndices = np.vstack((self.allIndices,\n self._baseInd + (pos - nonGlyph) * 4))\n self.colors.append([char, self.color])\n self.text += char\n self.setUniforms()", "def render_text_on_surface(text, surface, font, color=BLACK, top_padding=0, left_pading=0):\n rect = surface.get_rect()\n \n last_top = rect.top + top_padding\n for index, line in enumerate(text.split(\"\\n\")):\n text_surf = font.render(line, True, color)\n text_rect = text_surf.get_rect()\n text_rect.topleft = (rect.left + left_pading, last_top)\n surface.blit(text_surf, text_rect)\n \n last_top += text_rect.h", "def write(self, text):\n text = open(text, 'w')\n text.write('File type = \"ooTextFile\"\\n')\n text.write('Object class = \"TextGrid\"\\n\\n')\n text.write('xmin = %f\\n' % self.__xmin)\n text.write('xmax = %f\\n' % self.__xmax)\n text.write('tiers? <exists>\\n')\n text.write('size = %d\\n' % self.__n)\n text.write('item []:\\n')\n for (tier, n) in zip(self.__tiers, range(1, self.__n + 1)):\n text.write('\\titem [%d]:\\n' % n)\n if tier.__class__ == IntervalTier: \n text.write('\\t\\tclass = \"IntervalTier\"\\n')\n text.write('\\t\\tname = \"%s\"\\n' % tier.name())\n text.write('\\t\\txmin = %f\\n' % tier.xmin())\n text.write('\\t\\txmax = %f\\n' % tier.xmax())\n text.write('\\t\\tintervals: size = %d\\n' % len(tier))\n for (interval, o) in zip(tier, range(1, len(tier) + 1)): \n text.write('\\t\\t\\tintervals [%d]:\\n' % o)\n text.write('\\t\\t\\t\\txmin = %f\\n' % interval.xmin())\n text.write('\\t\\t\\t\\txmax = %f\\n' % interval.xmax())\n text.write('\\t\\t\\t\\ttext = \"%s\"\\n' % interval.mark())\n else: # PointTier\n text.write('\\t\\tclass = \"TextTier\"\\n')\n text.write('\\t\\tname = \"%s\"\\n' % tier.name())\n text.write('\\t\\txmin = %f\\n' % tier.xmin())\n text.write('\\t\\txmax = %f\\n' % tier.xmax())\n text.write('\\t\\tpoints: size = %d\\n' % len(tier))\n for (point, o) in zip(tier, range(1, len(tier) + 1)):\n text.write('\\t\\t\\tpoints [%d]:\\n' % o)\n text.write('\\t\\t\\t\\ttime = %f\\n' % point.time())\n text.write('\\t\\t\\t\\tmark = \"%s\"\\n' % point.mark())\n text.close()", "def export_textgrids(self, output_directory):\n ali_directory = self.align_directory\n convert_ali_to_textgrids(self.align_config, output_directory, ali_directory, self.dictionary,\n self.corpus, self.corpus.num_jobs, self)\n self.compile_information(ali_directory, output_directory)", "def used_tex_func(val):\n return None", "def imText(self, text, align=\"left\", \n textSize=None, rotate=None, bgColor=255, fontColor=0, scale=None, \n leading=0.25, txtWidth=None):\n if not textSize:\n textSize = int(self.printerConf['textSize'])\n if not txtWidth:\n txtWidth = self.printerConf['printerWidth']\n font = ImageFont.truetype(self.printerConf['fontFile'], textSize)\n\n def splitList(txtWidth, txtList, font, newlineSplitOnly=False):\n \"\"\"Each str/unicode in txtList equals one line when printet. 
Split at newlines and furthermore split if a line is too wide.\"\"\"\n # First of search for newlines and split the list if a newline is found\n withoutNewlines = []\n for txt in txtList:\n withoutNewlines.extend(txt.split(\"\\n\"))\n txtList = withoutNewlines\n if newlineSplitOnly:\n return txtList\n\n txtListWrapped = []\n for txt in txtList:\n # If the whole line is too wide, remove words until we are good\n if font.getsize(txt)[0] > txtWidth:\n txtLen = len(txt)\n for i in range(txtLen)[::-1]:\n if font.getsize(txt[:i+1])[0] <= txtWidth:\n whitespaceEtc = [ \" \", \"\\t\", \"-\" ]\n if txt[i] in whitespaceEtc:\n txtSplit = [ txt[:i+1].rstrip(), txt[i+1:] ]\n if font.getsize(txtSplit[1])[0] > txtWidth:\n txtSplit = splitList(txtWidth, txtSplit, font)\n break\n else:\n break\n # If there are no whitespaces etc. then split the word\n elif not any(w in txt[:i+1] for w in whitespaceEtc):\n if font.getsize(txt[:i+1]+\"-\")[0] <= txtWidth:\n txtSplit = [ txt[:i+1].rstrip()+\"-\", txt[i+1:] ]\n if font.getsize(txtSplit[1])[0] > txtWidth:\n txtSplit = splitList(txtWidth, txtSplit, font)\n break\n else:\n break\n else:\n continue\n else:\n txtSplit = [ txt ]\n txtListWrapped.extend(txtSplit)\n return txtListWrapped\n\n # If txtList is a simple string make it a list\n if type(text) is list:\n txtList = text\n else:\n txtList = [ text ]\n # Spacing between lines as a proportion of the width of a danish letter for the current text size.\n leadingDots = int(font.getsize(u\"Å\")[0]*leading)\n if rotate in [ 90, 270 ]:\n # Don't wrap lines based on width when turned 90 or 270 degrees\n txtList = splitList(txtWidth, txtList, font, newlineSplitOnly=True)\n else:\n # Do wordwrapping etc.\n txtList = splitList(txtWidth, txtList, font)\n\n # Determine the size of the resulting text image\n size = [0,0]\n lineHeight = font.getsize(\"a\")[1]\n size = [ 0, ( leadingDots + lineHeight ) * len(txtList) + leadingDots]\n # Find the width\n if rotate is 180:\n # Avoid right alignment of rotated text, if a line is less wide than the paper / printerConf['printerWidth']\n size[0] = self.printerConf['printerWidth']\n else:\n for txt in txtList:\n maxWidth = font.getsize(txt)[0]\n if maxWidth > size[0]:\n size[0] = maxWidth\n # Create the actual image containing the text\n img = Image.new(\"1\",size)\n draw = ImageDraw.Draw(img)\n draw.rectangle((0,0) + img.size,fill=bgColor)\n pointer = [0, 0]\n # For each line..\n for txt in txtList:\n txtPxWidth = font.getsize(txt)[0]\n if align == \"left\":\n pointer[0] = 0\n elif align == \"right\":\n pointer[0] = size[0] - txtPxWidth\n elif align == \"center\":\n pointer[0] = (size[0] - txtPxWidth)/2\n draw.text(pointer, txt, font=font, fill=fontColor)\n pointer[1] += lineHeight + leadingDots\n\n if rotate:\n angles = [0, 90, 180, 270]\n if rotate in angles:\n img = img.rotate(rotate, expand=True)\n else:\n raise ValueError(\"rotate must be part of %s if set \" % str(angles))\n if rotate in [90, 270]:\n if img.size[0] > self.printerConf['printerWidth'] and not scale:\n raise Exception(\"The textSize is too large to print. Use either a smaller textSize or the scale parameter\")\n else:\n if img.size[0] > self.printerConf['printerWidth']:\n raise Exception(\"Could not print the text. One or more lines are too wide. 
Did you choose a very large font?\")\n\n if align is not \"left\":\n imgOld = img\n img = Image.new(\"1\",(txtWidth,imgOld.size[1]))\n draw = ImageDraw.Draw(img)\n draw.rectangle((0,0) + img.size,fill=bgColor)\n pointer = [0, 0]\n if align is \"center\":\n i = 2\n else:\n i = 1\n img.paste(imgOld,((txtWidth-imgOld.size[0])/i,0))\n return img", "def fn(): # fn definition # help2\r\n print(my_text)", "def process_text(text):\n text = re.sub(r'<@>\\s+|<s>\\s+|</s>\\s+|<p>\\s+|</p>\\s+|\\s+\\,|\\'s|\\'|\\;|\\(|\\)|\\-\\-\\s+|\\s+\\.', '', text)\n text = re.sub(r'\\.\\,', '. ,', text)\n text = re.sub(r'\\,', '', text)\n text = re.sub(r'\\$', '$ ', text)\n text = re.sub(r'\\%', ' %', text)\n text = re.sub(r'\\s\\\"\\s', ' ', text)\n text = re.sub(r'\\.\\s+', '. ', text)\n text = text.lower()\n return text", "def result(target_text):\n\n display_text(target_text)\n readability(target_text)", "def text(self, text: str, xo: int, yo: int, color: int):\n for offset, letter in enumerate(text):\n template = font.get(letter)\n for x, line in enumerate(template):\n line_str = '{:08b}'.format(line).replace('0b', '')\n if self.portrait:\n line_str = reversed(line_str)\n for y, pix in enumerate(line_str):\n if pix == '1':\n self.pixel(xo + x + (offset * 8), yo + y, color)", "def parse_text(self, text):\r\n MAXLEN = 100\r\n sentences = []\r\n punct = [\",\",\":\",\";\",\".\",\"–\",\"?\",\"!\",\"(\",\")\"] # Interpunctuation marks\r\n text = text.replace(\"\\r\", \" \").replace(\"\\t\", \" \") # Remove CR and tabs\r\n words = text.split(\" \") if len(text) > MAXLEN else []\r\n sentence = \"\" if len(text) > MAXLEN else text\r\n\r\n # Preprocess list for silence markers\r\n if conf.SilenceMarker in text:\r\n words_new = []\r\n if not words and sentence: # Was too short to be cut initially\r\n words = text.split(\" \")\r\n sentence = \"\"\r\n for w in filter(None, words):\r\n if conf.SilenceMarker not in w.lower():\r\n words_new.append(w)\r\n else:\r\n text_chunks = w.lower().split(conf.SilenceMarker)\r\n for i, part in enumerate(text_chunks):\r\n if part:\r\n words_new.append(part)\r\n if i < len(text_chunks) - 1:\r\n words_new.append(conf.SilenceMarker)\r\n else:\r\n if words_new and conf.SilenceMarker in words_new[-1]:\r\n words_new[-1] += conf.SilenceMarker\r\n else:\r\n words_new.append(conf.SilenceMarker)\r\n words = words_new\r\n\r\n for w in words:\r\n if conf.SilenceMarker in w:\r\n if sentence:\r\n sentences.append(sentence.strip())\r\n sentences.append(w)\r\n sentence = \"\"\r\n elif w[-1] in punct or w[0] in punct: # Encountered punctuation\r\n if w[-1] in punct and (len(sentence) + len(w) + 1 < MAXLEN):\r\n # Word ends with punct and sentence can still be added to\r\n sentences.append(sentence.strip() + \" \" + w.strip())\r\n sentence = \"\" # Save sentence and word, start new sentence\r\n elif w[0] in punct and w[-1] not in punct:\r\n # Word starts with punctuation, like '('\r\n sentences.append(sentence.strip()) # Save current sentence\r\n sentence = w # Start a new sentence with punct and word\r\n else: # word ends with punct and sentence already long enough\r\n sentences.extend([sentence.strip(), w.strip()])\r\n sentence = \"\" \r\n else:\r\n if (len(sentence) + len(w) + 1 < MAXLEN): # Sentence still\r\n sentence += \" \" + w # short enough\r\n else: # Sentence too long\r\n sentences.append(sentence.strip())\r\n sentence = w # Start a new sentence with the word\r\n if sentence:\r\n sentences.append(sentence.strip())\r\n return sentences", "def run_ocr_in_chart(chart, pad=0, 
psm=PSM.SINGLE_LINE):\n img = chart.image\n\n # add a padding to the initial figure\n fpad = 1\n img = cv2.copyMakeBorder(img.copy(), fpad, fpad, fpad, fpad, cv2.BORDER_CONSTANT, value=(255, 255, 255))\n fh, fw, _ = img.shape\n\n api = PyTessBaseAPI(psm=psm, lang='eng')\n clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(4, 4))\n\n for tbox in chart.texts:\n # adding a pad to original image. Some case in quartz corpus, the text touch the border.\n x, y, w, h = ru.wrap_rect(u.ttoi(tbox.rect), (fh, fw), padx=pad, pady=pad)\n x, y = x + fpad, y + fpad\n\n if w * h == 0:\n tbox.text = ''\n continue\n\n # crop region of interest\n roi = img[y:y + h, x:x + w]\n # to gray scale\n roi_gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)\n #\n roi_gray = cv2.resize(roi_gray, None, fx=3, fy=3, interpolation=cv2.INTER_CUBIC)\n # binarization\n _, roi_bw = cv2.threshold(roi_gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)\n # removing noise from borders\n roi_bw = 255 - clear_border(255-roi_bw)\n\n # roi_gray = cv2.copyMakeBorder(roi_gray, 5, 5, 5, 5, cv2.BORDER_CONSTANT, value=255)\n\n # when testing boxes from csv files\n if tbox.num_comp == 0:\n # Apply Contrast Limited Adaptive Histogram Equalization\n roi_gray2 = clahe.apply(roi_gray)\n _, roi_bw2 = cv2.threshold(roi_gray2, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)\n _, num_comp = morphology.label(roi_bw2, return_num=True, background=255)\n tbox.regions.extend(range(num_comp))\n\n pil_img = smp.toimage(roi_bw)\n if SHOW:\n pil_img.show()\n max_conf = -np.inf\n min_dist = np.inf\n correct_text = ''\n correct_angle = 0\n u.log('---------------')\n for angle in [0, -90, 90]:\n rot_img = pil_img.rotate(angle, expand=1)\n\n api.SetImage(rot_img)\n conf = api.MeanTextConf()\n text = api.GetUTF8Text().strip()\n dist = abs(len(text.replace(' ', '')) - tbox.num_comp)\n\n u.log('text: %s conf: %f dist: %d' % (text, conf, dist))\n if conf > max_conf and dist <= min_dist:\n max_conf = conf\n correct_text = text\n correct_angle = angle\n min_dist = dist\n\n tbox.text = post_process_text(lossy_unicode_to_ascii(correct_text))\n tbox.text_conf = max_conf\n tbox.text_dist = min_dist\n tbox.text_angle = correct_angle\n\n u.log('num comp %d' % tbox.num_comp)\n u.log(u'** text: {} conf: {} angle: {}'.format(correct_text, max_conf, correct_angle))\n\n api.End()\n\n # return boxes", "def bert_preprocess(raw_text):\n nlp = English()\n nlp.add_pipe(nlp.create_pipe('sentencizer')) # updated\n doc = nlp(raw_text)\n sentences = [sent.string.strip() for sent in doc.sents][0:2] \n new_sentences = []\n for i, sentence in enumerate(sentences):\n if i==0:\n new_sentences.append(\"[CLS] \" + sentence + \" [SEP]\")\n else:\n new_sentences.append(sentence + \" [SEP]\")\n \n preprocessed_text = ' '.join(new_sentences)\n \n if \"[CLS]\" not in preprocessed_text:\n raise Exception(\"[CLS] not found in preprocessed text\")\n if \"[SEP]\" not in preprocessed_text:\n raise Exception(\"[SEP] not found in preprocessed text\")\n \n return", "def sliptText(text):\n\treturn [char for char in text]", "def edit_google_vision_text(self,text):\n s1=text\n try:\n log_info(\"Correcting google vision text to remove extra spacing\",MODULE_CONTEXT)\n i=0\n while(i<len(text)):\n s1=text\n if text[i] in [\"/\",\"।\",'।' ,':','|',\",\" ,'०',\"]\",\"-\",\")\",\"}\"] and text[i-1]==\" \": \n text=text[:i-1]+text[i:]\n if i > 0 :\n if text[i-1] in [\"-\",\"[\",\"{\",\"/\",\"(\"] and text[i]==\" \":\n text=text[:i]+text[i+1:]\n elif text[i] in [\"-\",\"[\",\"{\",\"/\",\"(\"] and text[i+1]==\" \":\n 
text=text[:i+1]+text[i+2:]\n i=i+1\n except Exception as e:\n log_exception(\"Exception while correcting google vision text\", MODULE_CONTEXT, e)\n return s1\n return text", "def stats_text(test):\n\n stats_text_en(test) \n \n stats_text_cn(test)", "def text_standardization(text_in):\n stand_text = text_in.strip()\n stand_text = ' '.join(stand_text.split())\n stand_text = stand_text.replace(u'(', u'(')\n stand_text = stand_text.replace(u')', u')')\n stand_text = stand_text.replace(u':', u':')\n return stand_text", "def get_visible_text(_text):\n #text = _text.decode('utf-8', 'ignore').lower() # Don't get hung up on unicode chars in foreign languages\n text = _text.lower()\n text = re.compile(r'<').sub(' <',text) # These two lines keep words from getting smushed\n text = re.compile(r'>').sub('> ',text) # together when two they are only separated by tags.\n soup = BeautifulSoup(text, 'lxml')\n\n # decompose removes the tag and it's text content completely\n for s in soup(['script','code','style']):\n s.decompose()\n\n text = soup.get_text()\n # compress space to reduce footprint and fit on one line so it neatly fits in csv file\n text = re.compile(r'\\s+').sub(' ',text).strip()\n return text", "def _setText(self, text):\n self.text = \"\"\n for ch in text:\n char, vertices, glyph = self._extractGlyph(ch)\n if not self.text:\n off, kern = self._updateMetric(0, char)\n if vertices is not None and not char in self.NO_GLYPH_CHARS:\n vertices['vtx'] += off + glyph['offset']\n self.allVertices = np.hstack(vertices)\n self.allIndices = self._baseInd\n self.text += char\n else:\n pos = len(self.text)\n nonGlyph = countInSet(self.text, self.NO_GLYPH_CHARS)\n # Set the metric\n off, kern = self._updateMetric(pos, char)\n if vertices is not None and not char in self.NO_GLYPH_CHARS:\n vertices['vtx'] += off + kern + glyph['offset']\n if self.allVertices is None:\n self.allVertices = np.hstack(vertices)\n else:\n self.allVertices = np.append(self.allVertices,\n vertices)\n if self.allIndices is None:\n self.allIndices = self._baseInd\n else:\n self.allIndices = np.vstack((self.allIndices,\n self._baseInd + (pos - nonGlyph) * 4))\n self.text += char\n self.setUniforms()", "def initializeFeedback(win, expInfo):\n \n feedbackClock = core.Clock()\n feedbackText = visual.TextStim(win=win, ori=0, name='text',\n text='default text', font=u'Arial',\n pos=[0, 0], height=0.1, wrapWidth=None,\n color=u'white', colorSpace='rgb', opacity=1,\n depth=0.0, units=\"norm\")\n return(feedbackClock, feedbackText)", "def print_banner(text):\n print(Figlet(font='smslant').renderText(text))", "def adjusting_fonts(self):\n fix_x = int(0 * settings.scale)\n fix_y = int(0 * settings.scale)\n font_object = self.fontA\n box = self.box\n text_box = self.box.get_size()\n text_list = self.text.split()\n number_of_words = len(text_list)\n count = 0\n height = fix_y\n first = True\n line = \"\"\n line_break = False\n while count < number_of_words:\n line += text_list[count]\n line_size = font_object.size(line)\n line_pos = int((text_box[0] + fix_x - line_size[0]) / 2)\n if line_size[0] < text_box[0]:\n if count + 1 < number_of_words:\n temporary_line = line + \" \" + text_list[count + 1]\n if font_object.size(temporary_line)[0] >= text_box[0]:\n line_image = font_object.render(line, 1, self.color)\n height += int((line_size[1] * 0.8))\n box.blit(line_image, (line_pos, height))\n line = \"\"\n else:\n line += \" \"\n elif count + 1 == number_of_words:\n height += int((line_size[1] * 0.8))\n box.blit(\n font_object.render(line, 1, 
self.color), (line_pos, height)\n )\n else:\n line = text_list[count]\n height += int(\n line_size[1] * 0.8\n ) # If line height is perfect it does not seem that it is the same text\n count += 1", "def scene_to_text(scenes):\n scene_text_dict = []\n scene_text_list = []\n for i, scene in enumerate(scenes):\n if len(scene['frame_data']) == 0:\n break\n scene_image = Image.fromarray(scene['frame_data'])\n str_text = pytesseract.image_to_string(scene_image)\n #list_text = list(filter(('').__ne__, re.split(\" |\\n|, |. |:|.\\n|\\x0c\", str_text)))\n list_text = list(filter(('').__ne__, re.split(\" |\\n\", str_text)))\n bag_of_word = collections.Counter(list_text)\n scene_text_dict.append(\n {'start': scene['start'], \n 'end': scene['end'], \n 'bag_of_word': dict(bag_of_word)\n })\n scene_text_list.append(list_text)\n return scene_text_dict, scene_text_list", "def test_sentencier_en_trim_spaces():\n sentencizer = Sentencizer()\n text = ' This , text is... . Amazing !!'\n chunks = [i['text'] for i in sentencizer.segment(text, 0)]\n locs = [i['location'] for i in sentencizer.segment(text, 0)]\n assert chunks, [\"This , text is...\" == \"Amazing\"]\n assert text[locs[0][0]:locs[0][1]], ' This == text is...'\n assert text[locs[1][0]:locs[1][1]] == ' Amazing'\n\n def validate(req):\n assert req.docs[0].chunks[0].text, 'This == text is...'\n assert req.docs[0].chunks[1].text == 'Amazing'\n\n f = Flow().add(uses='!Sentencizer')\n with f:\n f.index_lines([' This , text is... . Amazing !!'], on_done=validate, callback_on_body=True, line_format='csv')", "def texts(self, tft, oled, text_1, text_2, text_3=None, text_4=None, wait=0):\n text_line_1 = text_1\n text_line_2 = text_2\n text_line_3 = text_3\n text_line_4 = text_4\n self.clear(tft, oled)\n oled.text((5, 5), text_line_1, tft.WHITE, sysfont, 1)\n sleep(wait)\n self.clear(tft, oled)\n oled.text((5, 5), text_line_2, tft.WHITE, sysfont, 1)\n sleep(wait)\n if text_3:\n self.clear(tft, oled)\n oled.text((5, 5), text_line_3, tft.WHITE, sysfont, 1)\n sleep(wait)\n if text_4:\n self.clear(tft, oled)\n oled.text((5, 5), text_line_4, tft.WHITE, sysfont, 1)\n sleep(wait)\n self.clear(tft, oled)\n oled.text((5, 5), 'Press Button 1, 2 or 3!', tft.WHITE, sysfont, 1)", "def render_text(grid):\r\n X = len(grid[0])\r\n Y = len(grid)\r\n#top row:\r\n for j in range(Y):\r\n for sub_j in range(3): #3 rows \r\n ROW = []\r\n for i in range(X):\r\n ROW += grid[j][i].arr[sub_j]\r\n print(ROW)", "def get_formatted_text(self, n_cols):", "def calculate_texts(self) -> None:\n texts = []\n for text in self.texts:\n paragraphs = list(filter(lambda x: x != \"\", text.split(\"\\n\\n\")))\n for paragraph in paragraphs:\n text = paragraph.replace(\"\\n\", \" \").strip()\n if len(text) > self.split_threshold_min:\n text_sentences = nlp(text)\n sentences = []\n for sentence in text_sentences.sents:\n current = sentence.text\n sentences.append(current.strip())\n texts.extend(sentences)\n else:\n texts.append(text)\n self.texts = list(set(texts))", "def text_prepare(text):\r\n\r\n replace_by_space_re = re.compile('[/(){}\\[\\]\\|@,;]')\r\n good_symbols_re = re.compile('[^0-9a-z #+_]')\r\n stopwords_set = set(stopwords.words('english'))\r\n\r\n text = text.lower()\r\n text = replace_by_space_re.sub(' ', text)\r\n text = good_symbols_re.sub('', text)\r\n text = ' '.join([x for x in text.split() if x and x not in stopwords_set])\r\n\r\n return text.strip()", "def text_draw(self, x, y, text, style={}):", "def text_preprocessing_pdf(self,p):\n 
#remover_end_paragraphs=np.vectorize(self.remove_end_paragraphs,otypes=[str])\n cleaner=np.vectorize(self.remove_non_alpha,otypes=[str])\n cut_text=np.vectorize(self.cut_text,otypes=[str])\n cut_text_raw=np.vectorize(self.cut_text_raw,otypes=[str])\n assert len(self.parser)==len(self.parser_raw), \"Length of the treated sentence treated list does not match length of raw text list: {} / {}\".format(len(self.parser),len(self.parser_raw))\n cut_text_raw(p)\n p=cleaner(p)\n cut_text(p)\n return p", "def getText():", "def getText():", "def getText():", "def getText():", "def getText():", "def render_ents(\n self, text: str, spans: List[Dict[str, Any]], title: Optional[str]\n ) -> str:\n markup = \"\"\n offset = 0\n for span in spans:\n label = span[\"label\"]\n start = span[\"start\"]\n end = span[\"end\"]\n kb_id = span.get(\"kb_id\", \"\")\n kb_url = span.get(\"kb_url\", \"#\")\n kb_link = TPL_KB_LINK.format(kb_id=kb_id, kb_url=kb_url) if kb_id else \"\"\n additional_params = span.get(\"params\", {})\n entity = escape_html(text[start:end])\n fragments = text[offset:start].split(\"\\n\")\n for i, fragment in enumerate(fragments):\n markup += escape_html(fragment)\n if len(fragments) > 1 and i != len(fragments) - 1:\n markup += \"<br>\"\n if self.ents is None or label.upper() in self.ents:\n color = self.colors.get(label.upper(), self.default_color)\n ent_settings = {\n \"label\": label,\n \"text\": entity,\n \"bg\": color,\n \"kb_link\": kb_link,\n }\n ent_settings.update(additional_params)\n markup += self.ent_template.format(**ent_settings)\n else:\n markup += entity\n offset = end\n fragments = text[offset:].split(\"\\n\")\n for i, fragment in enumerate(fragments):\n markup += escape_html(fragment)\n if len(fragments) > 1 and i != len(fragments) - 1:\n markup += \"<br>\"\n markup = TPL_ENTS.format(content=markup, dir=self.direction)\n if title:\n markup = TPL_TITLE.format(title=title) + markup\n return markup", "def display_text(target_text):\n\n print('Text to analyze:')\n print('')\n print('-------TEXT BELOW-------')\n print(target_text)\n print('-------TEXT ENDS-------')\n print('')", "def process_text(self, text, language):", "def process_text(text):\n text = text.strip()\n textList = text.split('\\n')\n newText = ''\n addNewline = True\n for line in textList:\n # Remove duplicate white space\n temp = ' '.join(line.split())\n # Trim any beginning non-alphabet letters\n temp = trim(temp)\n # Remove overly short lines, but keep ends of sentences\n # Add a newline if gap detected\n if len(temp) < 40 and not '.' 
in temp:\n if addNewline:\n newText += '\\n'\n addNewline = False\n continue\n # Add line to growing string\n newText += temp + ' '\n addNewline = True\n return newText", "def sentences(summary, nlp):\n text = remove_spurious_words(text_of(summary))\n all_sentence = [sentence for sentence in re.split(\"[。,?!\\n]\", text) if sentence]\n all_sentence = [re.sub('[ ]+', ' ', sentence.encode('gb2312', 'ignore').decode('gb2312')).strip() for sentence in\n all_sentence]\n return [nlp.ner(sentence) for sentence in all_sentence if sentence]", "async def ascii(self, ctx, *, text):\n text = text.replace(' ', '\\n')\n \n if not text:\n await ctx.send(f\"{ctx.tick(False)} You need to specify the text you want to convert!\")\n \n _fig = figlet_format(text.replace(' ', '\\n'))\n \n if len(_fig) > 1300:\n await ctx.send(f\"{ctx.tick(False)} That message is too long!\")\n await ctx.send(f\"{ctx.tick(True)} Done!\")\n await ctx.send(f\"```{_fig}```\")", "def get_sentence(self):", "def frontend(text):\n text = pyopenjtalk.g2p(text, kana=False)\n print(f\"Cleaned text: {text}\")\n charseq = text.split(\" \")\n idseq = []\n for c in charseq:\n if c.isspace():\n idseq += [char_to_id[\"<space>\"]]\n elif c not in char_to_id.keys():\n idseq += [char_to_id[\"<unk>\"]]\n else:\n idseq += [char_to_id[c]]\n idseq += [idim - 1] # <eos>\n return torch.LongTensor(idseq).view(-1).to(device)", "def txt(input):\n output=atpic.cleaner_alex.txtclean(input)\n return output", "def getExcerpts(self, text, DICECodeResults):\n\t\t\"\"\" - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \"\"\"\n\t\t\n\t\tdef getKernels(indices):\n\t\t\t\"\"\"\n\t\t\t\tgetKernels() is a sub-method that extracts strings from a doc-\n\t\t\t\tument using indices provided by the DICECodeResults data struc-\n\t\t\t\tture passed into this sub-method's parent method, getExcerpts().\n\t\t\t\tThis sub-method returns three strings.\n\n\t\t\t\tindices --> tuple containing indices in the document with text to extract.\n\t\t\t\"\"\"\n\n\t\t\ti = indices[0]\n\t\t\tj = indices[1]\n\n\t\t\th = i - self.scope\n\t\t\tk = j + self.scope\n\n\t\t\tif h < 0: h = 0\n\t\t\tif k > len(text): k = len(text)-1\n\n\t\t\treturn text[h:i].replace(\"\\n\", \"__\").replace(\"\\t\", \" \"), text[i:j].replace(\"\\n\", \"__\").replace(\"\\t\", \" \"), text[j:k].replace(\"\\n\", \"__\").replace(\"\\t\", \" \")\n\t\t\t#return \"|\"+text[h:i].replace(\"\\n\", \"__\").replace(\"\\t\", \" \")+\"|\", text[i:j].replace(\"\\n\", \"__\").replace(\"\\t\", \" \"), \"|\"+text[j:k].replace(\"\\n\", \"__\").replace(\"\\t\", \" \")+\"|\"\n\n\t\tdef getComboTerms(tuples):\n\t\t\t\"\"\"\n\t\t\t\tgetComboTerms() is a sub-method that combines search terms and \n\t\t\t\ttheir indices provided in the tuple parameter into a string with\n\t\t\t\tthe following structure: [(variant, index)]. This sub-method re-\n\t\t\t\tturns a string of that structure.\n\n\t\t\t\ttuples --> data structure containing the search term and index of the search term in the form of: (term, index)\n\t\t\t\"\"\"\t\t\t\n\t\t\t#return \"[{0}]\".format('; '.join([\"({0})\".format(','.join([text[indices[0]:indices[1]], str(indices[0])])) for indices in tuples]))\n\t\t\treturn \"{0}\".format('; '.join((\"{0}\".format(text[indices[0]:indices[1]]) for indices in tuples)))\n\n\t\tdef getProximity(tuples):\n\t\t\t\"\"\"\n\t\t\t\tgetProximity() is a sub-method that calculates the distance of the search terms provided in the tuple parameter. 
\n\t\t\t\tThis sub-method returns an absolute value integer.\n\n\t\t\t\ttuples:\tdata structure containing the search term and index of the search term in the form of: (term, index)\n\t\t\t\"\"\"\n\t\t\tsortedIndices = [indices for indices in tuples]\n\t\t\t#return abs(sortedIndices[0][1] - sortedIndices[-1][0])\n\t\t\treturn sortedIndices[-1][0] - sortedIndices[0][1] \n\n\t\t\"\"\" - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \"\"\"\n\n\t\texcerptsResults = list()\t\t# NEW list to contain the expanded data structure provided by the DICECodeResults parameter\n\n\t\t# CALL THESE JUST ONCE BEFORE LOOP(S)\n\t\tappend = excerptsResults.append\n\t\tformat = str.format\n\t\t# - - - - - - - - - - - - - - - - - -\n\n\t\tfor row in DICECodeResults:\n\n\t\t\tDICECode \t\t= row[0]\t# (1) DICE code as specified in C:\\Users\\a5rjqzz\\Desktop\\Python\\files\\Types.gd\n\t\t\tTYPECode \t\t= row[1]\t# (2) Type code as specified in C:\\Users\\a5rjqzz\\Desktop\\Python\\files\\Types.gd\n\t\t\tCombo \t\t= False\t\t# (3) Boolean status of the presence of a combo term\n\t\t\tdocumentIndex \t= 0\t\t\t# (4) Index of this search term in the document\n\t\t\tindices \t\t= row[2]\t# (5) Indices of the search term and combo term if present\n\t\t\tproximity\t\t= 0\t\t\t# (6) Distance between search term and combo terms\n\n\t\t\tif type(row[2][0]) == type(tuple()):\n\t\t\t\tCombo = True\t# If the type of search term is a combo, this is true\n\n\t\t\t\tfor tuples in row[2]:\n\t\t\t\t\tindices \t\t\t\t\t\t= tuples[0]\t\t\t\t# (1) Location(s) of the search term in the tuple\n\t\t\t\t\tdocumentIndex \t\t\t\t\t= indices[0]\t\t\t# (2) Location of the search term in the document\n\t\t\t\t\tcomboTerms \t\t\t\t\t\t= getComboTerms(tuples)\t# (3) Multiple terms assigned to variable comboTerms\n\t\t\t\t\tproximity \t\t\t\t\t\t= getProximity(tuples)\t# (4) Proximity of combo terms if present\n\t\t\t\t\tkernelLeft, kernel, kernelRight = getKernels(indices)\t# (5) Left, center, and right kernels or excerpts\n\n\t\t\t\t\tappend([DICECode, TYPECode, Combo, documentIndex, kernelLeft, kernel, kernelRight, comboTerms, proximity])\n\n\t\t\telse:\n\t\t\t\tdocumentIndex \t\t\t\t\t= indices[0]\t\t\t\t\t\t\t\t\t# (1) Location of the search term in the document\n\t\t\t\tcomboTerms \t\t\t\t\t\t= format(\"[{0}]\", text[indices[0]:indices[1]])\t# (2) Single term assigned to variable comboTerms\n\t\t\t\tkernelLeft, kernel, kernelRight = getKernels(indices)\t\t\t\t\t\t\t# (3) Left, center, and right kernels or excerpts\n\n\t\t\t\tappend([DICECode, TYPECode, Combo, documentIndex, kernelLeft, kernel, kernelRight, comboTerms, proximity])\n\n\t\treturn excerptsResults", "def textManip(*args, visible: bool=True, q=True, query=True, **kwargs)->Union[None, Any]:\n pass", "def main(argv):\n correct_font(*argv[1:])", "def make_optional_silence_txt(self):\n raise NotImplementedError", "def draw(grid):\n\n for row in grid:\n for char in row:\n if char is None:\n sys.stdout.write(\" \")\n else:\n sys.stdout.write(char)\n sys.stdout.write(\"\\n\")", "def format_ocr_text(self, page):\n \n #read out of the text file that tesseract made\n ocr_text = open(self.ocr_text, 'r')\n \n # write into this file\n djvu_text = open( self.djvu_text, 'w' )\n \n text = \"(page 0 0 1 1\\n\"\n \n self.out_text.write('\\n## Page %d ###\\n\\n' % page )\n \n for line in ocr_text:\n \n #write to the human readable file\n self.out_text.write(line)\n \n # add each line of text\n # escaping \" to \\\" as we go\n text += '(line 0 0 1 1 \"%s\")\\n' % line.replace('\"', 
r'\\\"').strip()\n \n text += \")\\n\"\n \n djvu_text.write( text )\n \n ocr_text.close()\n djvu_text.close()", "def process_text(text, args):\n if args.uppercase:\n text = convert_to_uppercase(text)\n\n if args.spaces:\n text = add_spaces(text)\n\n if not args.uppercase and not args.spaces:\n text = add_spaces(text)\n\n return text", "def adjust(text, run_in_function=False):\n lines = text.split('\\n')\n if len(lines) == 1:\n return text\n\n if lines[0].strip() == '':\n lines = lines[1:]\n first_line = lines[0].lstrip()\n n_spaces = len(lines[0]) - len(first_line)\n\n final_lines = [(' ' if run_in_function else '') + line[n_spaces:] for line in lines]\n\n if run_in_function:\n final_lines = [\n \"def test_function():\",\n ] + final_lines + [\n \"test_function()\",\n ]\n\n return '\\n'.join(final_lines)", "def render_text_surfaces(self):\n self.images = [] # The text surfaces.\n line_width = 0\n line = []\n space_width = self.font.size(' ')[0]\n\n # Put the words one after the other into a list if they still\n # fit on the same line, otherwise render the line and append\n # the resulting surface to the self.images list.\n for word in self.text:\n line_width += self.font.size(word)[0] + space_width\n # Render a line if the line width is greater than the rect width.\n if line_width > self.rect.w:\n surf = self.font.render(' '.join(line), True, self.text_color)\n self.images.append(surf)\n line = []\n line_width = self.font.size(word)[0] + space_width\n\n line.append(word)\n\n # Need to render the last line as well.\n surf = self.font.render(' '.join(line), True, self.text_color)\n self.images.append(surf)", "def clear_text(self):\n global empty_string\n \n for r in range(1,3):\n for c in range(6):\n self.create_text_under_photo(data = empty_string,r=r,c=c)", "def clear_text(self):\n global empty_string\n \n for r in range(1,3):\n for c in range(6):\n self.create_text_under_photo(data = empty_string,r=r,c=c)", "def convert_chn_text(detail=True):\n p = {\n \"data_path\": \"../data/data_literature\",\n \"output_dir\": \"../data/converted_data\"\n }\n if detail:\n gen_params_info(p)\n\n os.system(\"rm -rf %s\" % p[\"output_dir\"])\n os.system(\"mkdir -p %s\" % p[\"output_dir\"])\n files = os.listdir(p[\"data_path\"])\n for file_name in files:\n if detail:\n print(\"to process %s\" % file_name)\n file_path = \"%s/%s\" % (p[\"data_path\"], file_name)\n out_file_path = \"%s/%s\" % (p[\"output_dir\"], file_name)\n fh_in = codecs.open(filename=file_path, mode=\"r\", encoding='utf8')\n fh_out = codecs.open(filename=out_file_path, mode=\"w\", encoding='utf8')\n line_idx = 1\n verb = \"\"\n for line in fh_in:\n line = line.lstrip()\n if line.find(\"\\t\") < 0:\n print(\"Please check in file %s, line: %s\\nsentence :%s\\n\"\\\n \"The above sentence has NO TAB and has been skiped!\" \\\n % (file_name, line_idx, line))\n continue\n items = line.split(\"\\t\")\n if len(items) != 4:\n print(\"Please check in file %s, line: %s\\nsentence :%s\\n\"\\\n \"The above sentence has NO 4 TAB and has been skiped!\" \\\n % (file_name, line_idx, line))\n continue\n frame_id = items[0]\n if frame_id.find(\".\") >= 0:\n frame_id = frame_id.split(\".\")[0]\n verb = items[2].strip()\n left_sent = items[1].strip()\n right_sent = items[3].strip()\n out_line = \"%s\\t%s\\t%s\\t%s\"\\\n % (frame_id, left_sent, verb, right_sent)\n print(out_line, file=fh_out)\n\n line_idx += 1\n\n fh_in.close()\n fh_out.close()", "def small_text(self):\n pass", "def convert_to_text(batch, lengths, dico, params):\n batch = batch.cpu().numpy()\n 
lengths = lengths.cpu().numpy()\n\n slen, bs = batch.shape\n assert lengths.max() == slen and lengths.shape[0] == bs\n assert (batch[0] == params.eos_index).sum() == bs\n assert (batch == params.eos_index).sum() == 2 * bs\n sentences = []\n\n for j in range(bs):\n words = []\n for k in range(1, lengths[j]):\n if batch[k, j] == params.eos_index:\n break\n words.append(dico[batch[k, j]])\n sentences.append(\" \".join(words))\n return sentences", "def processText(text):\n print(type(text))\n for line in text:\n print(line)\n return text", "def draw_text(\n self,\n text: str,\n transform: Matrix44,\n properties: Properties,\n cap_height: float,\n ) -> None:\n raise NotImplementedError", "def text_update(self):\n if self.stext is not None:\n # Get index of non-masked sources :\n # idx = self._select_unmasked()[-1]\n\n # Set masked-sources text to '':\n text = np.array(self.stext)\n # text[np.array(~idx, dtype=bool)] = ''\n\n # Update elements :\n self.stextmesh.text = text\n self.stextmesh.color = self.stextcolor\n self.stextmesh.font_size = self.stextsize\n self.stextmesh.update()", "def preprocess_training_text_with_stops(text, convert=False):\n return preprocess_training_text(text, accented_chars=True,\n convert_num=False, extra_whitespace=True, \n lemmatization=True, lowercase=True, punctuations=True,\n remove_html=True, remove_num=True, special_chars=True, \n stop_words=False)", "def text_to_columns(text):\n \n dedented_txt = textwrap.dedent(text).strip()\n dedented_text = dedented_txt.splitlines()\n for line in dedented_text:\n ded_list = [textwrap.fill(line.strip(), initial_indent='', subsequent_indent='', width=20) for line in dedented_text] \n ded_list2=[]\n ded_list2.append(ded_list)\n return print(tabulate(ded_list2, tablefmt ='plain'))", "def text_to_speech(entry):\n text = entry.get_text()\n if text:\n subprocess.call([\"milena_say\", text])" ]
[ "0.5339875", "0.5339875", "0.52565366", "0.5225016", "0.5224651", "0.52221847", "0.52040446", "0.5149263", "0.50935054", "0.509346", "0.50882745", "0.50577587", "0.50410724", "0.50378585", "0.5034093", "0.5024333", "0.5018697", "0.50046563", "0.4991087", "0.49812207", "0.49809837", "0.49805796", "0.49801278", "0.49495757", "0.49275097", "0.4912974", "0.49064887", "0.48921722", "0.4890334", "0.48809183", "0.4877972", "0.4876322", "0.4858871", "0.4855568", "0.48357993", "0.48342806", "0.48279387", "0.48185685", "0.48101938", "0.48063084", "0.47938663", "0.47929645", "0.4747644", "0.47426447", "0.47422916", "0.474015", "0.4737755", "0.47338927", "0.47299847", "0.47261256", "0.47242594", "0.4721024", "0.47210225", "0.47191656", "0.47144884", "0.47099337", "0.47098053", "0.47086412", "0.4703541", "0.4702951", "0.4697118", "0.4695288", "0.46925586", "0.46904412", "0.4682169", "0.46792045", "0.46771646", "0.46771646", "0.46771646", "0.46771646", "0.46771646", "0.46718454", "0.46705294", "0.46695283", "0.46686673", "0.46679652", "0.4666158", "0.46600655", "0.4659671", "0.46546715", "0.46506232", "0.46491417", "0.46484506", "0.46456665", "0.464041", "0.46394974", "0.4627992", "0.46162754", "0.46110514", "0.4610035", "0.4610035", "0.46094075", "0.4609097", "0.46023232", "0.46008855", "0.4592708", "0.4589503", "0.45869377", "0.4586473", "0.45854598" ]
0.59195006
0
Saves chunked speech intervals as a WAV file.
def save_chunks(chunk_sound, out_path, video_id): chunk_start_ms = int(chunk_sound.get_start_time()*1000) chunk_end_ms = int(chunk_sound.get_end_time()*1000) chunk_duration = chunk_end_ms - chunk_start_ms chunk_fn = '{0}_{1}_{2}.wav'.format(video_id, chunk_start_ms, chunk_end_ms) chunk_file_path = path.join(out_path, chunk_fn) chunk_sound.save(chunk_file_path, 'WAV') return {'filename': chunk_fn, 'video_id': video_id, 'start_time': chunk_start_ms, 'end_time': chunk_end_ms, 'duration': chunk_duration}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_wav(file_name, signal, fs):\n wavfile.write(file_name, fs, np.int16(signal/np.max(np.abs(signal)) * (2**(16)/2-1)))", "def save_audio(self, name=DEFAULT_OUT_NAME):\n print(\"Saving...\")\n wf = wave.open(name+'.wav', 'wb')\n wf.setnchannels(DEFAULT_CHANNELS)\n wf.setsampwidth(self.audio.get_sample_size(DEFAULT_FORMAT))\n wf.setframerate(DEFAULT_RATE)\n wf.writeframes(b''.join(self.frames))\n wf.close()\n print('Saved')", "def __save(self,audio):\n self.__openSampleFile()\n self.__sampleFile.writeframes(audio)", "def save_frames_to_wav_file(frames: np.ndarray, sample_rate: int, file_path: str):\n wavfile.write(file_path, sample_rate, np.hstack(frames))", "def write_wav(fname, samps, sampling_rate=16000, normalize=True):\n\t# for multi-channel, accept ndarray [Nsamples, Nchannels]\n\tif samps.ndim != 1 and samps.shape[0] < samps.shape[1]:\n\t\tsamps = np.transpose(samps)\n\t\tsamps = np.squeeze(samps)\n\t# same as MATLAB and kaldi\n\tif normalize:\n\t\tsamps = samps * MAX_INT16\n\t\tsamps = samps.astype(np.int16)\n\tfdir = os.path.dirname(fname)\n\tif fdir and not os.path.exists(fdir):\n\t\tos.makedirs(fdir)\n\t# NOTE: librosa 0.6.0 seems could not write non-float narray\n\t# so use scipy.io.wavfile instead\n\twavfile.write(fname, sampling_rate, samps)", "def save(self, fname, master_volume=1.):\n \n # first pass - find max amplitude value to normalise output\n vmax = 0.\n for c in range(len(self.out_channels)):\n vmax = max(\n abs(self.out_channels[str(c)].values.max()),\n abs(self.out_channels[str(c)].values.min()),\n vmax\n )\n\n # normalisation for conversion to int32 bitdepth wav\n norm = master_volume * (pow(2, 31)-1) / vmax\n\n # setup array to house wav stream data \n chans = np.zeros((self.out_channels['0'].values.size,\n len(self.out_channels)), dtype=\"int32\")\n \n # normalise and collect channels into a list\n for c in range(len(self.out_channels)):\n vals = self.out_channels[str(c)].values\n chans[:,c] = (vals*norm).astype(\"int32\")\n \n # finally combine and write out wav file\n wavfile.write(fname, self.samprate, chans)\n print(f\"Saved {fname}\")", "def _save_wav(buff, data, rate) -> None:\n # Code inspired from `IPython.display.Audio`\n data = np.array(data, dtype=float)\n\n bit_depth = 16\n max_sample_value = int(2**(bit_depth - 1)) - 1\n\n num_channels = data.shape[1] if len(data.shape) > 1 else 1\n scaled = np.int16(data / np.max(np.abs(data)) * max_sample_value)\n # The WAVE spec expects little-endian integers of \"sampwidth\" bytes each.\n # Numpy's `astype` accepts array-protocol type strings, so we specify:\n # - '<' to indicate little endian\n # - 'i' to specify signed integer\n # - the number of bytes used to represent each integer\n # See: https://numpy.org/doc/stable/reference/arrays.dtypes.html\n encoded_wav = scaled.astype(f'<i{bit_depth // 8}', copy=False).tobytes()\n\n with wave.open(buff, mode='wb') as waveobj:\n waveobj.setnchannels(num_channels)\n waveobj.setframerate(rate)\n waveobj.setsampwidth(bit_depth // 8)\n waveobj.setcomptype('NONE', 'NONE')\n waveobj.writeframes(encoded_wav)", "def write_wav(self, full_out_file = None):\n\n if full_out_file is None:\n \n (out_file, out_dir) = misc.save_file(FilterSpec='*.wav', DialogTitle='Write sound to ...', \n DefaultName='')\n full_out_file = os.path.join(out_dir, out_file)\n if full_out_file is None:\n print('Output discarded.')\n return 0\n else:\n full_out_file = os.path.abspath(full_out_file)\n (out_dir , out_file) = os.path.split(full_out_file)\n\n write(str(full_out_file), int(self.rate), 
self.data)\n print('Sounddata written to ' + out_file + ', with a sample rate of ' + str(self.rate))\n print('OutDir: ' + out_dir)\n \n return full_out_file", "def save_sound(filename,sound,sample_freq,num_channels):\n #open a wave file in write ('w') mode, this will create the file\n file=wave.open(filename,'w')\n #set the framerate aka sample frequency\n file.setframerate(sample_freq)\n #set the number of the channels\n file.setnchannels(num_channels)\n #the size of the one sample in bytes\n file.setsampwidth(2)\n #write the actual sound to the file, notice the call to get_raw\n file.writeframesraw(sound.get_raw())\n file.close()", "def create_wav_file(self, ):\n\n f_out = open(self.wav_file, 'w')\n u_utt2spk = open(self.utt2spk, 'w')\n for file in glob.glob(self.wav_folder+'/*.wav'):\n base = os.path.basename(file).split('.')[0]\n # write to scp file\n f_out.write(base + '\\t' + file + '\\n')\n u_utt2spk.write(base + '\\t' + 'tts' + '\\n')", "def save_wav(filename, samples, rate=16000, width=2, channels=1):\n wav = wave.open(filename, 'wb')\n wav.setnchannels(channels)\n wav.setsampwidth(width)\n wav.setframerate(rate)\n wav.writeframes(samples)\n wav.close()", "def split_on_silence_threshold(wav_file, dest_dir):\n # Read the file\n audioSegment = AudioSegment.from_wav(wav_file)\n # Calculating the silence threshold\n # Normalizing the audio file belfore finding the threshold\n full_audio_wav = normalize(audioSegment)\n loudness_ms_list = [] # Save the audio levels of all the chunks\n for ms_chunk in full_audio_wav:\n loudness_ms_list.append(round(ms_chunk.dBFS))\n print(\"Audio levels are recorded\", file=sys.stderr)\n # Using pandas df for easier manipulation\n df = pd.DataFrame(loudness_ms_list)\n df[0] = df[df[0] != float(\"-inf\")] # Remove the very low levels\n st = df[0].mean()\n st = st if st < -16 else -16 # Because -16db is default\n # Splits the audio if silence duration is MSL long\n MSL = 500 # minimum silence length in ms\n chunks = split_on_silence(\n full_audio_wav, \n # split on silences longer than 500ms (500ms)\n min_silence_len=MSL, \n # anything under -16 dBFS is considered silence\n silence_thresh=st, \n # keep 200 ms of leading/trailing silence\n keep_silence=200, \n )\n # Saving all the chunks\n print(\"Writing all the files, this may take some time!\", file=sys.stderr)\n for index, chunk in enumerate(chunks):\n chunk_file_name = os.path.join(dest_dir, \"sample_{}.wav\".format(str(index).zfill(10)))\n print(\"Saving the file to \" + chunk_file_name, file=sys.stderr)\n # You can export as mp3 etc, note that it has dependency on ffmpeg\n chunk.export(chunk_file_name, format=\"wav\")", "def save_sample(file_path, sampling_rate, audio):\n audio = (audio.numpy() * 32768).astype(\"int16\")\n write(file_path, sampling_rate, audio)", "def write_data_to_wav(self, file_name: str, data):\r\n # apply scale and convert to int16\r\n data = np.int16(data/np.max(np.abs(data)) * self.wav_scale)\r\n # write to file\r\n write(file_name, self.audio_sample_rate, data)\r\n print('Sound ', file_name, ' has been saved')", "def save_stereo(self, fname, master_volume=1.):\n\n if len(self.out_channels) > 2:\n print(\"Warning: sonification has > 2 channels, only first 2 will be used. 
See 'save_combined' method.\")\n \n # first pass - find max amplitude value to normalise output\n # and concatenate channels to list\n vmax = 0.\n channels = []\n for c in range(min(len(self.out_channels), 2)):\n vmax = max(\n abs(self.out_channels[str(c)].values.max()),\n abs(self.out_channels[str(c)].values.min()),\n vmax\n ) / master_volume\n channels.append(self.out_channels[str(c)].values)\n \n wav.write(fname, \n np.column_stack(channels),\n self.samprate, \n scale = (-vmax,vmax),\n sampwidth=3)\n \n print(\"Saved.\")", "def export_wav(self, folder, name_fmt=\"{:02d}.wav\", dtype=np.int16):\n data = np.atleast_2d(self.in_time)\n\n assert data.ndim == 2\n assert np.all(np.abs(data) <= 1.0)\n\n # convert and scale to new output datatype\n if dtype in [np.uint8, np.int16, np.int32]:\n lim_orig = (-1.0, 1.0)\n lim_new = (np.iinfo(dtype).min, np.iinfo(dtype).max)\n data = _rescale(data, lim_orig, lim_new).astype(dtype)\n elif dtype != np.float32:\n raise TypeError(f\"dtype {dtype} is not supported by scipy.wavfile.write.\")\n\n path = Path(folder)\n if not path.is_dir():\n path.mkdir(parents=True, exist_ok=False)\n\n for i in range(data.shape[0]):\n wavfile.write(path / name_fmt.format(i + 1), self.fs, data[i])", "def write(f, sr, x, normalized=False):\n channels = 2 if (x.ndim == 2 and x.shape[1] == 2) else 1\n if normalized: # normalized array - each item should be a float in [-1, 1)\n y = np.int16(x * 2 ** 15)\n else:\n y = np.int16(x)\n song = pydub.AudioSegment(y.tobytes(), frame_rate=sr, sample_width=2, channels=channels)\n song.export(f, format=\"mp3\", bitrate=\"64k\")", "def write_timeline_to_wav(output_path, data, sample_rate):\n\n scipy.io.wavfile.write(output_path, sample_rate, data.T)", "def save_combined(self, fname, ffmpeg_output=False, master_volume=1.):\n # setup list to house wav stream data \n inputs = [None]*len(self.out_channels)\n\n # first pass - find max amplitude value to normalise output\n vmax = 0.\n for c in range(len(self.out_channels)):\n vmax = max(\n abs(self.out_channels[str(c)].values.max()),\n abs(self.out_channels[str(c)].values.min()),\n vmax\n ) / master_volume\n \n print(\"Creating temporary .wav files...\")\n \n for c in range(len(self.out_channels)):\n tempfname = f\"./.TEMP_{c}.wav\"\n wav.write(tempfname, \n self.out_channels[str(c)].values,\n self.samprate, \n scale = (-vmax,vmax),\n sampwidth=3)\n inputs[self.channels.forder[c]] = ff.input(tempfname)\n \n print(\"Joning temporary .wav files...\")\n (\n ff.filter(inputs, 'join', inputs=len(inputs), channel_layout=self.channels.setup)\n .output(fname)\n .overwrite_output()\n .run(quiet=~ffmpeg_output)\n )\n \n print(\"Cleaning up...\")\n for c in range(len(self.out_channels)):\n os.remove(f\"./.TEMP_{c}.wav\")\n \n print(\"Saved.\")", "def save_wavetables(self, path: str, filename_prefix: str = '') -> None:\n for i in range(len(self.wavetables)):\n if not os.path.exists(path):\n os.mkdir(path)\n location = os.path.join(path, filename_prefix + f'{i:02d}.wav')\n wav_file = WavFile(location)\n wav_file.write_samples(self.wavetables[i])", "def all_wav_to_mp3(self):\n for each_file, artist in self.past_songs_db_data:\n self.convert_wav_to_mp3(each_file)", "def save(self, name):\n try:\n os.mkdir(os.path.join(self.dbpath, name))\n except:\n pass\n\n wf = wave.open(os.path.join(self.dbpath, name, str(uuid.uuid4()) + \".wav\"), 'wb')\n wf.setnchannels(self.CHANNELS)\n wf.setsampwidth(self.p.get_sample_size(self.FORMAT))\n wf.setframerate(self.RATE)\n wf.writeframes(b''.join(list(self.frames)))\n 
wf.close()", "def export_wav(\n filename_wav: Path,\n tradb: vae.io.TraDatabase,\n channel: int,\n time_start: Optional[float] = None,\n time_stop: Optional[float] = None,\n decimation_factor: int = 1,\n):\n y, fs = tradb.read_continuous_wave(\n channel=channel,\n time_start=time_start,\n time_stop=time_stop,\n time_axis=False,\n show_progress=False,\n raw=True, # read as ADC values (int16)\n )\n\n if decimation_factor > 1:\n y = signal.decimate(y, decimation_factor).astype(np.int16)\n fs //= decimation_factor\n\n wavfile.write(filename_wav, fs, y)", "def save_secured_song_to_wave(self, file_location):\n protected_wav = wave.open(os.path.abspath(file_location), 'wb')\n protected_wav.setnchannels(self.original_song.getnchannels())\n protected_wav.setsampwidth(self.original_song.getsampwidth())\n protected_wav.setframerate(self.original_song.getframerate())\n protected_wav.writeframes(self.metadata)\n\n for val in self.full_song:\n protected_wav_val = struct.pack('<h', val)\n protected_wav.writeframesraw(protected_wav_val)\n\n protected_wav.close()", "def wavwrite(y, fs, filename):\n \n x = copy.deepcopy(y) # copy array\n x *= INT16_FAC # scaling floating point -1 to 1 range signal to int16 range\n x = np.int16(x) # converting to int16 type\n wavfile.write(filename, fs, x)", "def save_separated_audio(self, audios, filename):\n\n # Create folder with mixture name\n folder_path = os.path.join(self.config[\"separated_audio_folder\"], os.path.splitext(filename)[0])\n os.makedirs(folder_path)\n # Save each separated source\n for class_idx, audio in enumerate(audios):\n librosa.output.write_wav(os.path.join(folder_path, self.data_set.classes[class_idx]) + '.wav',\n audio.T,\n sr=self.data_set.config[\"sampling_rate\"])\n # Also copy the mixture in the folder\n copyfile(self.data_set.audio_full_filename(filename), os.path.join(folder_path, \"original_mix.wav\"))", "def wavwrite(y, fs, filename):\n\n\tx = copy.deepcopy(y) # copy array\n\tx *= INT16_FAC # scaling floating point -1 to 1 range signal to int16 range\n\tx = np.int16(x) # converting to int16 type\n\twrite(filename, fs, x)", "def save_to_file(\n sources,\n codec='wav', audio_adapter=ffmpeg.FFMPEGProcessAudioAdapter(),\n bitrate='128k', synchronous=True):\n\n # filename = \"chengdu.mp3\"\n pool = Pool()\n tasks = []\n for instrument, data in sources.items():\n path = \"./out/\"+instrument + \".\" + codec\n\n if pool:\n task = pool.apply_async(audio_adapter.save, (\n path,\n data,\n 44100,\n codec,\n bitrate))\n tasks.append(task)\n else:\n audio_adapter.save(path, data, 44100, codec, bitrate)\n if synchronous and pool:\n while len(tasks) > 0:\n task = tasks.pop()\n task.get()\n task.wait(timeout=200)", "def output_beat_to_file(file_name, e):\n print(\"Writing to file:\", file_name)\n routine = gp.compile(e,pset)\n with open(file_name+\".raw\",'w') as f:\n for t in range(200000):\n f.write(chr(int(routine(t+1))%256))\n # Now convert to wav\n subprocess.call(SOX_COMMAND + \" \" + file_name + \".raw\" + \" \" + file_name + \".wav\", shell=True)\n subprocess.call(LAME_COMMAND + \" \" + file_name + \".wav\", shell=True)", "def wavwrite(fname, Fs, xt):\n # convert to np.int16 data type\n xt = np.array((2**15-1)*xt, np.int16)\n sio_wav.write(fname, Fs, xt)", "def save_wfm(self, source, dest):\n self.bus.write('SAV:WAVE %s,%s' % (source, dest))", "def write_wav(filename, data, rate = 44100):\r\n \r\n # Compress the data (the input format is likely to be float64)\r\n # Make sure that the format is readable by Librosa\r\n maxv = 
np.iinfo(np.int16).max\r\n lb_write_wav(filename, (data * maxv).astype(np.int16), rate) \r\n \r\n return(None)", "def write_wave(path, audio, sample_rate):\n with contextlib.closing(wave.open(path, 'wb')) as wf:\n wf.setnchannels(1)\n wf.setsampwidth(2)\n wf.setframerate(sample_rate)\n wf.writeframes(audio)", "def _create_wave_file(self):\n is_wave_open = False\n try:\n wv = wave.open(self.audio_file_name, mode='wb')\n is_wave_open = True\n wv.setparams((1, # 1 channel (mono)\n 2, # 2 bytes per sample * 1 channel\n self.sample_rate,\n 0, # Initial number of samples.\n 'NONE',\n 'not compressed'))\n wv.writeframes(self.sample_buffer)\n except:\n print('Error creating audio file')\n if is_wave_open:\n wv.close()", "def write_sound(file, snds: Property, pack_list, snd_prefix='*'):\n if snds.has_children():\n file.write('\"rndwave\"\\n\\t{\\n')\n for snd in snds:\n file.write(\n '\\t\"wave\" \"{sndchar}{file}\"\\n'.format(\n file=snd.value.lstrip(SOUND_CHARS),\n sndchar=snd_prefix,\n )\n )\n pack_list.add('sound/' + snd.value.casefold())\n file.write('\\t}\\n')\n else:\n file.write(\n '\"wave\" \"{sndchar}{file}\"\\n'.format(\n file=snds.value.lstrip(SOUND_CHARS),\n sndchar=snd_prefix,\n )\n )\n pack_list.add('sound/' + snds.value.casefold())", "def save_wav(data, file_path, sample_rate):\n if np.issubdtype(data.dtype, np.floating):\n data = data.astype(np.float32)\n elif data.dtype not in [np.int32, np.int16, np.uint8]:\n raise ValueError(f'wavfile data must be np.float*, np.int32, np.int16, or np.uint8, got {data.dtype}')\n\n wavfile.write(file_path, sample_rate, data)", "def writesilence(self, duration):\n samples = int(self.sample_rate * duration)\n values = []\n for i in range(0, samples):\n signal = wave.struct.pack('h', 0)\n values.append(signal)\n # Buffer values every 5 seconds (22050 samples)\n if len(values) >= 220500:\n value_string = \"\".join(values)\n self.file.writeframes(value_string)\n # Clear values array\n del values[0:len(values)]\n value_string = \"\".join(values)\n self.file.writeframes(value_string)", "def raw_to_wav(data, path, rate=44100):\n wavfile.write(path, rate, data)", "def write_audio_segment(self, data):\n cache_name = self.CACHE_FILE_NAME + str(time.time()) + '.wav'\n file = open(cache_name, \"wb\")\n file.write(data)\n file.close()\n return cache_name", "def record_audio_to_file(self, time, file_name):\n sample_width, frames = self.record_audio(time)\n wf = wave.open(file_name, 'wb')\n wf.setnchannels(self.channels)\n wf.setsampwidth(sample_width)\n wf.setframerate(self.rate)\n wf.writeframes(frames)\n wf.close()", "def make_waves(wave_array, filename: str, num_cycle=1):\n sampleRate = 44100.0 # hertz\n duration = 1.0 # seconds\n frequency = 440.0 # hertz\n obj = wave.open(filename, 'w')\n obj.setnchannels(1) # mono\n obj.setsampwidth(2)\n obj.setframerate(sampleRate)\n waves = list(wave_array)\n for w in range(num_cycle):\n for i in waves:\n value = i\n data = struct.pack('<h', int(value))\n obj.writeframesraw(data)\n obj.close()", "def save_to_file(samps, filename, save_as_numpy):\n with open(filename, 'wb') as out_file:\n if save_as_numpy:\n np.save(out_file, samps, allow_pickle=False, fix_imports=False)\n else:\n samps.tofile(out_file)", "def test_wav_multiple_channels(self, dtype, sample_rate, num_channels):\n duration = 1\n path = self.get_temp_path(\"data.wav\")\n data = get_wav_data(dtype, num_channels, normalize=False, num_frames=duration * sample_rate)\n save_wav(path, data, sample_rate)\n info = self._info(path)\n assert info.sample_rate == 
sample_rate\n assert info.num_frames == sample_rate * duration\n assert info.num_channels == num_channels\n assert info.bits_per_sample == sox_utils.get_bit_depth(dtype)\n assert info.encoding == get_encoding(\"wav\", dtype)", "def play(sampler, name=\"/Users/Jxie0755/Documents/DXcodings/Learning_Python/CS_61A/week03/mario.wav\", seconds=2):\n out = open(name, \"wb\")\n out.setnchannels(1)\n out.setsampwidth(2)\n out.setframerate(frame_rate)\n t = 0\n while t < seconds * frame_rate:\n sample = sampler(t)\n out.writeframes(encode(sample))\n t = t + 1\n out.close()", "def _to_wav(self):\n self._status = 0\n fname = fm.file2wav(self.get_filename()) \n if fname != self.get_filename(): # can change the name\n self._set_filename(fname) # in case of wave transcoding\n self._status = 1", "def saveSong(song, filename, append = 1):\n if append:\n mode = \"w+\"\n else:\n mode = \"w\"\n fp = open(filename, mode) # will append it if it exists\n if type(song) in [list]:\n for tup in song:\n if len(tup) == 2:\n f, d = tup\n fp.write(\"%s %s\\n\" % (_getNoteFromFrequency(f), d))\n elif len(tup) == 3:\n f1, f2, d = tup\n fp.write(\"%s %s %s\\n\" % (_getNoteFromFrequency(f),\n _getNoteFromFrequency(f), d))\n else: # string\n song = song.replace(\"\\n\", \";\")\n lines = song.split(\";\")\n for line in lines:\n fp.write(line + \"\\n\")\n fp.close()", "def write_audio_to_file(audio: torch.Tensor, sample_id: str = ''):\n global FS_HZ\n assert FS_HZ is not None\n audio_extension = '.wav'\n audio_path = upload_directory + 'sample' + sample_id + audio_extension\n audio_np = audio.cpu().numpy()\n with open(audio_path, 'wb') as f:\n soundfile.write(f,\n audio_np,\n samplerate=FS_HZ)\n return audio_path", "def save_mp3(ndarray, sr, feature_name, out_path, x, y, new_labels, mp3_filename=None):\n import soundfile as sf\n\n def _save_mp3(source_path, out_path):\n cmd = [\n 'lame',\n '--preset',\n 'insane',\n str(source_path),\n str(out_path)\n ]\n errno = subprocess.call(cmd)\n if errno:\n print('{} encoding failed with code'.format(source_path), end=' ')\n print(errno)\n print('skipping...')\n return errno\n os.remove(source_path)\n return 0\n\n # this is kind-of standard\n if mp3_filename is None:\n mp3_filename = FeatureExtractor.get_file_name(x, feature_name, 'mp3')\n wav_filename = mp3_filename.replace('mp3', 'wav')\n sf.write(str(out_path / wav_filename), ndarray, sr) # write wav file\n errno = _save_mp3(out_path / wav_filename,\n out_path / mp3_filename) # load wav, encode as mp3 and remove wav file\n if errno:\n # if any error, then keep wav\n filename = wav_filename\n else:\n # non-error clause, then it was successfully exported to mp3\n filename = mp3_filename\n if new_labels is not None:\n new_labels.append([filename, y])\n print('info: {} transformed and saved!'.format(filename))\n return filename", "def snip(filename,s,e,wout=True):\n n, data, data_dB,sr,ch=inputwav(filename)\n st=int(s*44100)\n en=int(e*44100)\n data_s=data[st:en,:]\n if wout==True:\n print('Exporting...')\n sf.write(filename[0:len(filename)-4]+'_snipped.wav',data_s,sr,'PCM_16')\n print('Done!')\n return data_s", "def write_wave(data, samp_rate, file):\n if data.dtype != np.float16:\n assert(data.dtype in [np.float32, np.float64])\n if (len(data.shape) < 2 or data.shape[0] > data.shape[1] or\n not data.dtype in [np.float32, np.float64]):\n raise ValueError(\"Input audio had unexpected type or shape or dtype: {},{}\"\n .format(data.shape, data.dtype))\n max_val = data.max() * 32768.0\n min_val = data.min() * 32768.0\n\n truncation_scale = 
1.0\n if max_val > 32767.0:\n # The + 0.1 below is a small offset to prevent roundoff causing\n # wrap-around errors.\n truncation_scale = 32767.0 / (max_val + 0.1)\n if min_val < -32768.0:\n s = 32768.0 / (-min_val + 0.1);\n if s > truncation_scale:\n truncation_scale = s\n scale = 32768.0 * truncation_scale\n data = np.rint(data * scale).astype(np.int16)\n data = data.swapaxes(0, 1)\n file = file_utils.open_or_fd(file, \"w\", encoding=None)\n wavio.write(file, data, samp_rate, scale='none')\n file.close()", "def convert_to_wav(fin, fout):\n temp = subprocess.run([\"ffmpeg\",\n \"-i\", \n fin, \n fout], \n stdout=subprocess.PIPE, \n stderr=subprocess.PIPE)", "def to_voice(item):\r\n item.seek(0)\r\n item = AudioSegment.from_file(item)\r\n m = io.BytesIO()\r\n m.name = \"voice.ogg\"\r\n item.split_to_mono()\r\n dur = len(item) / 1000\r\n item.export(m, format=\"ogg\", bitrate=\"64k\", codec=\"libopus\")\r\n m.seek(0)\r\n return m, dur", "def test_wav(self, dtype, sample_rate, num_channels):\n duration = 1\n path = self.get_temp_path(\"data.wav\")\n data = get_wav_data(dtype, num_channels, normalize=False, num_frames=duration * sample_rate)\n save_wav(path, data, sample_rate)\n info = self._info(path)\n assert info.sample_rate == sample_rate\n assert info.num_frames == sample_rate * duration\n assert info.num_channels == num_channels\n assert info.bits_per_sample == sox_utils.get_bit_depth(dtype)\n assert info.encoding == get_encoding(\"wav\", dtype)", "def morse_to_audio(words, playsound=None, name_file=\"output\\\\code_to_audio_output.wav\"):\n dot = wave.open(\"kropka.wav\", 'rb')\n dash = wave.open(\"kreska.wav\", 'rb')\n\n rate_dot = dot.getframerate()\n\n rate_dash = dash.getframerate()\n\n data_dot = dot.readframes(-1)\n data_dash = dash.readframes(-1)\n data_dot = np.fromstring(data_dot, 'Int16')\n data_dash = np.fromstring(data_dash, 'Int16')\n\n l2=len(data_dot)\n l1=len(data_dash)\n\n output=[]\n\n for element in words:\n # print(element)\n for i in range(0, len(element)):\n # print(element[i])\n if element[i] == '1':\n # playsound(\"kropka.wav\")\n output.extend(data_dot)\n\n if element[i] == '0':\n # playsound(\"kreska.wav\")\n output.extend(data_dash)\n if element[i] == ' ':\n output.extend(np.zeros(int(len(data_dash)))*3)\n if i != len(element) - 1:\n # time.sleep(dl_kropka)\n output.extend(np.zeros(int(len(data_dot))))\n else:\n continue\n # time.sleep(dl_kreska)\n output.extend(np.zeros(int(len(data_dash))))\n\n # print(output)\n\n wynik=np.asarray(output)\n\n wynik=np.array(wynik).astype('int16')\n\n wav.write(name_file, rate_dash, wynik)\n\n #plik sie nie odtwarza w windowsie ale w audacity jest już wyraźnym szumem XD\n\n dot.close()\n dash.close()", "def split_multiple_recordings_file(file_path, min_silence_duration=0.25, noise_threshold=150):\n print(file_path)\n rate, audio = scipy.io.wavfile.read(file_path)\n split_recordings = split_multiple_recordings(audio, min_silence_duration=min_silence_duration,\n noise_threshold=noise_threshold, sample_rate_hz=rate)\n\n if file_path.count('.') != 1:\n raise Exception('File_path must contain exactly one period, usually in extension. 
IE: /home/test.wav')\n\n for idx, recording in enumerate(split_recordings):\n print(\"spliting \" + file_path)\n new_file_path = file_path.split('.')[0] + '_' + str(idx) + \".wav\"\n scipy.io.wavfile.write(new_file_path, rate, recording)", "def segment_audio(filename, y_value, split='train', clf='gender'):\n\n filepath = 'recordings/recordings/' + filename + '.mp3'\n audio, sr = librosa.load(filepath, sr=16000)\n audio = normalize(audio)\n\n # Add gender label to filename for later processing\n sex = y_value\n if sex == 'female':\n filename = '{}.F'.format(filename)\n else: filename = '{}.M'.format(filename)\n\n # Segment audio file\n seg_files = segment_10s(audio, sr)\n\n for key, val in seg_files.items():\n new_name = '{}.{}'.format(filename, key)\n sf.write('data/{}/{}/{}o.wav'.format(clf, split, new_name), val, sr)", "def audio_file_save(folder_path, current_time, data, name_by_date):\r\n\r\n name_by_time = current_time + '.wav' #timestamp for the audio file name\r\n usage = disk_usage(folder_path)\r\n if usage.used / usage.total < args.storage_threshold:\r\n file_path = os.path.join(folder_path, name_by_time)\r\n\r\n if args.resampling:\r\n sampling_rate = args.resampling_rate\r\n audio = audio_resampling(data)\r\n else:\r\n sampling_rate = args.recording_samplerate\r\n audio = data\r\n\r\n sf.write(file_path , audio, sampling_rate)\r\n\r\n else:\r\n name = os.path.join(folder_path, name_by_date + '.txt')\r\n f = open(name, 'a')\r\n f.write(current_time + '\\t Activity Detected \\n')\r\n f.close()", "def record_audio_to_file_and_get_wav(self, time, file_name):\n sample_width, frames = self.record_audio(time)\n wf = wave.open(file_name, 'wb')\n wf.setnchannels(self.channels)\n wf.setsampwidth(sample_width)\n wf.setframerate(self.rate)\n wf.writeframes(frames)\n wf.close()\n return WavFile(samples=frames, sample_width=sample_width, time=time, word=file_name)", "def convert_wav(src_wav, dst_wav, subtype='PCM_16'):\n assert os.path.exists(src_wav), \"{} not exists!\".format(src_wav)\n data, sr = soundfile.read(src_wav)\n soundfile.write(dst_wav, data, sr, subtype=subtype)", "def output_wave_file(predicted_mfccs, filename):\n global eng\n predicted_mfccs_transposed = np.transpose(predicted_mfccs)\n\n\n # MFCC features need to be a numpy array of shape (num_coefficients x num_frames) in order to be passed to the invmelfcc function\n inverted_wav_data = eng.invmelfcc(matlab.double(predicted_mfccs_transposed.tolist()), 16000.0, 25, 100.0, 0.005, 0.005)\n\n inverted_wav_data = np.squeeze(np.array(inverted_wav_data))\n\n # scales the waveform to be between -1 and 1\n maxVec = np.max(inverted_wav_data)\n minVec = np.min(inverted_wav_data)\n inverted_wav_data = ((inverted_wav_data - minVec) / (maxVec - minVec) - 0.5) * 2\n\n wav.write(filename + '.wav', 16000.0, inverted_wav_data)", "def mp3_to_wav(show_progress=True):\n\n # Define a devnull var to supress subprocess output\n devnull = open(os.devnull, 'w')\n\n # Get a list of the filepath for each of the mp3 files in each subdirectory of data/fma_small\n file_list = glob.glob('./../data/fma_small/*/*.mp3')\n\n # Get the number of files N and initialize a counter\n N = len(file_list)\n counter = 0\n\n # For each file/filepath, convert that file to wav format and save it to data/wavs/*/*.wav (so as a wave file)\n for filepath in file_list:\n\n # Every 100 file conversions, print a progress update\n if counter % 50 == 49 and show_progress:\n progress = str(round(100 * counter / N, 2))\n print('File conversion ' + progress + '% complete.')\n\n # Get 
the file name from the path and define a new path for the wav file\n file_name = filepath[24:-4]\n new_path = './../data/wavs/' + file_name + '.wav'\n\n # Call the subprocess using ffmpeg to convert the file to wav format (and supress all the output)\n subprocess.call(['ffmpeg', '-i', filepath, new_path], stdout=devnull)\n\n # Increment the counter\n counter += 1", "def record():\n p = pyaudio.PyAudio()\n stream = p.open(format=FORMAT, input_device_index=0, channels=1, rate=RATE, input=True, output=True, frames_per_buffer=CHUNK_SIZE)\n num_silent = 0\n snd_started = False\n\n r = array('h')\n while 1:\n snd_data = array('h', stream.read(CHUNK_SIZE, exception_on_overflow = False))\n if byteorder == 'big':\n snd_data.byteswap()\n r.extend(snd_data)\n\n silent = is_silent(snd_data)\n if silent and snd_started:\n num_silent += 1\n elif not silent and not snd_started:\n print(\"Sound started.\")\n snd_started = True\n\n if snd_started and num_silent> 10:\n break\n\n sample_width = p.get_sample_size(FORMAT)\n stream.stop_stream()\n stream.close()\n p.terminate()\n\n r = normalize(r)\n #r = trim(r)\n #r = add_silence(r, 0.5)\n return sample_width, r", "def save_audio(ndarray, feature_name, out_path, x, y, new_labels, filename=None, sr=SR):\n # this is kind-of standard\n filename = filename or FeatureExtractor.get_file_name(x, feature_name, 'wav')\n librosa.output.write_wav(out_path / filename, ndarray, sr=sr, norm=True)\n new_labels.append([filename, y])\n print('info: {} transformed and saved!'.format(filename))\n return filename", "def generate_waveform(self, mel, normalize=True, batched=True,\n target=8000, overlap=800, do_save_wav=True):\n wav = self.vocoder_manager.infer_waveform(mel,\n normalize=normalize,\n batched=batched,\n target=target,\n overlap=overlap,\n do_save_wav=do_save_wav\n )\n return wav", "def export_sounds(names, path, base_label='Sound_'):\n\tfor filename, output in dump_sounds(names, base_label):\n\t\twith open(os.path.join(path, filename), 'w') as out:\n\t\t\tout.write(output)", "def record():\n p = pyaudio.PyAudio()\n stream = p.open(format=FORMAT, channels=1, rate=RATE,\n input=True, output=True,\n frames_per_buffer=CHUNK_SIZE)\n\n num_silent = 0\n snd_started = False\n\n r = array('h')\n\n while 1:\n # little endian, signed short\n snd_data = array('h', stream.read(CHUNK_SIZE))\n if byteorder == 'big':\n snd_data.byteswap()\n r.extend(snd_data)\n\n silent = is_silent(snd_data)\n\n if silent and snd_started:\n num_silent += 1\n elif not silent and not snd_started:\n snd_started = True\n\n if snd_started and num_silent > 30:\n break\n\n sample_width = p.get_sample_size(FORMAT)\n stream.stop_stream()\n stream.close()\n p.terminate()\n\n r = normalize(r)\n r = trim(r)\n r = add_silence(r, 0.5)\n return sample_width, r", "def encode_audio(in_file, out_file):\r\n # construct the encoder\r\n autoencoder = keras.models.load_model(\"audio_autoencoder.model\")\r\n in_layer = keras.layers.Input(shape=(416, 1))\r\n encode = autoencoder.layers[1](in_layer)\r\n encode = autoencoder.layers[2](encode)\r\n encode = autoencoder.layers[3](encode)\r\n encode = autoencoder.layers[4](encode)\r\n encode = autoencoder.layers[5](encode)\r\n encode = autoencoder.layers[6](encode)\r\n encode = autoencoder.layers[7](encode)\r\n encode = autoencoder.layers[8](encode)\r\n encode = autoencoder.layers[9](encode)\r\n encode = autoencoder.layers[10](encode)\r\n encode = autoencoder.layers[11](encode)\r\n encode = autoencoder.layers[12](encode)\r\n encoder = keras.models.Model(in_layer, encode)\r\n\r\n 
# Read the file\r\n samp_rate, data = wavfile.read(in_file)\r\n # check if the file is mono or stereo\r\n if len(data.shape) == 2:\r\n data = np.concatenate(data)\r\n chans = 2\r\n else:\r\n chans = 1\r\n\r\n # Rescale integer samples over range [-32768,32767] to floats over range [0.0,1.0]\r\n data = data.astype('float32') / float(pow(2, 15))\r\n data += 1.0\r\n data = data / 2.0\r\n\r\n # Pad the samples with zeroes, if needed, to make the last encoding frame full\r\n padded = np.pad(data, (0, 416 - (len(data) % 416)), 'constant')\r\n\r\n # Construct input layer\r\n inputs = padded.reshape(len(padded) // 416, 416, 1)\r\n\r\n # Encode the data\r\n encoded = encoder.predict(inputs)\r\n\r\n # Save the encoded data, as well as the important parameters\r\n np.savez_compressed(out_file, data=encoded, rate=samp_rate, Type=1, channels=chans)", "def play_wav_on_index(audio_data, stream_object):\n\n stream_object.write(audio_data)", "def save_all_chunks_with_labels(audio_dir, json_dir, csv_dir):\n for file in os.listdir(json_dir):\n file_path = os.path.join(json_dir, file)\n audio_file_path = os.path.join(audio_dir, file)[:-4] + \"wav\"\n with open(file_path) as f:\n data = json.load(f)\n save_arrays_with_labels(audio_file_path, data, csv_dir)", "def save_song(self):\n if self.is_stream:\n self.save_song_from_stream()\n else:\n self.save_song_from_file()", "def record():\n p = pyaudio.PyAudio()\n stream = p.open(format=FORMAT, channels=1, rate=RATE,\n input=True, output=True,\n frames_per_buffer=CHUNK_SIZE)\n\n num_silent = 0\n snd_started = False\n\n r = array('h')\n\n while 1:\n # little endian, signed short\n snd_data = array('h', stream.read(CHUNK_SIZE))\n if byteorder == 'big':\n snd_data.byteswap()\n r.extend(snd_data)\n\n silent = is_silent(snd_data)\n\n if silent and snd_started:\n num_silent += 1\n elif not silent and not snd_started:\n snd_started = True\n\n if snd_started and num_silent > SILENCE:\n break\n\n sample_width = p.get_sample_size(FORMAT)\n stream.stop_stream()\n stream.close()\n p.terminate()\n\n r = normalize(r)\n r = trim(r)\n r = add_silence(r, 0.5)\n return sample_width, r", "def convert_to_wav(mp3_filename):\n\n wav_filename = mp3_filename[:-4] + \".wav\"\n complete_mp3FileName = os.path.join(MP3_FOLDER, mp3_filename)\n complete_wavFileName = os.path.join(WAV_FOLDER, wav_filename)\n\n mp3_file = AudioSegment.from_mp3(complete_mp3FileName)\n mp3_file.export(complete_wavFileName, format=\"wav\")\n\n print(f\"The mp3 file {complete_mp3FileName} was successfully converted to \" \\\n + f\"the wav file {complete_wavFileName}.\")", "def _writeWiggle(trackName, trackDescription, allCounts, wigOut, win=1):\n wigFile = open(wigOut, \"w\")\n wigFile.write(\"track type=wiggle_0 name='%s' description='%s' visibility=2\\n\" % (trackName,\n trackDescription))\n for name in allCounts.keys():\n start = 0\n end = max(allCounts[name].keys())\n\n wigFile.write(\"fixedStep chrom=%s start=%s step=%s span=%s\\n\" % (name, start+1, win, win))\n\n for i in range(start, end):\n if allCounts[name].has_key(i):\n curValue = allCounts[name][i]\n else:\n curValue = 0\n wigFile.write(\"%s\\n\" % (curValue))\n\n wigFile.close()", "def _record_wav(stream, N, CHUNK):\n frames = []\n for i in range(N):\n data = stream.read(CHUNK)\n frames.append(data)\n return np.fromstring(b\"\".join(frames), 'Int16')", "def save_to_file(filename: str, sequence: List[Sample]):\n\n with open(get_path() + \"/sequence/\" + filename, \"ab+\") as file:\n for sample in sequence:\n pickle.dump(sample, file, 
pickle.HIGHEST_PROTOCOL)", "def save_data(self, f): \n if not self.sampling:\n self.convert_to_array()\n np.save(f, self.reads)", "def write( self, strFilename, bAsRawFile = False, bQuiet = False ):\n if( len( self.data ) < 1 ):\n logging.warning( \"Wav is empty, NOT saving it (to '%s').\" % (strFilename) ) \n return False\n timeBegin = time.time()\n file = open( strFilename, \"wb\" )\n if( not bAsRawFile ):\n self.writeHeader( file )\n self.writeData( file, bAddBeginOfDataChunk = not bAsRawFile )\n file.close()\n rDuration = time.time() - timeBegin\n if not bQuiet: logging.info( \"sound.Wav: successfully saved wav to '%s', duration: %5.3fs, datasize: %d (saving takes %5.3fs)\" % (strFilename, self.rDuration, self.nDataSize, rDuration) )\n return True", "def storeWavelengths(self, nm):\n pre = \"w,0\"\n d = {\"wavelength_nm\": list(nm)}\n self._writeline(pre, str(d))", "def save_bin(words,data,fname):\n\n out=open(fname,\"wb\")\n\n rows,dims=data.shape\n out.write(\"{} {}\\n\".format(rows,dims).encode(\"utf-8\"))\n counter=0\n\n for i,w in enumerate(words):\n out.write(w.encode(\"utf-8\"))\n out.write(\" \".encode(\"utf-8\"))\n out.write(struct.pack(\"{}f\".format(dims),*data[i,:]))\n counter+=1\n \n out.close()\n print(\"Model saved to\",fname,file=sys.stderr)", "def do_wave(l, wave_type, r, g, b, duration, repeat):\n command = create_wave_command(\n wave_type, r, g, b, duration, repeat\n )\n l.write(command)", "def cut_audio(old_path, new_path, start, end):\r\n fs, data = wavfile.read(old_path)\r\n indx_start = int(start*fs)\r\n indx_end = int(end*fs)+1\r\n wavfile.write(new_path,fs,data[indx_start:indx_end])\r\n\r\n return True", "def write_data(infbfile,begin_N,dur_N,outfbfile):\n infbfile.seek_to_sample(begin_N)\n for i in range(begin_N,(begin_N+dur_N)):\n data = infbfile.read_sample()\n data.tofile(outfbfile)", "def fillSongsArray():\r\n counter = 1\r\n notealt = 0.0\r\n frequenz = 0\r\n notencounter = 0\r\n\r\n file2write.write(\"\\n{\")\r\n for instrument in midi_data.instruments:\r\n while counter == 1:#first line of the instrument e.g piano it will only save the treble clef and NOT the bass clef\r\n for note in instrument.notes:\r\n if note.start - notealt >= 0.15: #If the note is a break it will save it as such\r\n value = dauer/((note.start - notealt)*1000)\r\n y = round(value)\r\n file2write.write(\"{0,\")\r\n file2write.write(str(y+1))\r\n file2write.write(\"},\")\r\n\r\n else:\r\n frequenz = int(pretty_midi.note_number_to_hz(note.pitch)) #convert the midi-note-number to a frequency with function of the library\r\n value = dauer/((note.end - note.start)*1000) #calculates the duration of the note\r\n x = round(value)\r\n file2write.write(\"{\")\r\n file2write.write(str(frequenz))\r\n file2write.write(\",\")\r\n file2write.write(str(x))\r\n file2write.write(\"},\")\r\n notealt = note.end\r\n counter += 1\r\n file2write.write(\"},\")\r\n #file2write.write(\"};\\n\")\r", "def on_stop(self):\n self.songs.save_songs(FILE_NAME)", "def create_audio_file():\n # Get the response from boto3\n raw_audio = generate_audio()\n # pull the Audiostream object from the response from boto3\n raw_audio = raw_audio[\"AudioStream\"]\n # create output location\n # process the whole block\n with closing(raw_audio) as audio:\n with open(\"output_audio.mp3\", \"wb\") as file:\n file.write(raw_audio.read())", "def GenerateSinewav(self, dut_file_path, channel, wav_duration):\n with file_utils.UnopenedTemporaryFile(suffix='.wav') as file_path:\n cmd = audio_utils.GetGenerateSineWavArgs(file_path, 
channel,\n _DEFAULT_FREQ_HZ, wav_duration)\n process_utils.Spawn(cmd.split(' '), log=True, check_call=True)\n self._dut.link.Push(file_path, dut_file_path)", "def split_diphones(wav_path, outdir=None):\n tg = ml.parsing.textgrid_reader.read(textgrid_path(wav_path))\n word = os.path.splitext(os.path.basename(wav_path))[0]\n\n wav_dir = os.path.dirname(wav_path)\n diphones_dir = os.path.join(wav_dir, \"diphones\")\n\n if not os.path.exists(diphones_dir):\n os.mkdir(diphones_dir)\n\n wav = AudioSegment.from_file(wav_path)\n for (begin, end, diphone) in tg[u'phones']:\n diphone = diphone.strip().replace(\"-\", \"_\")\n if len(diphone) > 0 and diphone[0] != \".\":\n diphone_file = \"{}_{}.wav\".format(diphone, word)\n diphone_path = os.path.join(diphones_dir, diphone_file)\n\n # Works in milliseconds\n segment = wav[(begin * 1000):(end * 1000)]\n print(\"Saving {} ({} - {})\".format(diphone_path, begin, end))\n segment.export(diphone_path, format=\"wav\")\n elif diphone[0] == \".\":\n print(\"skipping {}\".format(diphone))", "def set_audio_sink(core, filenameOrHandle):\n\tres = wave.open(filenameOrHandle, \"wb\")\n\tres.setnchannels(2)\n\tres.setsampwidth(2)\n\tres.setframerate(SNES_OUTPUT_FREQUENCY)\n\tres.setcomptype('NONE', 'not compressed')\n\n\tdef audio_sample(left, right):\n\t\t# We can safely use .writeframesraw() here because the header will be\n\t\t# corrected once we call .close()\n\t\tres.writeframesraw(sndstruct.pack(left, right))\n\n\tcore.set_audio_sample_cb(audio_sample)\n\n\treturn res", "def record_audio(self):\n stream = self.audio.open(format=DEFAULT_FORMAT,\n channels=DEFAULT_CHANNELS,\n rate=DEFAULT_RATE,\n input=True,\n frames_per_buffer=DEFAULT_CHUNK_SIZE)\n\n print(\"Recording...\")\n\n for i in range(0, int(DEFAULT_RATE / DEFAULT_CHUNK_SIZE * RECORD_SECONDS)):\n data = stream.read(DEFAULT_CHUNK_SIZE)\n self.frames.append(data)\n\n print(\"Done.\")\n\n stream.stop_stream()\n stream.close()", "def convert_to_wav (filename, name, origpath, wavpath, mono):\n print(\"Converting {0} to .wav...\".format(filename))\n if not re.match(r\".*_\\d+$\",name):\n # If filenames do include video titles\n name = name.rsplit('_',1)[0]\n\n channel, vid_num = name.rsplit('_', 1)\n channel = re.sub(r'[^A-Za-z1-9]', '', channel)\n newname = '_'.join([channel, vid_num])\n\n exportname = newname + \".wav\"\n filepath = path.join(origpath, filename)\n\n if not path.exists(wavpath):\n makedirs(wavpath)\n exportPath = path.join(wavpath, exportname)\n sound = AudioSegment.from_file(filepath,\"mp4\")\n if mono == True:\n sound = sound.set_channels(1)\n sound.export(exportPath, format=\"wav\")", "def write_WFs(folder, filename_prefix='', indices=None, WFs=None, extension=settings.HEAVY_AUDIO_WRITE_EXTENSION, sample_rate=settings.DEFAULT_SAMPLE_RATE):\n if WFs is None:\n raise ValueError('save_WFs expects either a WF list or 2-D np array')\n\n is_array = isinstance(WFs, np.ndarray)\n if is_array:\n n_WFs = WFs.shape[0]\n else:\n n_WFs = len(WFs)\n\n if indices is None:\n indices = range(n_WFs)\n if len(indices) != n_WFs:\n raise ValueError('save_WFs expects as many indices as there are WFs')\n\n file_names = []\n file_paths = []\n for i in indices:\n file_name = '{0}_{1}.{2}'.format(filename_prefix, rank_4_audacity(i), extension)\n file_path = '{0}/{1}'.format(folder, file_name)\n if is_array:\n sf.write(file_path, WFs[i, :], sample_rate)\n else:\n sf.write(file_path, WFs[i], sample_rate)\n file_names.append(file_name)\n file_paths.append(file_path)\n\n return file_paths, file_names", "def 
wavPlayer(data, rate, scale=False, autoplay=False):\r\n #if np.max(abs(data)) > 1 or scale:\r\n # data = data/np.max(abs(data))\r\n #data = (2**13*data).astype(np.int16)\r\n \r\n buffer = BytesIO()\r\n buffer.write(b'RIFF')\r\n buffer.write(b'\\x00\\x00\\x00\\x00')\r\n buffer.write(b'WAVE')\r\n \r\n buffer.write(b'fmt ')\r\n if data.ndim == 1:\r\n noc = 1\r\n else:\r\n noc = data.shape[1]\r\n \r\n bits = data.dtype.itemsize * 8\r\n sbytes = rate*(bits // 8)*noc\r\n ba = noc * (bits // 8)\r\n buffer.write(struct.pack('<ihHIIHH', 16, 1, noc, rate, sbytes, ba, bits))\r\n\r\n # data chunk\r\n buffer.write(b'data')\r\n buffer.write(struct.pack('<i', data.nbytes))\r\n\r\n if data.dtype.byteorder == '>' or (data.dtype.byteorder == '=' and sys.byteorder == 'big'):\r\n data = data.byteswap()\r\n\r\n buffer.write(data.astype(np.int16).tostring())\r\n\r\n # Determine file size and place it in correct position at start of the file.\r\n size = buffer.tell()\r\n buffer.seek(4)\r\n buffer.write(struct.pack('<i', size-8))\r\n \r\n val = buffer.getvalue()\r\n autoplay = \" autoplay=\\\"autoplay\\\"\"*autoplay + \"\"\r\n \r\n src = \"\"\"<audio controls=\"controls\" style=\"width:600px\"{autoplay}>\r\n <source controls src=\"data:audio/wav;base64,{base64}\" type=\"audio/wav\" />\r\n Your browser does not support the audio element.\r\n </audio>\"\"\".format(base64=base64.b64encode(val).decode(\"ascii\"), autoplay=autoplay)\r\n display(HTML(src))", "def rec_one_shot(self, sec, file_name=None):\n self.__open_noncallback_stream()\n frames = []\n for i in range(int(self.RATE / self.CHUNK * sec)):\n data = self.stream.read(self.CHUNK)\n data = np.fromstring(data, dtype=np.int16)\n frames.append(data)\n self.stream.stop_stream()\n if file_name is not None:\n with wave.open(file_name, 'wb') as wav_file:\n wav_file.setnchannels(self.CHANNELS)\n wav_file.setsampwidth(self.recorder.get_sample_size(self.FORMAT))\n wav_file.setframerate(self.RATE)\n wav_file.writeframes(b''.join(frames))\n frame = np.concatenate(frames, 0)\n self.stop_streaming()\n return frame", "def save_waveform(self, chan=None):\n t, y, pre = self.device.retrieve_current_waveform()\n meta = self.create_meta()\n if chan != None:\n meta['Channel'] = chan\n for name in pre:\n meta[name] = pre[name]\n data = {'meta' : meta,\n 't' : t,\n 'y' : y}\n rsp = daq.Rsp('save', data, meta)\n self.shot += 1\n self.r_queue.put(rsp)", "def save_timit_pitch():\n timit_names = []\n pitch_intensity_tables = []\n\n wav_txt_file_names = glob.glob(os.path.join(timit_pitch_data_path, '*.wav.txt'))\n for wav_txt_file in wav_txt_file_names:\n pitch_intensity = pd.read_csv(wav_txt_file, delimiter='\\t', dtype=np.float64, na_values=['?'])\n pitch_intensity = pitch_intensity.dropna()\n pitch_intensity.loc[pitch_intensity.pitch == 0, 'pitch'] = np.NaN\n pitch_intensity.loc[pitch_intensity.intensity == 0, 'intensity'] = np.NaN\n pitch_intensity['log_hz'] = np.log(pitch_intensity['pitch'])\n pitch_intensity['erb_rate'] = convert_hz(pitch_intensity['pitch'], \"erb\")\n pitch = pitch_intensity['log_hz']\n pitch_intensity['rel_pitch_global'] = (pitch - np.mean(pitch))/np.std(pitch)\n pitch = pitch_intensity['erb_rate']\n pitch_intensity['rel_pitch_global_erb'] = (pitch - np.mean(pitch))/np.std(pitch)\n\n timit_name = wav_txt_file.split(os.sep)[-1][:-8]\n\n timit_names.append(timit_name)\n pitch_intensity_tables.append(pitch_intensity)\n\n timit_pitch = pd.concat(pitch_intensity_tables, keys=timit_names)\n #print(np.mean(timit_pitch['log_hz'])) # -> 4.9406, (no log: 147.0387)\n 
#print(np.std(timit_pitch['log_hz'])) # -> 0.3112, (no log: 48.59846)\n timit_pitch['abs_pitch'] = (timit_pitch['log_hz'] - np.mean(timit_pitch['log_hz']))/np.std(timit_pitch['log_hz'])\n timit_pitch['abs_pitch_erb'] = (timit_pitch['erb_rate'] - np.mean(timit_pitch['erb_rate']))/np.std(timit_pitch['erb_rate'])\n timit_pitch['abs_pitch_change'] = timit_pitch['abs_pitch'].diff()\n timit_pitch['abs_pitch_erb_change'] = timit_pitch['abs_pitch_erb'].diff()\n #print(np.mean(timit_pitch.intensity)) # -> 63.000\n #print(np.std(timit_pitch.intensity)) # -> 15.537\n timit_pitch['zscore_intensity'] = (timit_pitch.intensity - np.mean(timit_pitch.intensity))/np.std(timit_pitch.intensity)\n\n filename = os.path.join(processed_timit_data_path, 'timit_pitch.h5')\n timit_pitch.to_hdf(filename, 'timit_pitch')\n return timit_pitch", "def writeMIDI(filepath,data):\n\n\n\twith open(filepath,\"wb\") as f:\n\n\t\t## Writing the MIDI file header\n\t\tf.write(\"MThd\")\n\t\tf.write(struct.pack(\">ihhh\",6,1,len(d[\"tracks\"]),1000)) # Length of the header, MIDI type 1, number of track, 1000 ticks per quarter\n\n\t\tfor x in data[\"tracks\"]:\n\t\t\t# Reordering all the sound events by increasing time\n\t\t\ttrackdata = [[y[\"type\"],y[\"note\"],y[\"velocity\"],y[\"time\"]] for y in x]\n\t\t\ttrackdata = sorted(trackdata, key=lambda x:x[3])\n\t\t\t\n\t\t\t# MIDI files deal with time differences, which we calculate here\n\t\t\ttrackdata_diff = [[trackdata[0][0],trackdata[0][1],trackdata[0][2],trackdata[0][3]]]\n\t\t\tfor i in range(1,len(trackdata)):\n\t\t\t\ttrackdata_diff.append([trackdata[i][0],trackdata[i][1],trackdata[i][2],trackdata[i][3]-trackdata[i-1][3]])\n\t\t\ttrackdata_diff = [[x[0],x[1],x[2],encodeVL(int(2000.*x[3]))] for x in trackdata_diff] ## 2000= 1000 ticks per quarter * 2 (because 120 bpm = 2 quarters per seconds)\n\t\t\t\n\t\t\t# Number of bytes of the track chunk: 15 for standard info (120bpm, fake notes, etc.), 4 for the tail, the rest depends on the data\n\t\t\ttrackdata_numbytes = 15+4+3*len(trackdata_diff)+sum([len(x[3]) for x in trackdata_diff])\n\t\t\t# If sustain is used\n\t\t\t#trackdata_numbytes += 4 \n\t\t\t\n\t\t\t## Writing the track chunk to the MIDI file\n\t\t\tf.write(\"MTrk\")\n\t\t\tf.write(struct.pack(\">i\",trackdata_numbytes)) # Length of the track chunk\n\n\t\t\t# 120 bpm\n\t\t\tf.write(struct.pack(\">BBBB\",0,0xFF,0x51,0x03))\n\t\t\tf.write(struct.pack(\">BBB\",0x07,0xA1,0x20))\n\t\t\t\n\n\t\t\t# Fake note at the beginning to mark 0 time\n\t\t\tf.write(struct.pack(\">BBBB\",0,0x90,0,40))\n\t\t\tf.write(struct.pack(\">BBBB\",1,0x80,0,40))\n\t\t\t\n\t\t\t#Sustain pedal on\n\t\t\t#f.write(struct.pack(\">BBBB\",2,0xB0,0x40,0x41))\n\n\t\t\t# Writing one note\n\t\t\tfor x in trackdata_diff:\n\t\t\t\tfor y in x[3]:\n\t\t\t\t\tf.write(struct.pack(\">B\",y))\n\t\t\t\tif x[0]==\"ON\":\n\t\t\t\t\tf.write(struct.pack(\">BBB\",0x90,x[1],x[2]))\n\t\t\t\tif x[0]==\"OFF\":\n\t\t\t\t\tf.write(struct.pack(\">BBB\",0x80,x[1],x[2]))\n\t\t\t\n\t\t\t## End of the track chunk\n\t\t\tf.write(struct.pack(\">BBBB\",0,0xFF,0x2F,0))", "def generate_wavplot(song_name):\n\n filepath = features[features.inferred_name.str.title() == song_name].feature_file.values[0]\n rate, wave = wavfile.read(filepath)\n mono = np.mean(wave, axis=1)\n mono.shape\n plt.figure(figsize=(20,6))\n plt.axis('off')\n plt.plot(mono[::mono.shape[0]//6000], color='white')\n plt.tight_layout;\n friendly_song_name = '_'.join(song_name.split()).lower()\n output_filepath = './static/wavplots/' + friendly_song_name + '.png'\n 
plt.savefig(output_filepath, bbox_inches='tight', pad_inches=0, transparent=True)\n return output_filepath", "def webm_to_wav(webm_file: str):\n wav_file = webm_file.replace(\".webm\", \".wav\")\n wav = AudioSegment.from_file(webm_file)\n wav.export(wav_file, format=\"wav\")\n return wav_file", "def _writeWiggleVar(trackName, trackDescription, allCounts, wigOut):\n wigFile = open(wigOut, \"w\")\n wigFile.write(\"track type=wiggle_0 name='%s' description='%s' visibility=2\\n\" % (trackName,\n trackDescription))\n\n for name in allCounts.keys():\n start = 0\n end = max(allCounts[name].keys())\n\n wigFile.write(\"variableStep chrom=%s span=1\\n\" % (name))\n\n for pos in sorted(allCounts[name].keys()):\n wigFile.write(\"%s\\t%s\\n\" % (pos,allCounts[name][pos]))\n\n\n wigFile.close()", "def load_audio(file_path):\n # load the audio file in its original sampling rate\n audio_data, sr = librosa.load(file_path, sr=sampling_rate)\n\n # get the common file name\n file_name = file_path.split(\"/\")[-1]\n file_name = file_name.split(\".wav\")[0]\n\n # calculate number of samples in the time duration needed\n num_samples = int(sr*time_duration)\n\n # get the cut-off audio signals and save them\n for i in np.arange(num_audio_files):\n audio_cut_data = cut_sample(audio_data, num_samples)\n file_path=dir_to_save + file_name + \"_\" + str(i+1) + \".wav\"\n save_sample(audio_cut_data, file_path, sr)\n print(f\"generating signal {str(i)}, its length {len(audio_cut_data)} by cutting the original signal\")" ]
[ "0.6919104", "0.6847627", "0.672993", "0.6697336", "0.66271955", "0.6598934", "0.65593815", "0.65480053", "0.65218306", "0.6511665", "0.64606607", "0.6440448", "0.63984156", "0.6338785", "0.63294864", "0.6315476", "0.63118845", "0.63059294", "0.6304708", "0.6291267", "0.62757117", "0.6230143", "0.6198605", "0.61878157", "0.6102107", "0.6099962", "0.60927933", "0.60619295", "0.6056393", "0.6048941", "0.6047668", "0.5988708", "0.5978977", "0.5970552", "0.5914261", "0.5913791", "0.5910785", "0.5894348", "0.58427626", "0.57939655", "0.5757947", "0.5756954", "0.57461345", "0.57365096", "0.5732714", "0.57275516", "0.57220566", "0.5706426", "0.5699579", "0.5690336", "0.5682572", "0.5677494", "0.5662849", "0.56330985", "0.5630398", "0.56192356", "0.5605534", "0.56053364", "0.55982536", "0.55929387", "0.5591035", "0.5579563", "0.55716544", "0.5555678", "0.5525672", "0.5497725", "0.54900086", "0.5479204", "0.54781985", "0.54691356", "0.5458135", "0.5453484", "0.5444185", "0.544044", "0.5422919", "0.54109836", "0.54100686", "0.539366", "0.5392348", "0.5378004", "0.5370847", "0.5363176", "0.53444535", "0.53306365", "0.5321931", "0.5317966", "0.5315327", "0.53147954", "0.5312611", "0.5299452", "0.5293861", "0.5287521", "0.52867866", "0.5285107", "0.5284913", "0.52809215", "0.5274716", "0.5272779", "0.5268722", "0.52671653" ]
0.6976282
0
Selects which value to return.
def _return(*args): to_return = () for arg in args: cond, value = arg if cond: to_return += (value,) if len(to_return) == 1: return to_return[0] return to_return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getValue(name, default=None):", "def get_value(self):", "def getValue(self,value):\n if value in self.header.keys():\n return self.header[value]\n if value in self.subintinfo.keys():\n return self.subintinfo[value][-1]\n if self.params is None:\n return None\n return self.params.get(value) #will return None if non-existent", "def get(self, index, default):\n try:\n return self[index].value\n except Exception, e:\n #if default is not None:\n return default\n #else:\n # raise e", "def get_value(self):\r\n return input(\"Enter your choice :\")", "def get_value(self):\n pass", "def getValue(self, state):\n return self.values[state]", "def get_val(self):\n return", "def value(self):\n if self._check_:\n f = self.getChecked\n else:\n f = self.getSelected\n return f()", "def getvalue(self):\n ...", "def getvalue(self):\n ...", "def get_value(self, section, option):\n raise NotImplementedError()", "def get(self, node):\n if node in self.val:\n return self.val[node]\n else:\n return self.initial", "def SelectValue(self, cnxn, col, default=None, where=None, **kwargs):\n row = self.SelectRow(\n cnxn, cols=[col], default=[default], where=where, **kwargs)\n return row[0]", "def get_value(self):\n raise NotImplementedError", "def _get_value(self):\n \n return self._value", "def get_value(self):\n return self._value", "def getValue(self):\n return random.choices(self.indices, weights=self.weights, k=1)[0]", "def _get_value(self):\n return self.__value", "def get_val(self, **kwargs):\n return self._value", "def pick(self):\n\n pickerdict = {}\n current_value = 0\n\n if len(self.choices) == 0:\n return None\n\n if len(self.choices) == 1:\n return self.choices[0][0]\n\n for option in self.choices:\n pickerdict[current_value] = option[0]\n current_value += option[1]\n\n picker = random.randint(0, current_value)\n last_value = 0\n result = None\n sorted_keys = sorted(pickerdict.keys())\n\n found = False\n for key in sorted_keys:\n if key >= picker:\n result = pickerdict[last_value]\n found = True\n continue\n last_value = key\n\n if not found:\n result = pickerdict[sorted_keys[-1]]\n\n return result", "def getValue(self, state):\r\n return self.values[state]", "def select(self):\n\n return self.p[0], self.p[1]", "def getSelected(self):\n selected = self.defaultChoice\n if self.tableSelected is not None:\n selected = self.tableSelected.getString(self.defaultChoice)\n return self.map.get(selected)", "def getValue(self) -> int:\n ...", "def getvalue(self, name, *default):\n try:\n return self.getattr(name).value\n except KeyError:\n if default:\n return default[0]\n raise", "def get_value(self):\n if callable(self.supplier):\n return self.supplier()\n return None", "def result(value):\n return None, value, None", "def selected(self):\n return self._choices[self._selected][0]", "def current_val(self):\n try:\n return self.listbox.get(self.listbox.curselection()[0])\n except IndexError:\n raise KeyError(\"Nothing selected\")", "def select(self):\n idx, c, result_msg, op = self._choose()\n return (c, result_msg)", "def get_value(self, *args, **kwargs) -> Optional[ValueType]: # pragma: no cover\n raise NotImplementedError", "def _fetch_value(cursor, index=1):\n return cursor.fetchone().popitem()[index]", "def getValue(self):\n return self._row[self.name]", "def current_choice(self):\n\t\treturn self.choice_data_list[self.select_index]", "def get(self, key, default=None):\n def find(found_item, _):\n \"\"\" This is the closer function which will be passed to find by key function , if key found than return the 
value \n otherwise return blanck\"\"\"\n if found_item:\n return found_item[1]\n else:\n return default\n\n return self._find_by_key(key, find)", "def get_val(self, arg_idx):\n\t\tidx = arg_idx-1\n\t\tif idx >= len(self.__par_modes) or self.__par_modes[idx] == 0:\n\t\t\treturn self.memory[self.memory[self.ptr+arg_idx]]\n\t\telif self.__par_modes[idx] == 1:\n\t\t\treturn self.memory[self.ptr + arg_idx]", "def get_value_by_index(self, index):\n return self['value'][index]", "def getSelected(*args):", "def getValue(self, state):\n return self.values[state]", "def getValue(self, state):\n return self.values[state]", "def getValue(self, state):\n return self.values[state]", "def getValue(self, state):\n return self.values[state]", "def getValue(self, state):\n return self.values[state]", "def getValue(self, state):\n return self.values[state]", "def getValue(self, state):\n return self.values[state]", "def getValue(self, state):\n return self.values[state]", "def getValue(self, state):\n return self.values[state]", "def getValue(self, state):\n return self.values[state]", "def getValue(self, state):\n return self.values[state]", "def getValue(self, state):\n return self.values[state]", "def getval(self):\r\n return self.value", "def choose(self):\n\n i = bisect.bisect(self._p, random.random())\n return self._values[i]", "def get_value(self):\n return None", "def __getitem__(self, v):\r\n return self.unif.get(v, (v, None))[0]", "def _value(self):\n return self.device.value(*self._id[1:])", "def __getitem__(self, value):\n return self.d.get(value, 0)", "def select(self, value) -> str:", "def selected(self):\n return self.__result", "def _get_value(o):\n return value(o, exception=False)", "def select_function(_):\n try:\n self.update_infobox()\n except KeyError:\n pass\n # return self.current_val()", "def _single_getitem(self, key):\n try:\n return self._dict[key]\n except KeyError:\n return self.default", "def _get_value(self, value_column):\n pass", "def returnOne(self):\n try:\n # self.checkValName()\n self.cursor.execute(self.query % self.val)\n self.results = self.conn.fetchone()\n except Exception as e:\n print \"Query failed: %s \" % e", "def __getitem__(self, value):\n\n # Select the correct index\n if isinstance(value, six.integer_types):\n idx = self.by_value\n elif isinstance(value, six.string_types):\n idx = self.by_name\n else:\n raise KeyError(value)\n\n # Look up the value in that index\n return idx[value]", "def select_scalar(self, *args, **kwargs):\n row = self.db_connection.execute(*args, **kwargs).fetchone()\n return None if row is None else row[0]", "def __call__(self):\n return self.value", "def get_selection(self, name):\n print 'hi being selected in plotdata'\n return self.selections.get(name, None)", "def selected_value(self):\n option = self.selected_option\n return option.value if option else None", "def get_value(self):\n if self.name in ['1','2','3','4','5','6','7','8', '9', '10']:\n return int(self.name)\n if self.name in ['J','Q','K']:\n return 10\n if self.name == 'A':\n return 1", "def getValue(self):\n raise NotImplementedError(\"Define in derived class\")", "def get_val(self):\n return self.value", "def getValue(self):\n \n if len(self._possibilities) is 1:\n \n return self._possibilities.copy().pop()\n \n else:\n \n return None", "def get(self, index):\n\n return self.values[index]", "def returnOne(self):\n try:\n # self.checkValName()\n self.dbc.execute(self.query, self.val)\n self.results = self.dbc.fetchone()\n except MySQLdb.Error, e:\n print \"Query failed: 
%s \" % e", "def get_value(default):\n output(\" [\" + default + \"]: \")\n response = read_chomped_line()\n if response == \"\":\n return default\n else:\n return response", "def get_value(self, key):\n pass", "def _select_single(self, disc):\n sqlstmt = \"SELECT h FROM %s WHERE d=?\" % self.VIEW\n pickup = self.cursor.execute(sqlstmt, (disc,))\n picked = pickup.fetchone()\n if picked is not None:\n # picked = (h,)\n return picked[0]\n else:\n raise KeyError(str(disc))", "def get_value(self, field):\n field = self.find_first(field)\n if field is not None:\n return field.value\n return None", "def find_selected(self):\r\n return None", "def _selection ( self, nick ) :\n \n if not self.__selections_.has_key ( self.name() ) :\n self.__selections_[ self.name() ] = {} \n \n return self.__selections_[ self.name() ].get( nick , None )", "def getValue(self, state):\n util.raiseNotDefined()", "def getValue(self, state):\n util.raiseNotDefined()", "def get_value(self, locator: Locator) -> Optional[str]:\n element = self.ctx.get_element(locator)\n get_value_pattern = self.get_value_pattern(element)\n\n if get_value_pattern:\n func_name = get_value_pattern.__name__\n self.logger.info(\n \"Retrieving the element value with the %r method.\", func_name\n )\n value_pattern = get_value_pattern()\n return value_pattern.Value if value_pattern else None\n\n raise ActionNotPossible(\n f\"Element found with {locator!r} doesn't support value retrieval\"\n )", "def select(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"select\")", "def select(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"select\")", "def value(self, value: Optional[int] = None) -> Optional[int]:\n ...", "def value(\n self, key: _K = 0, default: t.Optional[object] = None\n ) -> t.Any:\n try:\n index = self.index(key)\n except (IndexError, KeyError):\n return default\n else:\n return self[index]", "def get(self, key, default=None):\n key = self._validate_key(key)\n sql = u\"\"\"\n SELECT `value` FROM `{table}` WHERE key = ?\n \"\"\".format(table=self.name)\n\n r = self.conn.execute(sql, (key,)).fetchone()\n\n if r:\n return self.convert_out(r['value'])\n\n return default", "def get(self, index):\n if 0 <= index <= len(self.nums):\n return self.nums[index]\n return -1", "def getValue(self, name):\n values = self.__get('values')\n return values[name]", "def _get_value(self, value):\r\n try:\r\n return int(value)\r\n except ValueError:\r\n return self.registers[value]", "def get_value(self):\n return self.value", "def get_value(self):\n return self.value", "def get_value(self):\n return self.value", "def getValue(self):\n raise Exception(\"getValue function not defined with class {0}\".format(self.__class__.__name__))", "def get_selected(self):\n return self.selected", "def pick(\n self, values: Counter, default: Optional[Union[Callable, Value]] = None,\n raise_error: Optional[bool] = False\n ) -> Value:\n if len(values) == 1:\n return values.most_common(1)[0][0]\n elif not raise_error:\n return default\n raise ValueError('received set of {} values'.format(len(values)))", "def get(self, state):\n return state[self.primary or self]", "def getSelectedItem(*args):", "def __getitem__(self, item):\r\n return self.select(item)" ]
[ "0.6543452", "0.6478195", "0.64064497", "0.6406406", "0.640156", "0.6355146", "0.6348945", "0.634662", "0.634554", "0.6221234", "0.6221234", "0.6220085", "0.6185762", "0.61845344", "0.6182689", "0.61672604", "0.61638516", "0.61610883", "0.6157245", "0.6145623", "0.614436", "0.6143934", "0.6128151", "0.6119701", "0.610969", "0.6108915", "0.610479", "0.6090514", "0.6085301", "0.60548496", "0.60491383", "0.6041142", "0.60369855", "0.6032232", "0.6027032", "0.60234493", "0.60228914", "0.60111517", "0.60081506", "0.6006784", "0.6006784", "0.6006784", "0.6006784", "0.6006784", "0.6006784", "0.6006784", "0.6006784", "0.6006784", "0.6006784", "0.6006784", "0.6006784", "0.599525", "0.5993795", "0.59924114", "0.5984491", "0.5983377", "0.5978891", "0.5976345", "0.59735465", "0.5966652", "0.5965808", "0.5965322", "0.59635097", "0.5958147", "0.5955559", "0.59477764", "0.5941488", "0.59313333", "0.5916049", "0.5902231", "0.5895164", "0.5891614", "0.58898103", "0.5881058", "0.5872152", "0.58509666", "0.5846827", "0.58435994", "0.5842561", "0.584088", "0.58407015", "0.584006", "0.584006", "0.58377844", "0.58329463", "0.58329463", "0.58251476", "0.5821172", "0.5818497", "0.58082813", "0.5802708", "0.5802646", "0.5799716", "0.5799716", "0.5799716", "0.5797895", "0.5795798", "0.57951605", "0.5792799", "0.57906115", "0.5788792" ]
0.0
-1
Fetches data and metadata from a URL.
def get_content(url, etag=None, use_http_compression=True, return_etag=False, return_status_code=False, return_datetime=False, return_response=False): error_msg = 'HTTP GET %s' % (url,) # Sets the headers. headers = {} if etag: headers['If-None-Match'] = etag if not use_http_compression: headers['Accept-Encoding'] = '' downloaded_date = (return_datetime and [timezone.now()] or [None])[0] # Makes the request. try: response = requests.get(url, headers=headers) except StandardError as e: raise RequestsModuleError('%s - Requests module error\n%s' % (error_msg, e)) data = response.content etag = (return_etag and [response.headers.get('ETag', None)] or [None])[0] status_code = response.status_code return _return((True, data), (return_etag, etag), (return_status_code, status_code), (return_datetime, downloaded_date), (return_response, response))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __fetch_from_url(url: str) -> Any:\n song_information: Any = None\n try:\n # Send the request and load the returned contents.\n req = request.Request(url, headers={\n 'User-Agent': Config.Config.get_user_agent()\n })\n response = request.urlopen(req)\n contents: str = response.read().decode('utf-8')\n except (HTTPError, TimeoutError) as ex:\n Logger.Logger.log_error(str(ex))\n Logger.Logger.log_error('Request failed for URL: ' + url)\n return\n # Parse the response from the endpoint as a JSON encoded string\n data: Any = json.loads(contents)\n # Check if response contains at least one result, otherwise return \"None\".\n if data['resultCount'] > 0:\n song_information = data\n return song_information", "def FetchUrlContent(url):\n content = memcache.get(url)\n if content:\n return content\n\n request = urlfetch.fetch(url)\n\n if request.status_code == 200:\n content = request.content\n memcache.add(url, content, 60 * 60)\n return content\n\n raise LookupError('Unable to fetch URL. Response code: ' +\n str(request.status_code))", "async def fetch_data(self, url: str) -> dict:\n async with self.bot.http_session.get(url) as r:\n return await r.json()", "def fetch_url_feed(self, url, **args):\n return self.fetch(\"/url\", url=url, **args)", "def urlfetch(self, url, **kwargs):\n logging.debug('Fetching %s with kwargs %s', url, kwargs)\n resp = urlfetch.fetch(url, deadline=999, **kwargs)\n\n if resp.status_code == 200:\n return resp.content\n else:\n logging.warning('GET %s returned %d:\\n%s',\n url, resp.status_code, resp.content)\n self.handler.response.headers.update(resp.headers)\n self.handler.response.out.write(resp.content)\n raise exc.status_map.get(resp.status_code)(resp.content)", "def fetch_data(data_url):\n return requests.get(data_url).content", "def get_fred_data(url):\n pass", "def fetch(url):\n content = requests.get(url).text\n if \"Error\" in content:\n raise ValueError(f\"Cannot read from: {url}\")\n return content", "def fetch(self,url=URL):\n\t\tlog.info('downloading latest PHE case data')\n#\t\tself.data=lookup_json(url)\n\t\tself.fetch_csv() #JSON discontinued; switched back to CSV\n\t\tself.edition=self.latest_samples\n\t\tlog.info(f'Last samples from {self.edition}')", "def fetchJson(url):", "def fetch(self, url, listener, useCache = True): #$NON-NLS-1$\r", "def fetch_content(self, url):\n # log.debug(\"Fetching content from: %s\", url)\n prepare_curl_callback = lambda x: x.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_GSSNEGOTIATE)\n self.http.fetch(url, self.handle_response, prepare_curl_callback=prepare_curl_callback, auth_username=':')", "def GetDataFromURL(self, url):\n try:\n deftimeout = socket.getdefaulttimeout()\n socket.setdefaulttimeout(1)\n try:\n logging.debug('Slide fetching data from %s' % url)\n u = urllib.urlopen(url)\n data = u.read()\n return data\n except:\n logging.exception('Uh oh!')\n return None\n finally:\n socket.setdefaulttimeout(deftimeout)", "def fetch(cls, url):\n delta = time.time() - cls._time_last_fetched\n wait_time = TIME_TIL_RETRY - delta\n if wait_time > 0:\n time.sleep(wait_time)\n resp = requests.get(url)\n cls._time_last_fetched = time.time()\n resp.raise_for_status()\n return resp", "def downloadData(url):\n \n content = urllib2.urlopen(url)\n return content", "def fetch_url(self, url: str):\n log.debug(f\"Fetching {url}\")\n answer = self.session.get(url, timeout=self.timeout)\n answer.raise_for_status()\n\n # returning raw answer object, because due to redirects we may need to\n # double check answer.url to proceed\n return 
answer", "def _load_data(self, url, options=None, location=None):\n # Set API key in query parameters\n params = { \"api-key\": self.key }\n\n # Add options to query parameters\n if options is not None:\n params.update(options)\n\n # Load the data from the API, raise error if there's an invalid status code\n res = self.session.get(self.protocol + url, params=params, timeout=(4, 10))\n if res.status_code == 401:\n raise ValueError(\"Invalid API Key\")\n elif res.status_code == 404:\n raise RuntimeError(\"Error 404: This page is not available\")\n res.raise_for_status()\n\n if orjson is None:\n parsed_res = res.json()\n else:\n parsed_res = orjson.loads(res.content)\n\n # Get the data from the usual results location\n if location is None:\n results = parsed_res.get(\"results\")\n\n # Sometimes the results are in a different location, this location can be defined in a list\n # Load the data from that location\n else:\n results = parsed_res\n for loc in location:\n results = results.get(loc)\n\n return results", "def fetch_song_data(url):\r\n response = requests.get(url)\r\n return response.text", "def fetch_url(url):\n logger.info(\"Resolving \" + url)\n try:\n resp = requests.get(url, timeout=1.5)\n resp.raise_for_status()\n return {\n \"resolved_url\": resp.url,\n \"raw_content\": resp.text\n }\n except Exception as e:\n logger.error('Error fetching %s' % url, e)\n return {\n \"resolved_url\": url,\n \"raw_content\": \"\",\n \"url_error\": str(e)\n }", "def url_fetch(self, url):\n user_agent = random.choice(self.conf.user_agents)\n if self.isCompress == True:\n headers = {\n 'Uesr-Agent': user_agent,\n \"Accept-Encoding\": \"gzip,deflate\",\n \"Accept-Charset\" : \"UTF-8,*\"\n }\n else:\n headers = {\n 'Uesr-Agent': user_agent,\n \"Accept-Charset\" : \"UTF-8,*\"\n }\n raw_data = ''\n try:\n conn = httplib.HTTPConnection(self.proxy, timeout=3.0)\n conn.request('GET', url, None, headers)\n response = conn.getresponse()\n raw_data = response.read()\n except Exception as err:\n self.logger.error('connect error[%s]' % err)\n return '999', 'Request failed', ''\n finally:\n conn.close()\n \n content = ''\n if self.isCompress == True:\n if response.status == 200:\n try:\n stream = StringIO.StringIO(raw_data)\n decompressor = gzip.GzipFile(fileobj=stream)\n content = decompressor.read()\n except:\n self.logger.error('status[%s] len_raw_data[%d]' % (response.status, len(raw_data)))\n return '998', 'content err', ''\n else:\n if response.status == 200:\n content = raw_data \n\n return response.status, response.reason, content", "def _request_data(self, url):\n connection = httplib.HTTPConnection(self.url)\n connection.request(\"GET\", url)\n response = connection.getresponse()\n\n if response.status != 200:\n raise Exception(response.reason)\n\n data = response.read()\n response.close()\n\n return json.loads(data)", "def fetch(self, url, headers=DEFAULTHEADERS):\n logger = self.loggers['http']\n request = urllib2.Request(url, headers=headers)\n try:\n response = urllib2.urlopen(request)\n except urllib2.HTTPError:\n logger.error(\"failed to retrieve the resource at %s\" % url)\n raise\n urlgot = response.geturl()\n rawcontent = response.read()\n if urlgot != url:\n logger.info(\"successfully retrieved resource from %s, redirected from %s\" % (urlgot, url))\n self.http['redirect'] = True\n else:\n logger.info(\"successfully retrieved resource from %s\" % url)\n self.http['redirect'] = False\n rheaders = response.info()\n \n # store useful info on the object for later access\n self.http['request'] = 
{}\n self.http['request']['headers'] = headers\n self.http['urlsought'] = url\n self.http['urlgot'] = urlgot\n self.http['response'] = response\n self.http['response_headers'] = {}\n for k in sorted(rheaders.keys()): \n logger.debug(\"response header %s: '%s'\" % (k, rheaders[k]))\n self.http['response_headers'][k.strip().lower()] = rheaders[k].strip() \n self.documenturl = urlgot\n self.rawcontent = rawcontent", "def fetch_url(url):\n try:\n soup = bs(urlopen(url).read(), 'html.parser')\n return soup\n except:\n print \"Couldnot download the content from the URL\", url\n return \"\"", "def fetch(url, verbose=False):\n\n resp = requests.get(url)\n if verbose:\n print(resp.json())\n if resp.status_code == 200:\n\n resp=resp.json()\n return resp\n else:\n return None", "def fetch_dataset(url, pandas_impl=pandas):\n\n print(f'fetching dataset at {url}')\n return pandas_impl.read_csv(url)", "def fetch_50(url):\n\n results = requests.get(url,headers = headers).json()\n return results", "async def fetch(self, session, url):\n async with session.get(url) as response:\n if response.status != 200:\n response.raise_for_status()\n response = await response.text()\n return json.loads(response)", "def get_data(url):\n response = get(url, timeout=10)\n \n if response.status_code >= 400:\n raise RuntimeError(f'Request failed: { response.text }')\n \n return response.json()", "def fetch_save(url):\n\n name = url.split(\"/\")[-1]\n response = requests.get(url, stream=True)\n if response.status_code == 200:\n with open(f\"{DATA_PATH}/{name}\", \"wb\") as f:\n f.write(response.raw.read())\n else:\n logging.info(f\"Failed {url} download\")", "def _by_url(session: Session, url: URL) -> UnifiedDataset:\n r = session.get(str(url))\n if r.status_code == 404:\n raise NotFound(str(url))\n data = response.successful(r).json()\n return _from_json(url, data)", "def fetch(self, path, **kwargs):\n self.http_client.fetch(self.get_url(path), self.stop, **kwargs)\n return self.wait()", "def data_loader(self, url, type_of):\n i = url.rfind('/')\n data_name = url[(i + 1):]\n data_def = {\n \"displayName\": data_name,\n \"url\": url\n }\n if type_of == \"csv\":\n data_loader = self.csv_data_loader\n else:\n data_loader = self.json_data_loader\n\n return Downloader(data_def).download(data_loader)", "def fetch_metadata(requests_impl=requests):\n\n print(f'fetching metadata at {Network.METADATA_URL}')\n return requests_impl.get(Network.METADATA_URL).json()", "def fromurl(cls, url: str):\n return cls.parse_obj(requests.get(url).json())", "def fetch(self, url):\n self.log.info(\"Fetching URL: \" + url)\n\n r = requests.get(url, verify=False)\n # raise an HTTPError on badness\n r.raise_for_status()\n\n # this decodes r.content using a guessed encoding\n return r.text", "def get_data(self, url):\n\n req = urllib2.Request(url)\n # urlencode the query dictionary\n try:\n r = urllib2.urlopen(req)\n result = r.read()\n except:\n result = 'The url: %s is not responding.' 
% (url)\n return result", "def load_url_content(url):\n try:\n r = requests.get(url)\n if r.ok:\n return r.text\n else:\n return None\n except Exception:\n return None", "def _fetch(url, ssl_verify = True):\n req = Request(url)\n if ssl_verify:\n page = urlopen(req)\n else:\n ctx = ssl.create_default_context()\n ctx.check_hostname = False\n ctx.verify_mode = ssl.CERT_NONE\n\n page = urlopen(req, context=ctx)\n content = page.read().decode('utf-8')\n page.close()\n return content", "def fetch_url(self, url: str) -> Union[Dict, None]:\n\n try:\n req = requests.get(url)\n req.raise_for_status()\n res = req.json()\n except (requests.HTTPError, json.JSONDecodeError) as e:\n logging.warning(f'{self.__class__.__name__} failed to retrieve/parse {url}')\n # logging.debug(e)\n return\n\n # safe-check for empty response from server\n if not res:\n logging.warning(f\"{self.__class__.__name__} empty response from {url}\")\n return\n\n return res", "def _get_data(self, url: str)->dict:\n data = None\n resp = self._get(url)\n if resp:\n data = resp.json()['data']\n return data", "def fetch(self, url: furl) -> str:\n try:\n contents = self._download(url)\n except requests.ConnectionError as err:\n logger.exception(f\"Request failed with {err}\")\n click.secho(\n f\"The URL {url} could not be downloaded. Either your network is unreachable or the URL is broken.\"\n f\" Check the URL, fix your connection, or use \"\n f\" {OptionEnum.OFFLINE.as_flake8_flag()} / {OptionEnum.OFFLINE.as_envvar()}=1\",\n fg=\"red\",\n err=True,\n )\n return \"\"\n return contents", "def load(self, url):\n pass", "def load(self, url):\n pass", "def get_url_data(self, url):\n # print \"opening: \" + url\n request = urllib2.Request(url)\n base64string = '%s:%s' % (self.username, self.key)\n request.add_header(\"Authorization\", \"ApiKey %s\" % base64string)\n response = urllib2.urlopen(request)\n data = json.loads(response.read())\n return data", "def _raw_get(self, url):\n logger.debug('Fetching URL %s', url)\n self._conn.request('GET', url, None, {\n 'Accept-Encoding': 'gzip',\n 'User-Agent': USER_AGENT,\n 'Cookie': self.cookie,\n 'Connection': 'keep-alive',\n 'DNT': '1',\n })\n self._resp = self._conn.getresponse()\n if self.cookie == '':\n complete_cookie = self._resp.getheader('Set-Cookie')\n # Cookie won't be available is already blocked\n if complete_cookie is not None:\n self.cookie = complete_cookie[:complete_cookie.find(';')]\n logger.debug('Cookie: %s' % self.cookie)", "async def fetch(url, session):\n async with session.get(url) as response:\n return await response.read()", "def fetch_object(url):\n print(' GET ' + url)\n session = requests.Session()\n retry = Retry(connect=3, backoff_factor=15)\n adapter = HTTPAdapter(max_retries=retry)\n session.mount('http://', adapter)\n session.mount('https://', adapter)\n r = session.get(url)\n # Covering internal server errors by retrying one more time\n if r.status_code == 500:\n time.sleep(5)\n r = requests.get(url, allow_redirects=True)\n elif r.status_code != 200:\n print(f\"Problem with request: {str(r)}\")\n raise RuntimeError(\"Non-200 status code\")\n return r", "def _get(self, url, **kwargs):\n return self._http.get(self.cluster + url, timeout=self.timeout, **kwargs)", "async def fetch(session, url):\n async with session.get(url) as response:\n return await response.text()", "def get_data(self, url):\n return self.get(url).get('data', [])", "def get(self, url):\n\n\t\ttry:\n\t\t\trequest = urllib2.Request(url)\n\t\t\trequest.add_header('User-Agent', 
self.user_agent)\n\n\t\t\tlogging.debug('Get.get - getting url ' + url)\n\n\t\t\tresult = urllib2.urlopen(request)\n\n\t\texcept: raise RuntimeError('unable to open url')\n\n\t\treturn result", "def get(self, url):\n return json.loads(self.as_source.urlopen(url).read())", "def http_get(url):\n\n count = 0\n result = ''\n ok = False\n while count < 3 and not ok:\n try:\n result = urllib.urlopen(url).read()\n ok = True\n except:\n count += 1\n return result", "def fetch(self):\n # This method also sets self._results_filtered and\n # self._urltable.\n page = self._conn.fetch_page(self._ddg_url.relative())\n\n if logger.isEnabledFor(logging.DEBUG):\n import tempfile\n fd, tmpfile = tempfile.mkstemp(prefix='ddgr-response-')\n os.close(fd)\n with open(tmpfile, 'w', encoding='utf-8') as fp:\n fp.write(page)\n logger.debug(\"Response body written to '%s'.\", tmpfile)\n\n parser = DdgParser(news=self._ddg_url.news)\n parser.feed(page)\n\n self.results = parser.results\n self._results_filtered = parser.filtered\n self._urltable = {}\n for r in self.results:\n self._urltable.update(r.urltable())", "async def _fetch(self, session, url, proxy=None, raw=False, which_site=False):\n print(url)\n result = None\n site = None\n if 'hare' in url: # {'Unknown': -1, 'Pixnet': 0, 'Hares': 1}\n site = self._websites['Hares']\n elif 'pixnet' in url:\n site = self._websites['Pixnet']\n else:\n site = self._websites['Unknown']\n\n count = 1\n while count <= 2:\n soup = ''\n status = 0\n try:\n async with session.get(url, proxy=proxy) as response:\n source_code = await response.text('utf-8')\n status = response.status\n soup = source_code if raw else BeautifulSoup(source_code, 'lxml')\n except Exception as e:\n print('Connection error: ' + str(e))\n soup = None\n finally:\n result = (url, soup, status, site) if which_site else (url, soup, status)\n if status != 0:\n return result\n if 'searcharticle' not in url:\n count += 1\n result = (url, soup, status, site) if which_site else (url, soup, status)\n return result", "def data_loader(self, url, type_of):\n\n data_loader = None\n if type_of == \"csv\":\n data_loader = self.csv\n elif type_of == \"json\":\n data_loader = self.json\n elif type_of == \"parquet\":\n data_loader = self.parquet\n elif type_of == \"avro\":\n data_loader = self.avro\n else:\n RaiseIt.type_error(data_loader, [\"csv\", \"json\", \"parquet\", \"avro\", ])\n\n i = url.rfind('/')\n data_name = url[(i + 1):]\n data_def = {\n \"displayName\": data_name,\n \"url\": url\n }\n return Downloader(data_def).download(data_loader, type_of)", "def _fetch(\n cls, url: str, headers: Mapping[str, str], params: Mapping[str, Any]\n ) -> Tuple[List[EventType], Optional[str]]:\n status_url = cls._post_query(url, headers, params)\n # Await a while before polling the results\n time.sleep(0.1)\n result_url = cls._poll_status(status_url, headers, params)\n data, headers = cls._get_results(result_url, headers, params)\n result = json.loads(data)\n return result, headers.get(\"x-next-token\")", "def _extract_data(self):\n if self.URL_type == \"youtube\" or self.URL_type == \"ytmusic\":\n self._get_youtube_data_url()\n elif self.URL_type == \"soundcloud\":\n self._get_soundcloud_data()", "def load_data(url: str):\n\n page = requests.get(url=url)\n soup = BeautifulSoup(page.content, 'html.parser')\n return soup", "def request_data(url): \n requests_cache.install_cache('data_cache')\n while True:\n data = requests.get(url)\n if not data.status_code == 200 or \"try again later\" in data.text:\n continue\n else:\n break\n 
return data.text", "async def fetch(self, url=None, method='GET', body=None):\n log.debug('fetching \\n method: [%s] \\n url: %s \\n body: %s',\n method,\n url,\n body)\n if not method:\n method = HttpMethod.GET\n status: int = None\n text: str = None\n if method == HttpMethod.GET:\n async with self._http_session.get(url) as response:\n status = response.status\n text = await response.text()\n elif method == HttpMethod.POST:\n async with self._http_session.post(url, data=body) as response:\n log.debug('fetch POST response: %r', response)\n status = response.status\n text = await response.text()\n else:\n raise NotImplementedError(\n f\"HTTP requst method {method} not implemented yet. \"\n \"Contributions welcome!\")\n log.debug('fetch result status: %d, text: %s', status, text)\n return (status, text)", "def get_data_with_http_request(self, url):\n _LOGGER.debug(\"[SERVICES][HTTPCLIENT] sendHTTPRequest\")\n try:\n request = self.request(url)\n response = self.create_response(request)\n data = self.decode_response(response)\n except TypeError:\n _LOGGER.error(\"Could not fetch data from \"+url)\n return None\n return data", "def read_url(url):\n response = requests.get(url)\n return response.text", "async def fetch(self, url, params={}, loop=None, max_workers=5, **extra):\n\n result = {}\n pool = ProcessPoolExecutor(max_workers)\n extra.update({\n \"loop\": loop\n })\n parsed_url = url_concat(url, **params)\n if inspect.iscoroutinefunction(self.on_fetch):\n result = await self.on_fetch(parsed_url, extra)\n else:\n loop = loop or asyncio.get_event_loop()\n result = await loop.run_in_executor(pool, self.on_fetch, parsed_url, extra)\n return result", "def fetch_temp_data(url):\n res = requests.get(url)\n return res.json()", "def request(self, url):\n requested_data = None\n try:\n requested_data = urllib.request.Request(url, headers=self.headers)\n except urllib.error.URLError as err:\n _LOGGER.error(\"Could not connect: \" + err.reason)\n except urllib.error.HTTPError as err:\n _LOGGER.error(\"Server return code \" + err.code + \". \" +\n err.reason + \" Error caused by the following \"\n \"header(s): \" + err.headers + \".\")\n except urllib.error.ContentTooShortError as err:\n _LOGGER.error(\"Could not connect! The amount of the downloaded \"\n \"data is less than the expected amount (given by \"\n \"the Content-Length header).\")\n except ValueError as err:\n _LOGGER.error(\"Invalid url, please insert a valid url.\")\n return requested_data", "def fetch_csv_from_url(url):\n\t\n\t#cache avoidance.\n\twith requests_cache.disabled():\n\t\tr = requests.get(url)\n\t\tif r.status_code == 200:\n\t\t\treturn r.iter_lines()", "def _FetchBuilderData(builder_url):\n data = None\n try:\n url = urllib2.urlopen(builder_url)\n except urllib2.URLError, e:\n print ('urllib2.urlopen error %s, waterfall status page down.[%s]' % (\n builder_url, str(e)))\n return None\n if url is not None:\n try:\n data = url.read()\n except IOError, e:\n print 'urllib2 file object read error %s, [%s].' % (builder_url, str(e))\n return data", "def fetch(self) -> None:\n workflow_spec_path = os.path.join(self._output_dir, self._spec)\n self._download_file(self._parsed_url.original_url, workflow_spec_path)", "def get(self, url):\n \n content = \"\"\n if hasattr(http.client, \"HTTPSConnection\"): \n url_options = urlparse(url)\n\n conn = http.client.HTTPSConnection(url_options.netloc)\n conn.request('GET', url_options.path + '?' 
+ url_options.query)\n content = conn.getresponse().read().decode('utf-8')\n conn.close()\n else: \n p = os.popen('curl -k \"' + url + '\"')\n content = p.read()\n p.close() \n\n return content", "def download_data(self, format = 'srt'):\n resp, content = httplib2.Http(\".cache\").request(self.url, \"GET\")\n suburl = json.loads(content)['url']\n resp, content = httplib2.Http(\".cache\").request(suburl, \"GET\")\n\n return content", "def read_url(url):\n return requests.get(url).text", "def fetch_housing_data(housing_url=HOUSING_URL, housing_path=HOUSING_PATH):\n logging.info(\"Fetch housing data.....\")\n os.makedirs(housing_path, exist_ok=True)\n tgz_path = os.path.join(housing_path, \"housing.tgz\")\n urllib.request.urlretrieve(housing_url, tgz_path)\n housing_tgz = tarfile.open(tgz_path)\n housing_tgz.extractall(path=housing_path)\n housing_tgz.close()", "async def fetch(url: str, session: ClientSession) -> Tuple[str, bytes]:\n async with session.get(url) as response:\n resp = await response.read()\n return url, resp", "def _fetch(self):\n self._data = self._get(self.url)\n\n if self._data['released_errata'] is not None:\n self._released_errata = Erratum(errata_id=self._data[\n 'released_errata']['id'])\n\n for errata_dict in self._data['all_errata']:\n errata = Erratum(errata_id=errata_dict['id'])\n self._all_errata.append(errata)\n\n self._signed_rpms = self._data.get('rpms_signed')\n\n for et_file in self._data['files']:\n self._files.append(et_file['path'])", "def _get(self, url):\n return self._request(url)", "def parse(self):\n \n r = requests.get(self.url)\n if r:\n self.title = fetch_title(self.url)\n self.domain = self.fetch_domain()\n self.favicon = self.fetch_favicon()\n self.topics = self.classify_topics()\n self.description = self.fetch_description()\n return self", "def _getFeedContent(self, url, excludeRead=False, continuation=None, loadLimit=20, since=None, until=None):\r\n parameters = {}\r\n if excludeRead:\r\n parameters['xt'] = 'user/-/state/com.google/read'\r\n if continuation:\r\n parameters['c'] = continuation\r\n parameters['n'] = loadLimit\r\n if since:\r\n parameters['ot'] = since\r\n if until:\r\n parameters['nt'] = until\r\n contentJson = self.httpGet(url, parameters)\r\n return json.loads(contentJson, strict=False)", "def fetch_meta(url, filter=None):\n\n link = LinkMeta(url)\n link.parse()\n\n # returns the local variables in this functions scope as a \n # dict.\n return link", "def get_records_from_url(url):\n with requests.get(url) as response:\n source = response.text\n return parseString(source)", "async def http_get(url):\n async with aiohttp.ClientSession() as session:\n async with session.get(url) as r:\n return await r.text()", "def fetch(url: str, raise_for_status: bool=True, **query_params) -> Tuple[int, Dict[str, Any]]:\n # HINT carefully read the requests documentation to figure out the cleanest way to raise a requests error.\n #query_params is a dict, **query_params is keyword arg \n res_obj = requests.get(url, params=query_params)\n if raise_for_status:# default to True \n res_obj.raise_for_status()# if 200 result is None \"All is well\"\n return (res_obj.status_code, res_obj.json()) # returns code and dict ", "def loadu(self, url, **kwargs):\n return self.load(self.open(url, **kwargs), **kwargs)", "def _get_result(url, etag=None, last_modified=None, use_discovery=False):\n _validate_url(url)\n\n result = feedparser.parse(url, etag=etag, modified=last_modified)\n # update URL for any redirects that feedparser followed\n url = 
result.get('href', url)\n\n if _is_not_modified_result(result):\n raise FeedNotModifiedError\n elif not _is_valid_result(result):\n if use_discovery:\n url = _discover_url(result)\n return _get_result(url)\n else:\n _fail(url, \"Failed to download or parse feed\")\n else:\n return url, result", "def __get(self, url, headers=None):\n return self.__req(url, \"GET\", headers=headers)", "def get_data_from_web():\n pass", "def downloadData(url):\r\n\r\n data = urllib2.urlopen(url)\r\n csvdata = data.read()", "def _get(url):\n url = urlparse(url)\n conn = HTTPConnection(url.hostname, url.port)\n conn.request('GET', url.path+url.query)\n return conn.getresponse().fp.read()", "def _fetch_data(url: str, d: datetime) -> pd.DataFrame:\n return pd.read_json(url)", "def download_data(self, url: str, source_type: str) -> None:\n r = None # request\n\n # download data from nextcloud\n if source_type == \"nextcloud\":\n token = url\n r = requests.get(\n os.environ[\"NC_WEBDAV_URL\"], auth=(token, os.environ[\"NC_PASSWORD\"])\n )\n\n # download data from generic URLs\n if source_type == \"generic_url\":\n s = requests.Session()\n headers = {\n \"User-Agent\": \"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:88.0) Gecko/20100101 Firefox/88.0\"\n }\n s.headers.update(headers)\n r = s.get(url)\n\n f_name = None # file name\n\n if \"content-disposition\" in r.headers.keys():\n d = r.headers[\"content-disposition\"]\n f_name = re.findall('filename=\"(.+)\"', d)[0]\n else:\n f_name = url.split(\"/\")[-1]\n\n # save file\n try:\n with open(Path(os.environ[\"DATA_PATH\"]) / f_name, \"wb\") as f:\n for chunk in r.iter_content(self.chunk_size):\n f.write(chunk)\n except OSError:\n print(f\"Error: {list(Path(os.environ['DATA_PATH']).iterdir())}\")", "def get(self, url):\r\n response = self.requestHelper.get(url)\r\n return self.process(response)", "def fetch_url(session, url):\n return session.get(url).text", "def get_at_url(self, url):\n class NullDevice():\n def write(self, s):\n pass\n\n def get_gallery_item(id):\n \"\"\"\n Special helper method to get gallery items.\n\n The problem is that it's impossible to distinguish albums and\n images from each other based on the url. And there isn't a common\n url endpoints that return either a Gallery_album or a Gallery_image\n depending on what the id represents. So the only option is to\n assume it's a Gallery_image and if we get an exception then try\n Gallery_album. Gallery_image is attempted first because there is\n the most of them.\n \"\"\"\n try:\n # HACK: Problem is that send_request prints the error message\n # from Imgur when it encounters an error. This is nice because\n # this error message is more descriptive than just the status\n # code that Requests give. But since we first assume the id\n # belong to an image, it means we will get an error whenever\n # the id belongs to an album. 
The following code temporarily\n # disables stdout to avoid give a cryptic and incorrect error.\n\n # Code for disabling stdout is from\n # http://coreygoldberg.blogspot.dk/2009/05/\n # python-redirect-or-turn-off-stdout-and.html\n original_stdout = sys.stdout # keep a reference to STDOUT\n sys.stdout = NullDevice() # redirect the real STDOUT\n return self.get_gallery_image(id)\n # TODO: Add better error codes so I don't have to do a catch-all\n except Exception:\n return self.get_gallery_album(id)\n finally:\n sys.stdout = original_stdout # turn STDOUT back on\n\n if not self.is_imgur_url(url):\n return None\n\n objects = {'album': {'regex': \"a/(?P<id>[\\w.]*?)$\",\n 'method': self.get_album},\n 'comment': {'regex': \"gallery/\\w*/comment/(?P<id>[\\w.]*?)$\",\n 'method': self.get_comment},\n 'gallery': {'regex': \"(gallery|r/\\w*?)/(?P<id>[\\w.]*?)$\",\n 'method': get_gallery_item},\n # Valid image extensions: http://imgur.com/faq#types\n # All are between 3 and 4 chars long.\n 'image': {'regex': \"(?P<id>[\\w.]*?)(\\\\.\\w{3,4})?$\",\n 'method': self.get_image},\n 'user': {'regex': \"user/(?P<id>[\\w.]*?)$\",\n 'method': self.get_user}\n }\n parsed_url = urlparse(url)\n for obj_type, values in objects.items():\n regex_result = re.match('/' + values['regex'], parsed_url.path)\n if regex_result is not None:\n obj_id = regex_result.group('id')\n initial_object = values['method'](obj_id)\n if obj_type == 'image':\n try:\n # A better version might be to ping the url where the\n # gallery_image should be with a requests.head call. If\n # we get a 200 returned, then that means it exists and\n # this becomes less hacky.\n original_stdout = sys.stdout\n sys.stdout = NullDevice()\n if getattr(initial_object, 'section', None):\n sub = initial_object.section\n return self.get_subreddit_image(sub, obj_id)\n return self.get_gallery_image(obj_id)\n except Exception:\n pass\n finally:\n sys.stdout = original_stdout\n return initial_object", "def _get_raw_data(self, url, series):\n url = self._get_url(url, series)\n try:\n response = self.http.request(url, headers=self._reqheaders)\n except httplib2.ServerNotFoundError as e:\n raise TVDBConnectError(e.message), None, sys.exc_info()[2]\n rep = response[0]\n log.debug(\n 'http-status:%s,content:%s', \n rep['status'], \n rep['content-type']\n )\n if int(rep['status']) >= 400:\n raise TVDBConnectError(\n 'Failed to get \"%s\" from thetvdb. 
errno:%s' % (\n series, rep['status']),\n rep['status']\n ) \n return response[1]", "def __fetch_data(self, url):\n try:\n response = urlopen(url)\n root = ET.fromstring(response.read())\n except HTTPError as exc:\n root = ET.fromstring(exc.read())\n raise ValueError(root.get('message'))\n return root", "def download_http(self, url):\n\n # Set things up.\n # ==============\n\n out = None\n headers = {}\n if (url.username is not None) and (url.password is not None):\n tmp = base64.b64encode(':'.join([url.username, url.password]))\n headers['Authorization'] = \"Basic %s\" % tmp\n\n\n # Toe the waters.\n # ===============\n # We start with an HTTP HEAD request to check the status.\n\n conn = httplib.HTTPConnection(url.netloc)\n conn.request(\"HEAD\", url.path, '', headers)\n r = conn.getresponse()\n conn.close()\n if self.verbose:\n print >> sys.stderr, url, r.status, ''\n\n\n # Bail.\n # =====\n # Short-cut when we just care whether it's a package.\n\n if url.path.endswith('/'):\n out = r.status == 200\n\n\n elif r.status == 200:\n\n # Wade in.\n # ========\n # If the status is positive we check to see if we've already\n # downloaded the latest copy.\n\n etag = r.getheader('etag', '')\n lm = r.getheader('last-modified', '')\n key = sha.new(str(url) + etag + lm).hexdigest()\n\n if not self.cachedir:\n raise ValueError(\"netimp.importer.cachedir not set\")\n if not os.path.isdir(self.cachedir):\n raise IOError( \"netimp.importer.cachedir not found \"\n + \"(%s)\" % self.cachedir\n )\n\n path = join(self.cachedir, key)\n if os.path.isfile(path):\n out = open(path, 'rb')\n else:\n\n # Dive in!\n # ========\n # We don't have this module locally yet: download it for real.\n\n conn = httplib.HTTPConnection(url.netloc)\n conn.request(\"GET\", url.path, '', headers)\n r = conn.getresponse()\n if r.status == 200: # just in case!\n fp = open(path, 'w+b')\n fp.write(r.read())\n fp.flush()\n fp.close()\n out = open(path, 'rb')\n conn.close()\n\n return out", "def fetch(self, url, data = None, headers = None):\n if not headers:\n headers = {}\n self.url = urllib.parse.urljoin(self.url, url)\n if len(self.fost):\n signed, signed_headers = 'X-FOST-Headers', []\n for header, value in list(self.fost['headers'].items()):\n signed += ' ' + header\n signed_headers.append(value)\n headers[ header ] = value\n utcnow = str(datetime.datetime.utcnow())\n path = urllib.parse.urlsplit(self.url).path\n document = '%s %s\\n%s\\n%s\\n%s' % (\n \"POST\" if data else \"GET\", path,\n utcnow,\n '\\n'.join([signed] + signed_headers),\n data or urllib.parse.urlsplit(self.url).query\n )\n headers['X-FOST-Timestamp'] = utcnow\n headers['X-FOST-Headers'] = signed\n headers['Authorization'] = \"FOST %s:%s\" % (\n self.fost['key'],\n sha1_hmac(self.fost['secret'], document)\n )\n return self.opener.open(urllib.request.Request(self.url, data, headers))", "def get(self, url):\n return self._request('GET', url)", "def read_url(url):\n try:\n response = requests.get(url)\n except requests.ConnectionError:\n content = '{\"error\": \"Bad Connection\"}'\n except MissingSchema: # The url does not exist\n content = '{\"error\": \"Bad Url\"}'\n else:\n if response.status_code == 200:\n content = response.text\n else:\n content = '{\"error\": \"' + response.reason + '\"}'\n\n return content", "def downloadData(url : str, descriptor : str):\n assets = datapackage.Package(url).resources\n\n for data in filter(lambda x: x.tabular and x.descriptor['name'] == descriptor, assets):\n response = requests.get(data.descriptor['path'])\n return 
io.StringIO(response.content.decode('utf-8'))", "async def _fetch(self, session, timeout=10):\n\n self.log.debug(\"Fetching url: %s\", self.url)\n with async_timeout.timeout(timeout):\n try:\n async with session.get(self.url) as response:\n if response.status != 200:\n self.log.error(\"HTTP Error %s fetching feed %s\", response.status, self.url)\n return await self._handle_fetch_failure('no data', f\"HTTP error {response.status}\")\n return await response.text()\n\n except asyncio.exceptions.CancelledError:\n self.log.error(\"Timeout fetching feed %s\", self.url)\n await self._handle_fetch_failure('Timeout', \"Timeout while fetching feed\")\n\n except Exception as e:\n self.log.exception(\"Error fetching feed %s\", self.url)\n etype = '.'.join((type(e).__module__, type(e).__name__))\n await self._handle_fetch_failure('Exception', f\"{etype} fetching feed: {e}\")" ]
[ "0.7141515", "0.68348056", "0.67148185", "0.6683118", "0.6625893", "0.6613337", "0.6553073", "0.6546756", "0.6502782", "0.6493208", "0.6449416", "0.6427715", "0.6386104", "0.6324223", "0.6319196", "0.63145965", "0.6295364", "0.6295271", "0.6283739", "0.6245581", "0.6242461", "0.6239492", "0.6230558", "0.6229188", "0.6207095", "0.61692715", "0.61570865", "0.6145608", "0.61339843", "0.60997796", "0.6098545", "0.6092657", "0.6085226", "0.6083567", "0.60658973", "0.6062517", "0.60578734", "0.6055739", "0.6047762", "0.6021646", "0.6012883", "0.60120934", "0.60120934", "0.6011198", "0.60094994", "0.59973454", "0.599367", "0.5992037", "0.59890616", "0.598259", "0.5959343", "0.59538454", "0.5950832", "0.59464604", "0.59405035", "0.5933654", "0.5926804", "0.59198934", "0.5911495", "0.5905699", "0.59048295", "0.59041744", "0.589894", "0.5896556", "0.5894485", "0.58748186", "0.5863224", "0.58624935", "0.58619845", "0.58542436", "0.5849535", "0.5827586", "0.5826082", "0.58139944", "0.5786807", "0.57687724", "0.5768277", "0.5764176", "0.576319", "0.5752358", "0.5747254", "0.5745874", "0.57387227", "0.5738461", "0.5737201", "0.5728473", "0.57262045", "0.57249236", "0.5723925", "0.57232463", "0.5714161", "0.57103455", "0.57090336", "0.5707313", "0.5702784", "0.5698398", "0.56933963", "0.5687352", "0.5681599", "0.56807464", "0.5676295" ]
0.0
-1
Takes the input ends of all feed pipes and feeds odd numbers, starting from low up to high (both inclusive), in a round-robin fashion. The process ends by feeding -1 to all pipes; -1 is the sentinel value.
def distributor(ls_feed_pipe_open, low, high):
    def getNumber(low, high):
        i = low
        if i % 2 == 0:  # if i is even, then start from the next odd number, i + 1
            i += 1
        while i <= high:
            yield i
            i += 2  # no need to check even numbers, so skip them here at the beginning
        yield -1  # when the generator yields -1, it has reached high, so terminate

    next_pipe = 0
    number = getNumber(low, high)
    while True:
        msg = next(number)
        if msg == -1:  # the generator has reached high
            break
        else:
            # feed pipes in a round-robin fashion,
            # so that over time each generatePrime process experiences the same load
            ls_feed_pipe_open[next_pipe].send(msg)
            next_pipe += 1
            if next_pipe == len(ls_feed_pipe_open):
                next_pipe = 0
    for p in ls_feed_pipe_open:
        p.send(-1)  # -1 is the sentinel value for all generatePrime processes
    return 0
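The function above only covers the producer side. For context, here is a minimal, hypothetical driver showing how distributor could be wired to worker processes over one-way multiprocessing pipes, assuming distributor is defined at module level as above. The worker shown is only a stand-in for the generatePrime worker that appears further down, and the pipe count, seed primes, and low/high bounds are assumptions, not part of the original code.

import multiprocessing as mp

def worker(feed_recv, return_dict, ls_primes=(2, 3, 5, 7)):
    # Stand-in for generatePrime: consume odd candidates until the -1 sentinel.
    # The seed primes must cover every prime up to sqrt(high) for the trial
    # division below to be correct.
    found = []
    while True:
        n = feed_recv.recv()
        if n == -1:
            break
        if all(n % p for p in ls_primes[1:]):  # skip 2, since all candidates are odd
            found.append(n)
    return_dict[mp.current_process().pid] = found

if __name__ == "__main__":
    pipes = [mp.Pipe(duplex=False) for _ in range(3)]  # each pair is (recv_end, send_end)
    manager = mp.Manager()
    return_dict = manager.dict()

    workers = [mp.Process(target=worker, args=(recv_end, return_dict))
               for recv_end, _send_end in pipes]
    for w in workers:
        w.start()

    # distributor (defined above) only needs the send ends: it round-robins the
    # odd numbers in [low, high] and finally sends the -1 sentinel to every pipe.
    distributor([send_end for _recv_end, send_end in pipes], 10, 50)

    for w in workers:
        w.join()
    print(dict(return_dict))

Run as a script, this prints a dict mapping each worker's PID to the odd primes it happened to receive.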
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def odd():\n num = 0\n while True:\n yield num * (num & 1)\n num += 1", "def infinite_odd_generator():\n current = 1\n while True:\n yield current\n current = current + 2", "def input_pipe():\n x = ''\n while True:\n x = yield x\n yield # to keep the generator in lock step with input", "def odd_generator(limit):\n current = 1\n while current < limit:\n yield current\n current = current + 2", "def fission_pipes():\n def _pipes(num):\n return [base.BasePipe(i) for i in range(1, num + 1)]\n yield _pipes\n base.reset()", "def stage1(self):\n n = self.min\n while True:\n n, bin_ = self.sort_to_bin(n)\n if n is None:\n n = self.get_new_n(bin_)\n if n is None:\n break\n if self.viz:\n yield", "def testNumberPipeTwoLines(self):\n pl = Pipeline(loadInitFile=False)\n repl = REPL(pl)\n repl.runCommandLine('4')\n self.assertFalse(pl.inPipeline)\n self.assertEqual(4, pl.stdin)\n repl.runCommandLine('')\n self.assertEqual(4, pl.stdin)\n self.assertEqual(REPL.DEFAULT_PS1, repl.prompt)", "def generatePrime(ls_primes, feed_pipe,return_dict):\n local_primes = []\n while True:\n n = feed_pipe.recv()\n if n == -1: # sentinel given by distributor.\n break\n else:\n is_prime = True\n\n ##check for divisibility\n ## no need to check for 2 since all are odd numbers\n for prime in ls_primes[1:]:\n if n%prime == 0:\n is_prime = False\n break\n\n ##if the number is prime, append to global list\n if is_prime:\n local_primes.append(n)\n if len(local_primes) >0:\n return_dict[os.getpid()] = local_primes\n return return_dict\n return 0", "def io_pipe():\n r_fd, w_fd = os.pipe()\n with io.open(r_fd, 'rb', 0) as r, \\\n \t io.open(w_fd, 'wb', 0) as w:\n \tyield r, w", "def fission_pipe():\n yield base.BasePipe(1)\n base.reset()", "def run(self):\n assert len(self.elements) >= 2, \"In order flow, pipe needs 2 or more elements\"\n in_pipe = self.elements[0]\n other_pipes = self.elements[1:-1]\n out_pipe = self.elements[-1]\n\n self.make_assertions(in_pipe, other_pipes, out_pipe)\n\n for data in in_pipe.grasp():\n write = True\n\n for element in other_pipes:\n if isinstance(element, elements.DataPypElement):\n data = element.extend(data)\n elif isinstance(element, elements.FilterPypElement):\n if not element.stay(data):\n write = False\n break\n if write:\n out_pipe.extract(data)", "def pipemeter(cmd1, cmd2):\n\n proc1 = subprocess.Popen(cmd1, bufsize=0, shell=True, stdout=subprocess.PIPE)\n proc2 = subprocess.Popen(cmd2, bufsize=0, shell=True, stdin=subprocess.PIPE)\n bytes_piped = 0\n\n while True:\n data = proc1.stdout.read(CHUNKSIZE)\n length = len(data)\n if length == 0:\n break\n\n written = proc2.stdin.write(data)\n if written != length:\n raise RuntimeError(\"Write failed, wanted to write: {}, written={}\".format(length, written))\n\n bytes_piped += length\n\n proc1.stdout.close()\n proc2.stdin.close()\n\n return proc1.wait(), proc2.wait(), bytes_piped", "def run(self, data, rewrap=False, prefetch=0):\n if rewrap:\n data = [data]\n\n for pipe in self._pipes:\n pipe.feed(data)\n data = pipe\n else:\n iterable = self._prefetch_callable(data, prefetch) if prefetch else data\n for out_data in iterable:\n yield out_data", "def test_1_single_process():\n\n # ********************************************************\n # We will put this function in its own thread in test_1()\n def put_data_in_stream(stream):\n num_steps=5\n step_size=4\n for i in range(num_steps):\n data = list(range(i*step_size, (i+1)*step_size))\n stream.extend(data)\n run()\n return\n\n # ********************************************************\n # We 
will put these lines in a separate process in test_1()\n x = Stream('x')\n y = Stream('y')\n double(x, y)\n\n # *********************************************************\n # We will put these lines in a separate process in test_1().\n s = Stream(name='s')\n increment(y, s)\n print_stream(s, name=s.name)\n\n # *********************************************************\n # This function is executed in a separate thread in test_1().\n put_data_in_stream(x)", "def test_1_single_process():\n\n # ********************************************************\n # We will put this function in its own thread in test_1()\n def put_data_in_stream(stream):\n num_steps=5\n step_size=4\n for i in range(num_steps):\n data = list(range(i*step_size, (i+1)*step_size))\n stream.extend(data)\n run()\n return\n\n # ********************************************************\n # We will put these lines in a separate process in test_1()\n x = Stream('x')\n y = Stream('y')\n double(x, y)\n\n # *********************************************************\n # We will put these lines in a separate process in test_1().\n s = Stream(name='s')\n increment(y, s)\n print_stream(s, name=s.name)\n\n # *********************************************************\n # This function is executed in a separate thread in test_1().\n put_data_in_stream(x)", "def get_pipes(self, num = 1):\n if self.api is None:\n self.api = ChessAPI(self)\n self.api.start()\n return [self.api.create_pipe() for _ in range(num)]", "def testNumberPipeOneLine(self):\n pl = Pipeline(loadInitFile=False)\n repl = REPL(pl)\n repl.runCommandLine('4 |')\n self.assertAlmostEqual(4, pl.stdin)\n self.assertEqual(REPL.DEFAULT_PS1, repl.prompt)", "def task_10_generator_of_simple_numbers() -> Generator[int, None, None]:\n def is_num_simple(n):\n \"\"\"\n Return: True if n is a simple number or False if it is not\n \"\"\"\n for i in range(n, 1, -1):\n if n % i == 0 and i < n and n != 1:\n return False\n return True\n\n # generator part\n n = 2\n while n < 200:\n if is_num_simple(n):\n yield n\n n = n + 1", "def next_p2 (num):\n rval = 1\n while rval<num:\n rval <<= 1\n return rval", "def run_alternate(bandit1:SlotMachine, bandit2: SlotMachine, num_pulls_to_do):\n using = bandit1\n other = bandit2\n\n num_pulls_so_far = 0\n winning_count = 0\n while num_pulls_so_far < num_pulls_to_do:\n did_I_win = using.pull()\n num_pulls_so_far += 1\n winning_count += did_I_win # an alternative to if did_I_win: winning_count += 1\n\n temp = using\n using = other\n other = temp\n\n return winning_count", "def part_two(data: List[int]) -> int:\n app = IntCodeApplication(data, name=\"BOOST Part II\", flexible_memory=True)\n app.stdin.put(2)\n app.run()\n return app.stdout.get()", "def integers():\n i = 1\n while True:\n yield i\n i = i + 1", "def integers():\n i = 1\n while True:\n yield i\n i = i + 1", "def test_io_in_out_loop(self):\n self.l.output(conf_io=0x1, state_io=0x0)\n for i in range(10):\n state_d, state_io, count = self.l.output(state_io=0x1)\n self.assertTrue(state_io & 0x2)\n state_d, state_io, count = self.l.output(state_io=0x0)\n self.assertTrue(not state_io & 0x2)", "def incr(n=1):\n for i in xrange(n):\n pulse_hi(INCR)", "def next_p2(num):\n rval = 1\n while rval < num:\n rval <<= 1\n return rval", "def inout(input_, output_):\n while True:\n chunk = input_.read(1024)\n if not chunk:\n break\n output_.write(chunk)", "def split_into_steps(processes, input_limit=None, input_liquid_limit=None, belt_type='blue'):\n\tdef limit(item, input=False):\n\t\tif input and is_liquid(item) and 
input_liquid_limit is not None:\n\t\t\treturn input_liquid_limit\n\t\telif input and not is_liquid(item) and input_limit is not None:\n\t\t\treturn input_limit\n\t\telse:\n\t\t\treturn line_limit(item, belt_type)\n\n\tresults = []\n\tinputs = []\n\tfor process in processes.values():\n\t\tsteps = max(\n\t\t\t[\n\t\t\t\tthroughput / limit(item, process.is_input)\n\t\t\t\tfor item, throughput in process.inputs().items()\n\t\t\t] + [\n\t\t\t\tthroughput / limit(item, process.is_input)\n\t\t\t\tfor item, throughput in process.outputs().items()\n\t\t\t]\n\t\t)\n\n\t\t# note steps is fractional. by dividing original throughput by perfect number of steps,\n\t\t# each such step would be maximal - the problem is there would need to be a fractional\n\t\t# step at the end. So we put down floor(steps) maximal steps, followed by a step\n\t\t# scaled down to represent the fractional step.\n\t\twhole_steps, leftover = divmod(steps, 1)\n\t\tmaximal_step = process.rescale(process.throughput / steps)\n\t\tfractional_step = maximal_step.rescale(maximal_step.throughput * leftover)\n\n\t\tpart = [maximal_step] * whole_steps\n\t\tif leftover:\n\t\t\tpart.append(fractional_step)\n\n\t\tif process.is_input:\n\t\t\tinputs += part\n\t\telse:\n\t\t\tresults += part\n\n\treturn results, inputs", "def infinite_increment():\n i = 0\n while 1:\n yield i\n i += 1", "def _limit_helper(stream: Union[BinaryIO, Generator, List], limit: int) -> Generator:\n for value in stream:\n yield value\n if limit == 1:\n return\n else:\n limit = limit - 1 # FIXME", "def pipeline(filters):\n pipe = partial(reduce, lambda acc, f: f(acc), filters)\n bil = bilateral()\n\n def procme(img):\n img = bil(img)\n return pipe(img)\n\n return lambda img: map(procme, [img[:, :, 0], img[:, :, 1], img[:, :, 2]])", "def _LessPipe():\n try:\n # pylint: disable=unexpected-keyword-arg\n proc = subprocess.Popen(['less'],\n stdin=subprocess.PIPE,\n stdout=sys.stdout,\n encoding='utf-8')\n yield proc.stdin\n proc.stdin.close()\n proc.wait()\n except IOError:\n pass # Happens when less is quit before all data is written.\n except KeyboardInterrupt:\n pass # Assume used to break out of less.", "def test_pipe2():\n parser = CmdParser([posandtwo, valprog])\n out = parser.parse(\"posandtwo | valprog | posandtwo\")\n assert isinstance(out[0], ProgramNode)\n assert isinstance(out[1], PipeNode)\n assert isinstance(out[2], ProgramNode)\n assert isinstance(out[3], PipeNode)\n assert isinstance(out[4], ProgramNode)\n assert isinstance(out[5], EndOfCommandNode)", "def skip(n):\n\n if n >= 0:\n @filters\n def _dagpype_internal_fn_act_p(target):\n remaining = n\n try:\n while True:\n e = (yield)\n if remaining == 0:\n target.send(e)\n continue\n t = e.shape[0]\n if t > remaining:\n target.send(e[remaining :])\n remaining = 0\n else:\n remaining -= t\n except GeneratorExit:\n target.close()\n\n return _dagpype_internal_fn_act_p\n\n @filters\n def _dagpype_internal_fn_act_n(target):\n m = -n\n pending = collections.deque([])\n try:\n while True:\n pending.append((yield))\n while len(pending) > 0:\n first = pending.popleft()\n if sum((e.shape[0] for e in pending)) >= m: \n target.send(first)\n else:\n pending.appendleft(first)\n break\n except GeneratorExit:\n if sum((e.shape[0] for e in pending)) < m:\n target.close()\n return\n while m > 0:\n e = pending.pop()\n if e.shape[0] < m:\n m -= e.shape[0]\n else:\n e = e[: e.shape[0] - m]\n if e.shape[0] > 0:\n pending.append(e)\n break\n while len(pending) > 0:\n e = pending.pop()\n target.send(e)\n target.close()\n\n return 
_dagpype_internal_fn_act_n", "def stream_handler(args_dict: dict):\n\n color_sequence = args_dict['color_sequence']\n color_seq_len = args_dict['color_seq_len']\n color_itr = args_dict['color_itr']\n n_leds = args_dict['n_leds']\n\n step_sequence = [color_sequence[c % color_seq_len] for c in range(color_itr, n_leds + color_itr)]\n\n # Updating step for the next iteration.\n args_dict['color_itr'] = (color_itr + 1) % color_seq_len\n\n return step_sequence", "def next(self, *input):\n self.log.info(\"Starting next for task %s\" % self.__class__.__name__)\n\n self.comm.Barrier()\n\n # This should only be called once.\n try:\n if self.done:\n raise pipeline.PipelineStopIteration()\n except AttributeError:\n self.done = True\n\n # Extract a list of the tags for all input arguments\n input_tags = [\n (\n str(icont.attrs.get(\"tag\"))\n if isinstance(icont, memh5.MemDiskGroup)\n else \"\"\n )\n for icont in input\n ]\n\n # Process input and fetch output\n if self._no_input:\n if len(input) > 0:\n # This should never happen. Just here to catch bugs.\n raise RuntimeError(\"Somehow `input` was set.\")\n output = self.process()\n else:\n output = self.process(*input)\n\n # Return immediately if output is None to skip writing phase.\n if output is None:\n return\n\n # Insert the input tags into the output container\n output.attrs[\"input_tags\"] = input_tags\n\n output = self._process_output(output)\n\n # Increment internal counter\n self._count = self._count + 1\n\n self.log.info(\"Leaving next for task %s\" % self.__class__.__name__)\n\n # Return the output for the next task\n return output", "def power_readings():\n chain = [sin(x / (XMAX * 0.1)) * 0.1 + 0.6 for x in range(0, XMAX + 1)]\n cnt = 0\n\n def next():\n nonlocal chain, cnt\n next_reading = chain[cnt % len(chain)]\n cnt += 1\n return next_reading\n\n return next", "def inner_pipe (linkp, pn, dt, links1, links2, utype, dtype, p,\n H0, V0, H, V, H10, V10, H20, V20, pump, valve,\n friction, dVdt, dVdx,\n dVdt10, dVdx10, dVdt20, dVdx20):\n\n # Properties of current pipe\n g = 9.8 # m/s^2\n link1 = [p[abs(i)-1] for i in links1]\n link2 = [p[abs(i)-1] for i in links2]\n n = linkp.number_of_segments # spatial discretization\n\n # inner nodes\n if friction == 'steady':\n H[1:-1], V[1:-1] = inner_node_steady(linkp, H0, V0, dt, g)\n elif friction == 'quasi-steady':\n H[1:-1], V[1:-1] = inner_node_quasisteady(linkp, H0, V0, dt, g)\n else:\n H[1:-1], V[1:-1] = inner_node_unsteady(linkp, H0, V0, dt, g,\n dVdx, dVdt)\n\n # Pipe start\n V1 = V10; H1 = H10 #list\n V2 = V0[1]; H2 = H0[1]\n dVdx1 = dVdx10 ; dVdt1 = dVdt10\n dVdx2 = dVdx[0]; dVdt2 = dVdt[1]\n\n if utype[0] == 'Pipe':\n if linkp.start_node.transient_node_type == 'SurgeTank':\n shape = linkp.start_node.tank_shape\n H[0], V[0], Qs = surge_tank(shape, link1, linkp,\n H1, V1, H2, V2, dt, g, 0, np.sign(links1), [-1],\n friction, dVdx1, dVdx2, dVdt1, dVdt2)\n linkp.start_node.water_level = H[0]\n linkp.start_node.tank_flow = Qs\n elif linkp.start_node.transient_node_type == 'Chamber':\n shape = linkp.start_node.tank_shape\n H[0], V[0], Qs, zp = air_chamber(shape, link1, linkp,\n H1, V1, H2, V2, dt, g, 0, np.sign(links1), [-1],\n friction, dVdx1, dVdx2, dVdt1, dVdt2)\n linkp.start_node.water_level = zp\n linkp.start_node.tank_flow = Qs\n else:\n elev = linkp.start_node.elevation\n emitter_coeff = linkp.start_node.emitter_coeff + linkp.start_node.demand_coeff\n block_per = linkp.start_node.block_per\n H[0], V[0] = add_leakage(emitter_coeff, block_per, link1, linkp, elev,\n H1, V1, H2, V2, dt, g, 0, 
np.sign(links1), [-1],\n friction, dVdx1, dVdx2, dVdt1, dVdt2)\n elif utype[0] == 'Pump':\n pumpc = pump[0]\n H[0], V[0] = pump_node(pumpc, link1, linkp,\n H1, V1, H2, V2, dt, g, 0, np.sign(links1), [-1],\n friction, dVdx1, dVdx2, dVdt1, dVdt2)\n elif utype[0] == 'Valve':\n valvec = valve[0]\n H[0], V[0] = valve_node(valvec, link1, linkp,\n H1, V1, H2, V2, dt, g, 0, np.sign(links1), [-1],\n friction, dVdx1, dVdx2, dVdt1, dVdt2)\n\n # Pipe end\n V1 = V0[n-1]; H1 = H0[n-1]\n V2 = V20; H2 = H20\n dVdx1 = dVdx[n-1] ; dVdt1 = dVdt[n-1]\n dVdx2 = dVdx20; dVdt2 = dVdt20\n if dtype[0] == 'Pipe':\n if linkp.end_node.transient_node_type == 'SurgeTank':\n shape = linkp.end_node.tank_shape\n H[n], V[n], Qs = surge_tank(shape, linkp, link2,\n H1, V1, H2, V2, dt, g, n, [1], np.sign(links2),\n friction, dVdx1, dVdx2, dVdt1, dVdt2)\n linkp.end_node.water_level = H[n]\n linkp.end_node.tank_flow = Qs\n elif linkp.end_node.transient_node_type == 'Chamber':\n shape = linkp.end_node.tank_shape\n H[n], V[n], Qs,zp = air_chamber(shape, linkp, link2,\n H1, V1, H2, V2, dt, g, n, [1], np.sign(links2),\n friction, dVdx1, dVdx2, dVdt1, dVdt2)\n linkp.end_node.water_level = zp\n linkp.end_node.tank_flow = Qs\n else:\n elev = linkp.end_node.elevation\n emitter_coeff = linkp.end_node.emitter_coeff + linkp.end_node.demand_coeff\n block_per = linkp.end_node.block_per\n H[n], V[n] = add_leakage(emitter_coeff, block_per,linkp, link2, elev,\n H1, V1, H2, V2, dt, g, n, [1], np.sign(links2),\n friction, dVdx1, dVdx2, dVdt1, dVdt2)\n elif dtype[0] == 'Pump':\n pumpc = pump[1]\n H[n], V[n] = pump_node(pumpc, linkp, link2,\n H1, V1, H2, V2, dt, g, n, [1], np.sign(links2),\n friction, dVdx1, dVdx2, dVdt1, dVdt2)\n\n elif dtype[0] == 'Valve':\n valvec = valve[1]\n H[n], V[n] = valve_node(valvec, linkp, link2,\n H1, V1, H2, V2, dt, g, n, [1], np.sign(links2),\n friction, dVdx1, dVdx2, dVdt1, dVdt2)\n return H, V", "def run(outs, ins_filter='/dev/ttyUSB.*', newport=lambda conn: None, write_queue=None):\r\n data_queue = multiprocessing.Queue()\r\n\r\n multiprocessing.Process(\r\n target=writer,\r\n args=(data_queue, write_queue, outs)\r\n ).start()\r\n\r\n readers = {}\r\n\r\n while True:\r\n\r\n for (path, _, _) in serial.tools.list_ports.grep(ins_filter):\r\n\r\n if path not in readers.keys() or not readers[path].is_alive():\r\n\r\n readers[path] = multiprocessing.Process(\r\n target=reader, args=(data_queue, path, newport)\r\n )\r\n readers[path].start()", "def interleave_bits(odd, even):\n val = 0\n max0 = max(odd, even)\n n = 0\n while (max0 > 0):\n n += 1\n max0 >>= 1\n for i in xrange(n):\n bitMask = 1 << i\n a = (1 << (2*i)) if (even & bitMask) > 0 else 0\n b = (1 << (2*i+1)) if (odd & bitMask) > 0 else 0\n val += a + b\n return val", "def Barrier(nprocesses, signalIN, signalOUT):\t\n\twhile True:\n\t\tfor i in range (0,nprocesses):\n\t\t\tsignalIN()\n\t\tfor i in range (0,nprocesses):\n\t\t\tsignalOUT(0)", "def readlines(self, n = None):\n\t\tif not self._input: raise PlumberExceptions.PipeTypeException(self)\n\t\tresult = []\n\t\twhile n == None or n > 0:\n\t\t\tline = self.readline()\n\t\t\tif not line: return result\n\t\t\tresult.append(line)\n\t\t\tn -= 1\n\t\treturn result", "def _worker(pipelines: List[Pipeline], source: Queue, sink: Queue):\n pipelines = list(pipelines)\n for i, p in enumerate(pipelines):\n if isinstance(p, ConvertT2S):\n pipelines[i] = ConvertT2S()\n\n def processor(article):\n for p in pipelines:\n article = p(article)\n return article\n\n while True:\n article = source.get()\n if article == 
'EXIT':\n return\n article = list(processor(article))\n sink.put(article)", "def xorRange(num: int, floor: int, higher: int) -> Generator[Tuple[int, int], None, None]:\r\n assert 0 <= floor, \"floor must be non-negative\"\r\n assert 0 <= num, \"num must be non-negative\"\r\n\r\n digit = 0\r\n while floor < higher:\r\n if floor & 1:\r\n yield (floor ^ num) << digit, ((floor ^ num) + 1) << digit\r\n floor += 1\r\n if higher & 1:\r\n higher -= 1\r\n yield (higher ^ num) << digit, ((higher ^ num) + 1) << digit\r\n floor >>= 1\r\n higher >>= 1\r\n num >>= 1\r\n digit += 1", "def consume(pipeline, seq):\n try:\n for value in seq:\n pipeline.send(value)\n except StopConsumption:\n pass", "def odd_only(CC):\n if CC % 2 == 0 & CC > 0:\n CC -= 1\n if CC < 0:\n CC = 0\n while CC % 2 == 1:\n yield CC\n CC -= 2\n if CC == -1:\n break", "def run(self):\r\n for pipe in self.inputs:\r\n for row in pipe.rows():\r\n self.put(row)", "def broken_pipe_handler(func: Callable[[List[str]], int], arguments: List[str]) -> int:\n try:\n returncode = func(arguments)\n sys.stdout.flush()\n except BrokenPipeError:\n devnull = os.open(os.devnull, os.O_WRONLY)\n os.dup2(devnull, sys.stdout.fileno())\n # Convention is 128 + whatever the return code would otherwise be\n returncode = 128 + 1\n return returncode", "def __call__(self, input=None): # pragma: no cover\n while False:\n yield None", "def processes(start, end, processes):\n end_things = [processes[x][2] for x in range(len(processes))]\n if start == end or end not in end_things:\n return []\n\n seq = []\n seen = 0\n inp = ''\n out = ''\n do = ''\n \n for i in range(len(processes)):\n if processes[i][2] == end:\n out = processes[i][2]\n inp = processes[i][1]\n do = processes[i][0]\n seq.append(do)\n seen += 1\n break\n\n while seen < len(processes):\n for i in range(len(processes)):\n if processes[i][2] == inp:\n out = processes[i][2]\n inp = processes[i][1]\n do = processes[i][0]\n seq.append(do)\n seen += 1\n\n seq.reverse()\n return seq", "def zpipe(ctx):\n a = ctx.socket(zmq.PAIR)\n b = ctx.socket(zmq.PAIR)\n a.linger = b.linger = 0\n a.hwm = b.hwm = 1\n iface = f\"inproc://{binascii.hexlify(os.urandom(8))}\"\n a.bind(iface)\n b.connect(iface)\n return a, b", "def better_grouper(inputs, n):\n iters = [iter(inputs)] * n\n return zip(*iters)", "def microbit_process(pipe):\n gamepad_listener = MicroBitListener(pipe)\n gamepad_listener.listen()", "def parity(it):\n \n return sum(it)%2", "def __rshift__(self, next: 'IO[TResult]') -> 'IO[TResult]':\n return self.bind(lambda _: next)", "def rm_odd_numbers(int_list):\n output = []\n\n for item in int_list:\n if not item % 2:\n output.append(item)\n\n return output", "def _split(self):\r\n \r\n temp = [self.upstream.demand]\r\n for item, p in zip(self.downstream, self.priority):\r\n temp.append(item.supply/p)\r\n \r\n flow = min(temp) # total flow\r\n \r\n self.upstream.outflow = flow\r\n \r\n for item, p in zip(self.downstream, self.priority):\r\n item.inflow = p * flow", "def readValues(n):\n for i in xrange(n):\n yield int(raw_input().strip())", "def readValues(n):\n for i in xrange(n):\n yield int(raw_input().strip())", "def test_pipe_simple():\n\n def transform(array):\n \"\"\"Turns the (n,2) array into a (n,4) array.\"\"\"\n assert array.shape == (10, 2)\n new = Array(columns=\"abcd\")\n for x, y in array:\n new.append([x, y, x + y, x * y])\n return new\n\n group = Pipe(Group({\"a\": Numerical(), \"b\": Numerical()}), transform)\n for _ in range(10):\n group.set_a(1e-6 + random())\n group.set_b(1e-6 + random())\n 
group.push()\n\n array = group.array()\n assert array.shape == (10, 4)\n\n for row in array:\n assert row[0] > 0.0 and row[1] > 0.0\n assert row[2] == row[0] + row[1]\n assert row[3] == row[0] * row[1]", "def output_to_pipe(pipe_in):\n os.dup2(pipe_in, 1) # stdout\n # os.dup2(pipe_in, 2) # stderr", "def sort_012(input_list):\n i = 0\n next_0 = 0\n next_2 = len(input_list) - 1\n\n while i <= next_2:\n if input_list[i] == 0:\n input_list[i] = input_list[next_0]\n input_list[next_0] = 0\n next_0 += 1\n i += 1\n elif input_list[i] == 2:\n input_list[i] = input_list[next_2]\n input_list[next_2] = 2\n next_2 -= 1\n else:\n i += 1\n\n return input_list", "def piped(self):\n\t\tpass", "def process(self):\n\n count = 0\n total = 0\n\n while total < 200 and count < 10:\n digits = self._stream.read(2)\n if len(digits) < 2:\n break\n \n number = int(digits)\n \n total += number\n \n count += 1\n\n return count", "def pair_stream(n):\n n_str = str(n)\n if len(n_str) % 2 != 0:\n n_str = '0' + n_str\n pairs = textwrap.wrap(n_str, 2)\n digit_pairs = [int(p) for p in pairs]\n return itertools.chain(digit_pairs, itertools.repeat(0))", "def skipp(self):\n for x in range(4):\n self.fwd(right=100, left=100)\n time.sleep(.5)\n self.servo(1000)\n time.sleep(.1)\n self.servo(2000)\n time.sleep(.1)\n self.fwd(right=-100, left=-100)\n time.sleep(.1)\n self.servo(-1000)\n self.stop()", "def demoChained():\n \n class ChainingSOFP(StreamingOutputFormattingProcess):\n def onStdoutLine(self, tag, line):\n super().onStdoutLine(tag, \"[{}] {}\".format(tag, line.decode()).encode())\n\n def onStderrLine(self, tag, line):\n super().onStderrLine(tag, \"[{}] {}\".format(tag, line.decode()).encode())\n if tag == \"main\" and b\"trigger\" in line:\n cmdChained = \"bash -c 'echo chained stdout && sleep 2 && echo stderr chained 1>&2 && sleep 1 && echo done chained'\"\n self.run(cmdChained, \"chained\")\n\n cmdInitial = \"bash -c 'echo stdout && sleep 1 && echo trigger chained 1>&2 && sleep 1 && echo more output && sleep 1 && echo done && exit 3'\"\n Spec = ChainingSOFP.OutputSpec\n sofp = ChainingSOFP(Spec(\"STDOUT: {}\"), Spec(\"STDERR: {}\", sys.stderr))\n status = sofp.run(cmdInitial, \"main\")\n print(\"Initial finished with status: {}.\".format(status))", "def read(self, n = None):\n\t\tif not self._input: raise PlumberExceptions.PipeTypeException(self)\n\t\tsaved = self._state.read(n)\n\t\tif n == None:\n\t\t\treturn saved + pservlet.pipe_read(self._pipe_desc)\n\t\telif n == len(saved): \n\t\t\treturn saved\n\t\telse:\n\t\t\treturn saved + pservlet.pipe_read(self._pipe_desc, n - len(saved))", "def next ( num = 1 ) :\n return run ( num )", "def stream(_) -> int:\n return 1 << 9", "def stream(_) -> int:\n return 1 << 9", "def cmd_iter(cmd):\n\n def thread_enqueue(label, f, q):\n t = threading.Thread(target=enqueue_output, args=(label, f, q))\n t.daemon = True ## thread dies with the program\n t.start()\n return t\n\n def enqueue_output(label, out, queue):\n prev_line = None\n for line in out.read():\n if prev_line is not None:\n queue.put((label, \"%s\\n\" % prev_line))\n prev_line = line\n # print(\"%s: %r\" % (label, line))\n # print(\"END of %s\" % (label, ))\n if prev_line:\n queue.put((label, prev_line))\n out.close()\n\n proc = Proc(cmd)\n proc.stdin.close()\n q = Queue()\n t1 = thread_enqueue(\"out\", proc.stdout, q)\n t2 = thread_enqueue(\"err\", proc.stderr, q)\n running = True\n while True:\n try:\n yield q.get(True, 0.001)\n except Empty:\n if not running:\n break\n proc.poll()\n running = proc.returncode is None or 
\\\n any(t.is_alive() for t in (t1, t2))\n\n # print(\"%s: %r\" % (\"errlvl\", proc.returncode))\n yield \"errorlevel\", proc.returncode", "def make_paired_end_reads(sequence):\n \n R1 = sequence[0:n]\n R2 = sequence[len(sequence) - n:len(sequence)]\n\n #one reads are reverse complement, so make reverse complement of R2\n R2 = make_reverse_complement(R2)\n\n return [R1, R2]", "def _pipe_and_accumulate(val, fns):\n for fn in fns:\n val = fn(val)\n yield val", "def partition(seq):\n\n return 0", "def unroll_stream(\n stream: Generator, skip_first: bool = False, pbar: Union[bool, Sequence[Any]] = True\n) -> Any:\n # init\n obs = next(stream)\n\n obs_flat, treedef = tree_flatten(obs)\n num_leaves = len(obs_flat)\n\n # stream_scan\n def _init_outputs():\n if skip_first:\n return [[]] * num_leaves\n else:\n return list(map(lambda x: [x], obs_flat))\n\n outputs = _init_outputs()\n\n if pbar:\n stream = tqdm(stream, desc=\"stream_unroll\")\n\n for obs in stream:\n obs_flat = tree_leaves(obs)\n assert len(obs_flat) == num_leaves\n for y, x in zip(outputs, obs_flat):\n y.append(x)\n\n # stack outputs\n for i in range(num_leaves):\n outputs[i] = onp.stack(outputs[i])\n\n # transpose outputs\n return tree_unflatten(treedef, outputs)", "def Piping(T_in, p_in, m_dot, d_inner, l_pipe, f, epsilon_pipe, T_shield, N):\r\n\r\n ## Estimation of the influence of the arcs\r\n # Calculation according to VDI Heatatlas 2013\r\n # Assumption isoenthalpic flow\r\n state_Arc = FlowRestriction(T_in, p_in, m_dot, d_inner, f)\r\n p_Arc = state_Arc.get(\"p\")\r\n T_Arc = state_Arc.get(\"T\")\r\n\r\n ## Estimation of the influence of thermal radiation on the compressible flow\r\n\r\n # Emission coefficent for an enclosed vessel\r\n # Assuming much bigger hot surface -> emissivity of hot surface doesnt matter anymore, just the cold one\r\n # Thus the simple equation can be used\r\n q_pipe = epsilon_pipe * sp.constants.Stefan_Boltzmann * (T_shield**4 - T_Arc**4) #W\r\n\r\n # Calling of the function SimplePipe\r\n state_out = SimplePipe(T_Arc, p_Arc, m_dot, d_inner, l_pipe, N, 0, q_pipe)\r\n #Transfer results\r\n p_out = state_out.get(\"p\")\r\n T_out = state_out.get(\"T\")\r\n h_out = state_out.get(\"h\")\r\n state_out = {\"h\": h_out, \"T\": T_out, \"p\": p_out}\r\n\r\n return state_out", "def process(self, count):\n self.data.add_node(0)\n for index in range(1, count + 1):\n # print(\"{}.: {}\".format(index, self.data))\n self.data.move_circular(self.stepforward)\n self.data.add_node(index)\n return self.data.get_next()", "def main():\n dt = DropToken()\n play = True\n while play:\n try:\n line = sys.stdin.readline()\n except KeyboardInterrupt:\n break\n if not line:\n break\n play = dt.inputProcess(line)", "def generator(factor, current, condition=0):\n while True:\n next_current = factor * current % 2147483647\n if condition == 0 or (next_current % condition == 0):\n yield next_current\n current = next_current", "def get_even_numbers(x, stop, z):\r\n result = []\r\n counter = 0\r\n while len(result) is not x:\r\n if counter % 2 == 0 and counter < stop and counter // z:\r\n result.append(counter)\r\n counter += 1\r\n return result", "def e_seq():\n yield 2;\n for n in count(2, 2):\n yield 1\n yield n\n yield 1", "def pipe(*functions):\n\n return reduce(compose, functions, identity)", "def task5(count):\n number_1, number_2 = 1, 1\n for _ in range(count):\n yield number_1\n number_1, number_2 = number_2, number_1 + number_2", "def connect_pipes(input, output, service=VoidService, config={}):\n return 
connect_stream(PipeStream(input, output), service=service, config=config)", "def _order_streams(in1, in2, out1, out2):\n if in1.T < in2.T:\n return in1, in2, out1, out2\n else:\n return in2, in1, out2, out1", "def pipes(self):\n for name in self._pipes:\n yield name, self._data[name]", "def progression(first_item:int, amount:int,func):\n item = first_item\n count = 0\n stop = False\n while count < amount and not stop:\n stop = yield item\n item = func(item)\n count += 1", "def ring_filter(data):\n out = 1\n for src in data:\n out *= src\n return out", "def pow2(limit):\n i = 0\n bin_num = 1\n while bin_num <= limit:\n yield bin_num\n i += 1\n bin_num = 2 ** i", "def step(nodes, outputs, edges):\n flowed = []\n for node_name in nodes.copy():\n if node_name in flowed:\n continue\n if len(nodes[node_name]) == 2:\n if node_name in flowed:\n continue\n node = [int(value) for value in nodes[node_name]]\n low_value, high_value = min(node), max(node)\n low_flow, high_flow = edges[node_name] \n low_dictionary, low_node_name = low_flow\n high_dictionary, high_node_name = high_flow\n low_node = low_dictionary.get(low_node_name, tuple())\n high_node = high_dictionary.get(high_node_name, tuple())\n low_dictionary[low_node_name] = low_node + (str(low_value),)\n high_dictionary[high_node_name] = high_node + (str(high_value),)\n nodes[node_name] = tuple()\n if low_dictionary is nodes:\n flowed.append(low_node_name)\n if high_dictionary is nodes:\n flowed.append(high_node_name)\n return nodes, outputs, edges", "def test_pipe():\n parser = CmdParser([posandtwo, valprog])\n out = parser.parse(\"posandtwo | valprog\")\n assert isinstance(out[0], ProgramNode)\n assert out[0].program_desc == posandtwo\n assert isinstance(out[1], PipeNode)\n assert isinstance(out[2], ProgramNode)\n assert out[2].program_desc == valprog\n assert isinstance(out[3], EndOfCommandNode)", "def mapper():\n\n for line in sys.stdin:\n data = line.strip().split(',')\n if len(data) != 22 or data[5] != 'REGULAR':\n continue\n print('{}\\t{}'.format(data[1], data[6]))", "def binario(num):\n\tbi = []\n\tnum = abs(int(num))\n\twhile num >0:\n\t\tres = num % 2\n\t\tbi.append(res)\n\n\t\tnum = num //2\n\n\treturn bi", "def processData(pipe, event, pulses):\n logging.info(\"Started processData\")\n start_time = time.time()\n for pulse in range(pulses):\n samples = pipe.get()\n# time.sleep(0.001)\n end_time = time.time()\n elapsed = end_time - start_time\n samplesProcessed = (pulses * len(samples[0]) * len(samples))\n logging.info(\"processData processed %d Msamples in %.3f s\",\n samplesProcessed / 1e6,\n elapsed)\n logging.info(\"processData rate: %.3f Msa/s in lumps of %d samples\",\n samplesProcessed / elapsed / 1e6,\n dig.pointsPerCycle)", "def part2():\n\n program = IntCodeProcessor.load_program('day13input.txt')\n program[0] = 2\n cpu = IntCodeProcessor(program)\n result = None\n next_input = None\n ball_pos = None\n paddle_pos = None\n score = None\n while result is None:\n try:\n result = cpu.execute_program(next_input, reset=False)\n except ExecutionError as err:\n assert err.reason == ExecutionCode.NEED_INPUT\n\n ball_pos, paddle_pos, score = process_output(cpu.outputs, ball_pos, paddle_pos, score)\n cpu.outputs = []\n next_input = next_input_for(ball_pos, paddle_pos)\n print(f'Part 2 answer: {score}')", "def algo(a: int, b: int) -> int:\n\n while b != 0:\n a, b = b, a % b\n return a", "def even_odd_sums(seq):\n even = seq[0::2]\n odd = seq[1::2]\n return [sum(even), sum(odd)]", "def stream():\n while True:\n yield random_point()", "def 
range_with_status(total):\n n = 0\n while n < total:\n done = '#' * (n+1)\n todo = '-'*(total-n-1)\n s = '<{0}>'.format(done+todo)\n if not todo:\n s += '\\n'\n if n > 0:\n s = '\\r'+s\n print(s, end='')\n yield n\n n += 1" ]
[ "0.5789352", "0.5761657", "0.5741719", "0.564393", "0.52422297", "0.51917", "0.50984246", "0.50861835", "0.5054945", "0.50331575", "0.50145054", "0.5011917", "0.50026035", "0.49603093", "0.49603093", "0.4935153", "0.49219003", "0.49094537", "0.4875977", "0.4841634", "0.48397136", "0.4836972", "0.4836972", "0.4836903", "0.4833675", "0.4786137", "0.4757106", "0.47484192", "0.47403568", "0.47188297", "0.46804073", "0.4680056", "0.46638858", "0.46535045", "0.46494767", "0.4640372", "0.46219885", "0.4616029", "0.4614172", "0.45958653", "0.45601532", "0.45490527", "0.45465764", "0.45464134", "0.45383313", "0.45381302", "0.45289934", "0.45063296", "0.44881156", "0.44873834", "0.44822907", "0.44757962", "0.44757175", "0.44571972", "0.44567752", "0.44527715", "0.4452481", "0.44428644", "0.44428644", "0.44414747", "0.4440443", "0.4440408", "0.4438423", "0.4433434", "0.44272584", "0.44245097", "0.44186425", "0.4409343", "0.44079348", "0.44056123", "0.44056123", "0.4402442", "0.43886682", "0.4387215", "0.43811455", "0.43806687", "0.43659988", "0.43630558", "0.43622518", "0.43620506", "0.43543762", "0.43530148", "0.4351027", "0.4346529", "0.43351042", "0.4334508", "0.43311596", "0.43236965", "0.43235353", "0.43196285", "0.4318981", "0.43175405", "0.43173966", "0.4316531", "0.43110257", "0.43098044", "0.43086824", "0.4307655", "0.43069398", "0.43057218" ]
0.68875015
0
Takes numbers sequentially from feed_pipe and verifies whether each is prime. Any primes found are returned as a dict to the main process; the dict contains only one key-value pair, and the value is always a list.
def generatePrime(ls_primes, feed_pipe, return_dict):
    local_primes = []
    while True:
        n = feed_pipe.recv()
        if n == -1:  # sentinel given by the distributor
            break
        else:
            is_prime = True

            # check for divisibility against the known seed primes;
            # no need to check 2, since all fed numbers are odd
            for prime in ls_primes[1:]:
                if n % prime == 0:
                    is_prime = False
                    break

            # if the number is prime, append it to this worker's local list
            if is_prime:
                local_primes.append(n)
    if len(local_primes) > 0:
        return_dict[os.getpid()] = local_primes
        return return_dict
    return 0
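Because each worker stores its findings under its own PID, the parent process still has to merge the per-process lists after joining the workers. The following is a minimal, standalone sketch of that merge step; the PID keys and prime values are made up purely for illustration, and ls_primes stands for the seed primes handed to every worker.

# Hypothetical contents of the shared Manager dict after all workers have joined.
return_dict = {1111: [11, 17, 23], 2222: [13, 19], 3333: [29, 31]}  # PID -> primes found
ls_primes = [2, 3, 5, 7]  # seed primes given to each worker

merged = sorted(set(ls_primes) | {p for lst in return_dict.values() for p in lst})
print(merged)  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31]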
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def worker(nums, outdict):\n for n in nums:\n outdict[n] = primes2(n)", "def primes():\n D = {} # map composite integers to primes witnessing their compositeness\n q = 2 # first integer to test for primality\n while True:\n if q not in D:\n yield q # not marked composite, must be prime\n D[q*q] = [q] # first multiple of q not already marked\n else:\n for p in D[q]: # move each witness to its next multiple\n D.setdefault(p+q,[]).append(p)\n del D[q] # no longer need D[q], free memory\n q += 1", "def distributor(ls_feed_pipe_open,low,high):\n def getNumber(low,high):\n i = low\n if i%2 == 0: #if i is even, then start from i+1 odd.\n i += 1\n while i<=high:\n yield i\n i+=2 #no need to check for even numbers, so skip it here at begining\n yield -1 #when generator yields -1, it reached high, so terminate\n\n next_pipe = 0\n number = getNumber(low,high)\n while True:\n msg = next(number)\n if msg == -1: #to check when generator reached high.\n break\n else:\n #feed pipes in a round robin fashion,\n #so that over time each generatePrime process experiences same load.\n ls_feed_pipe_open[next_pipe].send(msg)\n next_pipe += 1\n if next_pipe == len(ls_feed_pipe_open):\n next_pipe = 0\n for p in ls_feed_pipe_open:\n p.send(-1) #-1 is sentinel value for all generatePrime processs\n return 0", "def getNums():\n key = allprimes() # Empty list for key is created\n\n # Runs code endlessly as no instruction was\n while True: # given to end the code\n num = input(\"Please enter a number:\") # Changed number to integer as it's outputted\n try: # as a string from input\n selected_num = int(num) # Asked for number with try function\n except:\n print(\"\\n Please input only a number!\") # Only accepts a number\n continue\n if selected_num > 100: # Limits number to 100 as that was limit\n print(\"Please only select a number up to 100.\")\n continue\n if selected_num in key:\n print(\"You have picked a prime number please select another number.\")\n continue\n for i, number in enumerate(key): # Iterator function to run through key\n complementary = selected_num - number # Initiated formula\n if complementary in key[i:]: # Obtained complimentary number if available\n print(str(selected_num) + \" = {} + {}\".format(number, complementary))\n break # Printed values as requested for assignment", "def allprimes():\n\n key = [] # The empty list is initiated\n\n for val in range(2, 101): # Set to obtain all prime values from 2 to 100\n if val >= 2: # They are then stored into the list\n for n in range(2, val): # The values have to be greater than 2 as 1 cannot\n if not (val % n): # be included\n break # Pulls all prime numbers by iterating through them\n else: # If a number does not obtain a remainder that means\n key.append(val) # it cannot be divisable by anything but it's own\n # number it is appended as a prime number\n return key", "def primes(count):\n\n prime_list = []\n num = 2\n\n while count > 0:\n\n if prime_checker(num):\n prime_list.append(num)\n count -= 1\n num += 1\n\n return prime_list", "def primes(count):\n\n prime_numbers = [2]\n next_num = 3 \n\n def is_prime(next_num):\n if next_num % 2 == 0:\n return False \n \n for i in range(3, next_num, 2):\n if next_num % i == 0:\n return False \n return True \n\n while count > len(prime_numbers): \n if is_prime(next_num): \n prime_numbers.append(next_num)\n next_num += 1\n\n return prime_numbers", "def test_prime_12(self):\n\t self.assertTrue(prime_generator(12), [2, 3, 5, 7, 11])", "def test_prime_10(self):\n\t self.assertTrue(prime_generator(10), [2, 3, 
5, 7])", "def get_primes(self, startnum=2):\n i = startnum\n while True:\n if self.is_prime(i):\n yield i\n i += 1", "def primes():\n yield 1\n primes = []\n for n in itertools.count(2):\n if not any(n % p == 0 for p in primes):\n # No divisor found among previous primes\n yield n\n primes.append(n)", "def prime_generator():\r\n # map of composites (key) with at least one prime factor in list as value\r\n D = {}\r\n\r\n # first number to test if prime\r\n q = 2\r\n\r\n while 1:\r\n if q not in D:\r\n # next prime found\r\n yield q\r\n # add it's square as a composite to D\r\n D[q**2] = [q]\r\n else:\r\n # update dictionary entries based on composite and its listed primes\r\n for p in D[q]:\r\n D.setdefault(p+q, []).append(p)\r\n del D[q]\r\n q += 1", "def primes():\r\n try:\r\n args = request.args\r\n start_num, end_num = validate_request(args)\r\n # cache key\r\n key = f'primes:{start_num}:{end_num}'\r\n rv = cache.get(key)\r\n if rv is None: # not in cache\r\n job = get_primes_list.queue(start_num, end_num)\r\n print(job.get_id())\r\n cache.set(key, job.get_id(), timeout=3600)\r\n return jsonify(job.get_id()), 200\r\n else:\r\n return jsonify(rv), 200\r\n except Exception as e:\r\n raise InvalidUsage(\"Error Processing request {}\".format(e))", "def gen_primes():\n # Maps composites to primes witnessing their compositeness.\n # This is memory efficient, as the sieve is not \"run forward\"\n # indefinitely, but only as long as required by the current\n # number being tested.\n\n D = {}\n\n # The running integer that's checked for primeness\n\n q = 2\n\n while True:\n if q not in D:\n # q is a new prime.\n # Yield it and mark its first multiple that isn't\n # already marked in previous iterations\n yield q\n D[q * q] = [q]\n else:\n # q is composite. D[q] is the list of primes that\n # divide it. Since we've reached q, we no longer\n # need it in the map, but we'll mark the next\n # multiples of its witnesses to prepare for larger\n # numbers\n for p in D[q]:\n D.setdefault(p + q, []).append(p)\n del D[q]\n\n q += 1", "def primes():\n yield 2\n found_primes = [2]\n a = 3\n while True:\n for p in found_primes:\n if p**2 > a:\n found_primes.append(a)\n yield a\n a += 2\n break\n elif a % p == 0:\n a += 2\n break", "def find_prime(num):\n\n if not isinstance(num, int) or isinstance(num, bool):\n raise TypeError(\"number input must be an integer\")\n\n if num <= 1:\n raise ValueError(\"number must be greater than 1\")\n\n pri_num = [2]\n\n # The code below will test if every iteration of 'var' is a prime number\n for var in range(2, num + 1):\n res = 0\n for var2 in pri_num:\n if var == 2:\n break\n elif (var % var2) == 0:\n break\n elif (var2 == pri_num[-1]):\n res = var\n if res:\n pri_num.append(res)\n print(pri_num)\n\n return 0", "def gen_primes():\n\n # Maps composites to primes witnessing their compositeness.\n # This is memory efficient, as the sieve is not \"run forward\"\n # indefinitely, but only as long as required by the current number\n # being tested\n\n D = {}\n\n # The runing integer that is checked for primeness\n q = 2\n\n while True:\n if q not in D:\n # q is a new prime.\n # Yield it and mark its first multiple that isn't\n # already marked in previous iterations\n\n yield q\n D[q * q] = [q]\n else:\n # q is composite. D[q] is the list of primes that\n # divide it. 
Since we've reached q, we no longer\n # need it in the map, but we'll mark the next multiples\n # of its witnesses to prepare for larger numbers\n\n for p in D[q]:\n D.setdefault(p + q, []).append(p)\n del D[q]\n\n q += 1", "def gen_primes():\n\n # Maps composites (=non-primes) to primes witnessing their compositeness.\n # This is memory efficient, as the sieve is not \"run forward\" indefinitely,\n # but only as long as required by the current number being tested.\n D = {}\n\n q = 1 # the running integer that is checked for primeness\n while (q := q+1):\n if q not in D:\n # q is a new prime. Yield it and mark its first multiple that is\n # not already marked in previous iterations\n yield q\n D[q*q] = [q]\n else:\n # q is composite. D[q] is the list of primes that divide it. Since\n # we have reached q, we no longer need it in the map, but we will\n # mark the next multiples of its witnesses to prepare for larger\n # numbers\n for p in D[q]:\n D.setdefault(p+q, []).append(p)\n del D[q]", "def test_prime_2(self):\n\t self.assertTrue(prime_generator(2), [2])", "def primes():\n yield 2\n found = []\n for i in itertools.count(start=3, step=2):\n for p in found:\n if i % p == 0:\n break\n else:\n yield i\n found.append(i)", "def check_prime_worker(job_queue):\n while True:\n # your code here\n # 1. get next available number from queue\n try:\n number = job_queue.get(block=False)\n print(f\"Process {current_process()} checks number {number}\")\n except Empty:\n break\n\n # 2. print the number and whether it\n # is prime or not, use is_prime()\n if is_prime(number):\n print(f\"{number} is prime\")\n else:\n print(f\"{number} is not prime\")\n\n # 3. use try/except to catch Empty exception\n # and quit the loop if no number remains in queue\n # done in step 1", "def primeIterator(no = 0,lessThan = None ):\r\n \r\n prmd = {2:1,3:2}\r\n sqrtn = 2\r\n l = 1\r\n count = 0\r\n #or (no==-1 and not lessThan) l < no or:\r\n print(\"no\", no)\r\n while ((no!=0 and count < no) or ( (no==0) and (lessThan and l<lessThan ) or (not lessThan ) ))and (l<4) :\r\n if l in prmd:\r\n count += 1\r\n yield l\r\n l+=1\r\n l=5\r\n add = 2\r\n \r\n while (no!=0 and count < no) or ( (no==0) and ( (lessThan and l<lessThan ) or (not lessThan )) ) : #check only 6n-1 and 6n+1\r\n if l > sqrtn**2:\r\n sqrtn = l**0.5\r\n for i in prmd:\r\n if i > sqrtn:\r\n prmd[l] = len(prmd)\r\n add = 2 if add==4 else 2\r\n count +=1\r\n yield l\r\n break\r\n if l%i ==0 : \r\n break\r\n l+=add", "def test_15(self):\n\t self.assertTrue(prime_generator(15), [2, 3, 5, 7, 11, 13])", "def getPrime(bits):\n\twhile(True) :\n\t\t# on continue a tirer des nombres tant que l'on n'a pas trouve de nombre premier\n\t\tp = getrandbits(bits)\n\t\tif(miller_rabin(p,100)) :\n\t\t\treturn p", "def prime_generator() -> int:\n \n #Start with the first prime.\n counter = count(2)\n candidate = next(counter)\n cache: list = [candidate]\n yield candidate\n \n # Set a flag.\n divisible = False\n while True:\n candidate = next(counter)\n # Check if the candidate is prime.\n for number in cache:\n # If number is greater than the squareroot of candidate, we are done.\n if number * number > candidate:\n break\n # If number divides candidate, candidate is not prime.\n if candidate % number == 0:\n divisible = True\n break\n # If is is prime, add it to the list.\n if not divisible:\n cache.append(candidate)\n yield candidate\n # Reset the flag.\n divisible = False", "def getPrime(self, group=17):\n default_group = 17\n\n primes = {\n 5: 
0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF,\n 14: 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF,\n 15: 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF,\n 16: 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199FFFFFFFFFFFFFFFF,\n 17:\n 
0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C93402849236C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AACC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E6DCC4024FFFFFFFFFFFFFFFF,\n 18:\n 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C93402849236C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AACC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E438777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F5683423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD922222E04A4037C0713EB57A81A23F0C73473FC646CEA306B4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A364597E899A0255DC164F31CC50846851DF9AB48195DED7EA1B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F924009438B481C6CD7889A002ED5EE382BC9190DA6FC026E479558E4475677E9AA9E3050E2765694DFC81F5
6E880B96E7160C980DD98EDD3DFFFFFFFFFFFFFFFFF\n }\n\n if group in primes.keys():\n return primes[group]\n else:\n print(\"Error: No prime with group %i. Using default.\" % group)\n return primes[default_group]", "def primes(count):\n\n # store lst of prime numbers to be returned @ end\n primes = []\n\n # set up prime # generator from other fn starting at 2\n num = 2\n\n while count > 0:\n\n # check if prime\n if is_prime(num):\n # if so, append to primes lst\n primes.append(num)\n # decrement by 1; used to keep track of how many primes enter lst\n count -= 1\n\n # check next number if prime, etc.\n num += 1\n\n # return final lst\n return primes", "def primes(count):\n\n prime_nums = [2]\n prime = 3\n\n for i in range(1, count):\n\n while prime not in [3, 5, 7] and (\n prime % 3 == 0 or prime % 5 == 0 or prime % 7 == 0\n ):\n prime += 2\n\n prime_nums.append(prime)\n prime += 2\n\n return prime_nums", "def prime_numbers(x: int):\n A = [True] * x\n A[0] = A[1] = False\n for i in range(2, x, 1):\n if is_simple_number(i):\n for m in range(2 * i, x, i):\n A[m] = False\n n = 0\n for k in range(x):\n print(k, \"is prime\" if A[k] else \"is not prime\")\n if A[k]:\n n += 1\n\n B = [0] * n\n n = 0\n for k in range(x):\n if A[k]:\n B[n] = k\n n += 1\n return B", "def primes(count):\n\n primes = []\n number_to_check = 2\n\n while len(primes) < count:\n # check if number is prime\n # if prime, add to list\n # if not prime, move on\n # increment number to check\n \n is_prime = True\n\n for num in range(2,number_to_check):\n if number_to_check % num == 0 and num != number_to_check:\n is_prime = False\n break\n \n if is_prime == True:\n primes.append(number_to_check)\n\n number_to_check += 1\n\n return primes", "def generate_primes():\n # David Eppstein, UC Irvine, 28 Feb 2002\n # Source : http://code.activestate.com/recipes/117119/\n yield 2\n\n D = {} # map composite integers to primes witnessing their compositeness\n for q in count(start=3, step=2):\n if q not in D:\n yield q # not marked composite, must be prime\n D[q*q] = [q] # first multiple of q not already marked\n else:\n for p in D[q]: # move each witness to its next multiple\n D.setdefault(2*p+q,[]).append(p)\n del D[q] # no longer need D[q], free memory", "def primes():\n yield 2\n candidate = 3\n while True:\n for i in range(3, int(sqrt(candidate)) + 1, 2):\n if (candidate % i) == 0:\n break\n else:\n yield candidate\n candidate += 2", "def primes():\n yield 2\n candidate = 3\n while True:\n for i in range(3, int(sqrt(candidate)) + 1, 2):\n if (candidate % i) == 0:\n break\n else:\n yield candidate\n candidate += 2", "def primes(count):\n\n # START SOLUTION\n\n primes = []\n num = 2\n\n while count > 0:\n\n if is_prime(num):\n primes.append(num)\n count -= 1\n\n num += 1\n\n return primes", "def loop():\n num = 1\n while(len(primes) < didget ):\n if(isprime(num) == True):\n primes.append(num)\n \n num = num + 1", "def getPrimeFactors(num):\n n = num\n primes = {}\n\n p = 2\n sqrt = math.sqrt(num)\n\n def checkAndUpdate(inc):\n nonlocal n\n nonlocal p\n nonlocal primes\n if n % p == 0:\n if str(p) in primes.keys():\n primes[str(p)] += 1\n else:\n primes[str(p)] = 1\n n /= p\n else:\n p += inc\n \n while p == 2 and p <= n:\n checkAndUpdate(1)\n while p <= n and p <= sqrt:\n checkAndUpdate(2)\n if len(primes.keys()) == 0:\n primes[str(num)] = 1\n elif n != 1:\n primes[str(n)] = 1\n return primes", "def gen_primes():\n\n n = 1\n while True:\n while not isPrime(n):\n n += 1\n\n yield n\n n += 1", "def prime():\n prime_set = {2} # Set of prime 
numbers that have been found\n yield 2 # First prime\n for x in itertools.count(3, 2): # Check odd numbers, starting with 3\n primes_below_sqrt = {i for i in prime_set if i <= sqrt(x)} \n for prime in primes_below_sqrt:\n if x % prime == 0:\n break # x is divisible by a prime factor, so it is not prime\n else:\n prime_set.add(x) # x has been shown to be prime\n yield x", "def primes(max_number_of_primes) -> iter:\n number_primes = count(1)\n prime = prime_generator()\n while next(number_primes) <= max_number_of_primes:\n yield next(prime)", "def primes(numOfPrimes):\n\n primes = []\n # we want to start at 2003, which is the first prime after 2000, seeing as\n # we absolutely need to fit all 2000 keys on the hash table,\n i = 2003\n\n while len(primes) < numOfPrimes:\n isPrime = True\n\n for k in range(2, i):\n if i % k == 0:\n isPrime = False\n break\n\n if isPrime:\n primes.append(i)\n i += 1\n\n return primes", "def gen_Primes(pp, K, Q, L):\r\n global MAX_PRIME\r\n global MAX_NUMBER\r\n global cnt_divisions\r\n global cnt_no_primes\r\n global cnt_compares\r\n global cnt_numbers\r\n global dict_P_runlen\r\n global dict_N_runlen\r\n # get the primes by testing the remainder\r\n # after division with formerly computed primes\r\n # stop when found square of prime greater current number\r\n cnt_divisions = 0\r\n cnt_no_primes = 0\r\n cnt_compares = 0\r\n cnt_numbers = 0\r\n X = gen_Numbers(0, L)\r\n # we already know some primes - skip them\r\n while True:\r\n x = next(X)\r\n if x >= pp: # also ignore pp - it's a square\r\n break # we are at the edge\r\n # now compare numbers revers against squares - think about roots\r\n # and test the remainder ...\r\n while True:\r\n x = next(X)\r\n if x > MAX_NUMBER:\r\n break\r\n cnt_numbers += 1\r\n prime_found = True # assume tha's a prime\r\n cnt_runlength = 0\r\n for p, q in zip(K, Q):\r\n cnt_compares += 1\r\n if q > x:\r\n # compare current x against the sqare of known primes\r\n # if gretaer than we need no more divisors from list\r\n break\r\n cnt_runlength += 1\r\n cnt_divisions += 1\r\n if x % p == 0:\r\n # primitive test for a prime\r\n prime_found = False # wrong assumptions - get next x\r\n cnt_no_primes += 1\r\n break\r\n else:\r\n prime_found = False\r\n assert False, \"PrimeDivisorError/SquareRuleError\"\r\n if prime_found:\r\n # ok - we found one - also store new values for future compares\r\n K.append(x)\r\n Q.append(x*x)\r\n dict_P_runlen[cnt_runlength] = dict_P_runlen.get(\r\n cnt_runlength, 0) + 1\r\n yield x\r\n else:\r\n dict_N_runlen[cnt_runlength] = dict_N_runlen.get(\r\n cnt_runlength, 0) + 1\r\n return", "def is_prime(self, it):\n return it > 0 \\\n and (it == 2 or it % 2 != 0) \\\n and (it == 1 or not (any(it % number == 0 for number in range(3, it // 2, 2))))", "def is_prime(self):\n pass", "def main():\n user_input = int(input(\"Enter an integer which is greater than 1, we will\"\n + \" print out all of prime numbers: \"))\n\n while user_input <= 1:\n print(\"Invalid input, please enter again: \")\n user_input = int(input(\"\"))\n else:\n prime_num = PrimeGenerator()\n prime_list = prime_num.primes_to_max(user_input)\n\n print(prime_list)", "def prime_gen():\n for i in memo_primes: yield i\n x = memo_primes[-1] + 1\n \n while True:\n if prime_with(x, memo_primes):\n yield x\n memo_primes.append(x)\n x += 1", "def prime_generator() -> Iterator[int]:\n\n num = 2\n while True:\n if is_prime(num):\n yield num\n num += 1", "def is_prime(value):\n if value < 4:\n return True\n \n lower_bound = 2\n upper_bound = value-1\n \n prime 
= True\n test_value = lower_bound\n \n while test_value < upper_bound:\n #print \"testing divisibility of %d for %d\" % (value, test_value)\n if value % test_value == 0:\n prime = False\n test_value += 1\n return prime", "def count_prime():\n nums = []\n for i in range(2, 10000):\n if is_prime(i):\n nums.append(i)\n return nums", "def prime_generator():\n i = 0 # prime numbers counter\n num = 0 # current number\n while True:\n num += 1\n if is_prime(num):\n i += 1\n yield i, num", "def get_primes(maxi):\n\n is_prime = [True] * (maxi + 1)\n \n is_prime[0] = False\n is_prime[1] = False\n # is_prime[2] = True and all other even numbers are not prime\n for i in range(2,maxi+1):\n if is_prime[i]: # if current is prime, set multiples to current not prime\n for j in range(2*i, maxi+1, i):\n is_prime[j] = False\n\n return is_prime", "def start_prime_test():", "def gen_primes():\n\tyield 2\n\tyield 3\n\tprime_list = [2, 3]\n\twhile 1:\n\t\tnext = prime_list[-1] + 2\n\t\ti = 0\n\t\twhile i < len(prime_list):\n\t\t\tif next%prime_list[i] == 0:\n\t\t\t\tnext+=2\n\t\t\t\ti=0\n\t\t\telse:\n\t\t\t\ti+=1\n\t\tprime_list.append(next)\n\t\tyield next", "def factorize(n:int,primesDict:dict = primesDict):\r\n\r\n \r\n if isPrime(n,primesDict):\r\n return {n:1}\r\n\r\n factors = {}\r\n\r\n lastPrime = getLastPrime(primesDict)\r\n print (lastPrime,\"Lastprimes\")\r\n if lastPrime < n:\r\n print (\"Creating DictS\")\r\n\r\n prma(n,lastPrime,primesDict)\r\n\r\n for i in primesDict:\r\n if n%i == 0 :\r\n count = 0\r\n while n % i**(count+1) == 0 :\r\n count+=1 \r\n factors[i]= count\r\n\r\n return factors", "def is_prime_number(number_):\n flag = 0\n for values in range(2, number_//2):\n if number_ % values == 0:\n flag += 1\n if flag == 1:\n return True\n else:\n return False", "def divide(self, val):\n ancien_pri = 999999\n ancien_chunck = 1\n for pri in prime_array:\n if val % pri == 0 and pri >= self.MINIMUM_NUMBER_OF_CHUNK and val / pri < self.MAXIMUM_SIZE_PER_CHUNK:\n ancien_pri = int(pri)\n ancien_chunck = int(val / pri)\n print({\"size\": ancien_pri, \"chunck\": ancien_chunck})\n self.divide(ancien_chunck)\n\n return {\"size\": ancien_pri, \"chunck\": ancien_chunck}", "def get_primes_in(self, grange):\n for n in grange:\n if self.is_prime(n):\n yield n", "def prime():\n number = random.randint(1, 100)\n if len(primfacs(number)) == 1:\n return number, 'yes'\n return number, 'no'", "def find_all_primes(x=22):\n allprimes = []\n for i in range(2, x + 1):\n ##allows all the numbers between 2(smallest prime) and x be divided by x \n if is_prime(i):\n #using the function that set up just now\n allprimes.append(i)\n #\n print(\"There are %d primes between 2 and %d\" % (len(allprimes), x))\n return allprimes", "def count_prime_args(num):\n nums = []\n for i in range(2, num):\n if is_prime(i):\n nums.append(i)\n return nums", "def factor_primes(x, iter):\n factors = []\n for factor in prime:\n while x % factor == 0:\n x = x / factor\n factors.append(factor)\n if x == 1:\n break\n return factors", "def main():\n primes = getNPrime(100)\n\n write_primes(primes, 'output.csv')\n\n prime_list = read_primes('output.csv')\n\n print(prime_list)", "def findPrimes(prime_bit, N, num):\n primes = []\n total_bits = 0\n prime = pow(2, prime_bit - 1)\n while len(primes) != num:\n prime = nextprime(prime)\n if prime % (2 * N) == 1:\n primes.append(prime)\n total_bits += prime.bit_length()\n\n return primes, total_bits", "def prime_checker(num):\n\n assert num > 0\n\n if num < 2:\n return False\n\n if num == 2:\n return True\n\n if 
num % 2 == 0:\n return False\n\n n = 3\n\n while n * n <= num:\n\n if num % n == 0:\n return False\n\n else:\n num += 2\n\n return True", "def is_prime(num1):\n num2 = 2\n while num2 < num1:\n if num1 % num2 == 0:\n return False\n num2 += 1\n return True", "def prime_numbers(max_number_eval=100):\n prime_numbers_list = list(next_prime(max_number_eval))\n print('The prime numbers from 2 to {} are:{}'.format(max_number_eval, prime_numbers_list))", "def primes(n):\n return [i for i, v in enumerate(prime_cache(n)) if v]", "def is_prime(num: int) -> bool:\n if num < 2:\n return False\n low_primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73,\n 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163,\n 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233, 239, 241, 251,\n 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349,\n 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443,\n 449, 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557,\n 563, 569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647,\n 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751, 757,\n 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857, 859, 863,\n 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947, 953, 967, 971, 977, 983,\n 991, 997]\n if num in low_primes:\n return True\n for prime in low_primes:\n if num % prime == 0:\n return False\n return rabin_miller(num)", "def Primes():\n candidate = 1\n _primes_so_far = [2] # first prime, only even prime\n yield _primes_so_far[-1]\n while True:\n candidate += 2 # check odds only from now on\n for prev in _primes_so_far:\n if prev**2 > candidate:\n yield candidate\n _primes_so_far.append(candidate)\n break\n if not divmod(candidate, prev)[1]: # no remainder!\n break # done looping", "def is_prime(value):\n\n if value < 2: raise ValueError\n\n for i in range(2, value):\n if value % i == 0:\n return False\n\n return True", "def test_prime(n):\n if SIEVE[n]:\n return True\n else:\n return False", "def run_test_sum_until_prime_input():\n print()\n print(\"--------------------------------------------------\")\n print(\"Testing the sum_until_prime_input function:\")\n print(\"--------------------------------------------------\")\n\n sum_until_prime_input()", "def prime_facts(primes, number):\n\n facts = set()\n sqr = number ** 0.5\n for prime in primes:\n if prime > sqr:\n if number > 1:\n facts.add(number)\n break\n while number % prime == 0:\n facts.add(prime)\n number /= prime\n sqr = number ** 0.5\n\n return facts", "def primes(n):\n primfac = {}\n primfac = defaultdict(lambda: 0, primfac)\n while (n % 2) == 0:\n primfac[2] += 1 \n n //= 2\n d = 3\n while d*d <= n:\n while (n % d) == 0:\n primfac[d] += 1 # supposing you want multiple factors repeated\n n //= d\n d += 2\n if n > 1:\n primfac[n] = 1\n return primfac", "def find_prime_divisors(self, num):\n # If the number is prime, it is only divisible by itself.\n if pe_005.is_prime(num) or num < 2:\n return {num: 1}\n\n # If there were no primes searched for, then search for primes.\n if len(self._primes) <= 0:\n self.find_primes(num)\n\n results = dict()\n # Loop through the sorted primes list and stop when the prime is larger than the given number.\n for prime in self._primes[::-1]:\n if num <= 0:\n break\n\n # Count the number of divisions of the prime number into the current number.\n count, num = 
pe_005.count_divisions(num, prime)\n if count > 0:\n results[prime] = count\n\n return results", "def prime_generator(num):\n prime_list = [i for i in range(1,num+1,2) if prime_checker(i)]\n\n if num > 1:\n prime_list.insert(0,2)\n\n return prime_list", "def comprobar_primo(num):\n primo = True\n for i in range(2, num):\n if num%i == 0:\n primo = False\n return primo", "def prime_factors(number: int) -> dict:\n f = {}\n i = 2\n while number > 1 and number >= i:\n if number % i == 0:\n if i not in f:\n f[i] = 1\n else:\n f[i] += 1\n number //= i\n else:\n i += 1\n return f", "def getPrimes(self):\n try:\n primes_file = open(self.primes_path, 'r')\n except FileNotFoundError:\n logger.warning(f\"Unable to open moduli file '{self.primes_path}'. This will reduce the number of\"\n f\"available key exchange algorithms, and may affect compatibility.\")\n return {}\n\n try:\n primes = {}\n for line in primes_file:\n line = line.strip()\n if not line or line[0] == '#':\n continue\n tim, typ, tst, tri, size, gen, mod = line.split()\n size = int(size) + 1\n gen = int(gen)\n mod = int(mod, 16)\n if size not in primes:\n primes[size] = []\n primes[size].append((gen, mod))\n return primes\n finally:\n primes_file.close()", "def generatePrimesSieve(count):\n\tif count < 1:\n\t\treturn None\n\n\tsieve = itertools.count(3, 2)\n\tlastPrime = 2\n\tfor i in xrange(1, count):\n\t\tlastPrime = sieve.next()\n\t\tprint lastPrime\n\t\tsieve = filterPrime(sieve, lastPrime)\n\treturn lastPrime", "def get_primes(self, state, action): #make direct call to environment's get_primes\n pass", "def async_is_prime(x):\n if x < 2:\n return False\n for i in range(2, int(math.sqrt(x)) + 1):\n time.sleep(0.1)\n if x % i == 0:\n return False\n yield from async_sleep(0)\n return True", "def prime():\n from transformers import pipeline\n\n primer = pipeline(\"feature-extraction\")\n result = primer([\"hello\"])", "def get_primes(lower: int, upper: int) -> typing.Generator[int, None, None]:\r\n for num in range(lower, upper + 1):\r\n if num > 1:\r\n for i in range(2, int(math.sqrt(num)) + 1):\r\n if num % i == 0:\r\n break\r\n else:\r\n yield num", "def all_primes(nums):\n return list(filter(lambda x: all(x % i != 0 for i in range(2, x)), nums))\n # assume is_prime is defined:\n # def is_prime(n):\n # return all(n % i != 0 for i in range(2, n))\n # return list(filter(is_prime, nums))", "def primos(x):\n def esprimo(n):\n \"\"\"\n Determines whether a natural number is a prime number\n :param n: Agiven natural number\n :return: True if prime, False otherwise\n \"\"\"\n toret = False\n if x == 2:\n toret = True\n elif x % 2 == 0:\n toret = False\n else:\n for i in range(3, x, 2):\n if x % i == 0:\n break\n else:\n toret = True\n # Se ejecuta cuando no se rompe el bucle\n\n return toret\n\n toret = []\n for i in range(0, x):\n if esprimo(i):\n toret.append(i)\n\n return toret", "def is_prime(num):\n\n if num == 2:\n return True\n for i in range(2, num):\n if num % i == 0:\n return False\n return True", "def problem077():\n\n cond = lambda n: num_prime_sum_ways(n) > 5000\n ans = next(filter(cond, itertools.count(2)))\n return ans", "def isprime(n):\r\n\treturn is_prime(n)", "def is_prime(num):\n if num < 2:\n return False\n\n for i in range(2, num):\n if num % i == 0:\n return True", "def get_set_prime_numbers(number):\n prime_numbers = set()\n\n for i in range(2, number + 1):\n for j in prime_numbers:\n if i % j == 0:\n break\n else:\n prime_numbers.add(i)\n\n return prime_numbers", "def gen_primes(N):\n primes = set()\n for n in 
range(2, N):\n if all(n % p > 0 for p in primes):\n primes.add(n)\n yield n", "def is_prime(number):\n if number <=3:\n return True\n \n for i in range(2, number):\n if number % i == 0:\n return False\n \n return True", "def check_valves(self):\n for name in self._pipes:\n if self._data[name].check_valve:\n yield name", "def return_prime_numbers_less_tahn_100():\r\n primes = []\r\n for num in range(100):\r\n is_prime = True\r\n for i in range(2, num):\r\n if num % i == 0:\r\n is_prime = False \r\n if is_prime:\r\n primes.append(num)\r\n return primes", "def prime_generator():\r\n for i in itertools.count(start=1):\r\n for j in ((6 * i) - 1, (6 * i) + 1):\r\n if is_prime(j): yield(j)", "def primes(upper_bound):\n global cache\n lower_bound = 2\n prime_set = new_primes(upper_bound, cache, lower_bound)\n prime_set.update(cache)\n cache = prime_set\n\n return prime_set", "def test_if_it_outputs_correct_output(self):\n self.assertEquals(prime_numbers(5), [2, 3, 5])", "def is_prime(num):\n\n assert num >= 0, \"Num should be a positive integer!\"\n\n if num < 2:\n return False\n\n if num == 2:\n return True\n\n if num % 2 == 0:\n return False\n\n n = 3\n while n * n <= num:\n if num % n == 0:\n return False\n n += 2\n\n return True", "def prevPrimes(n):\r\n from numbers import Integral as types #'Integral' is a class of integers/long-numbers\r\n if not isinstance(n, types): raise TypeError(\"n must be int, not \" + str(type(n)))\r\n if n < 2: raise ValueError(\"n must greater than 2\")\r\n primes_dict = {i : True for i in range(2, n + 1)} # initializes the dictionary\r\n for i in primes_dict:\r\n if primes_dict[i]: #avoids going through multiples of numbers already declared False\r\n num = 2\r\n while (num * i <= n): #sets all multiples of i (up to n) as False\r\n primes_dict[num*i] = False\r\n num += 1\r\n return [num for num in primes_dict if primes_dict[num]]", "def xprimes(step=1000):\n\n\tif step % 2:\n\t\traise ValueError(\"step is not even\")\n\n\tprimes = [2]\n\tmultiples = [4] # least multiple of prime at index i in primes not yet marked\n\tlower = 2\n\tupper = 4\n\t\n\twhile True:\n\t\t\n\t\t# non-prime numbers will live here\n\t\tnums = set()\n\t\t\n\t\tfor i, p in enumerate(primes):\n\n\t\t\t# You've marked everything worth marking (for now)\n\t\t\tif p * p > upper:\n\t\t\t\tbreak\n\t\t\t# Pick up marking where you left off\n\t\t\tm = multiples[i]\n\t\t\t\n\t\t\t# Do some marking\n\t\t\twhile m < upper: # upper is even, cannot be prime\n\t\t\t\tnums.add(m)\n\t\t\t\tm += p\n\t\t\t\n\t\t\t# Left off on this multiple (save for later)\n\t\t\tmultiples[i] = m\n\t\t\n\t\t# Collect primes between lower and upper\n\t\tfor i in xrange(lower, upper): # upper is even, cannot be prime\n\t\t\tif i not in nums:\n\t\t\t\tyield i\n\t\t\t\tprimes.append(i)\n\t\t\t\tmultiples.append(i + i) # 2 * i (i is a new prime)\n\t\t\n\t\t# Got all the primes in this interval; move it up\n\t\tlower = upper + 1\n\t\tupper += min(upper, step)" ]
[ "0.62043977", "0.60420763", "0.6040586", "0.59742284", "0.5961007", "0.59362507", "0.5920342", "0.5811969", "0.5799837", "0.57528454", "0.5740674", "0.5735719", "0.5734782", "0.5734576", "0.5731913", "0.5719384", "0.5716878", "0.5673636", "0.5661564", "0.56440914", "0.5617547", "0.5578173", "0.5564181", "0.5558922", "0.55565923", "0.5552255", "0.555117", "0.5508194", "0.5504439", "0.55032545", "0.55011624", "0.5487311", "0.5487311", "0.54709274", "0.54572856", "0.54407775", "0.54397815", "0.54036415", "0.5392185", "0.53752345", "0.53607804", "0.53607565", "0.53553426", "0.535175", "0.5343479", "0.53317267", "0.5323993", "0.5319864", "0.53195685", "0.53178823", "0.5305513", "0.5301813", "0.5297095", "0.5290977", "0.52860576", "0.5275716", "0.5270868", "0.5270836", "0.52675486", "0.5242941", "0.52350926", "0.5231719", "0.5218083", "0.5207062", "0.5186822", "0.51792186", "0.51734275", "0.5160786", "0.51425296", "0.51405346", "0.51399857", "0.5136314", "0.5135468", "0.5129973", "0.5128546", "0.51267517", "0.5119112", "0.5119068", "0.51160455", "0.5114746", "0.5111454", "0.5105649", "0.5105542", "0.5103376", "0.5102204", "0.5102189", "0.5101645", "0.50802064", "0.50772744", "0.507401", "0.50720185", "0.50702053", "0.50654787", "0.5062128", "0.50594157", "0.50522876", "0.5051625", "0.5049884", "0.50385785", "0.50350606" ]
0.78329885
0
Reject unsupported chain parts
def _select_simple_chainparts(chain_parts):\n    for cp in chain_parts:\n        if reject_substr_res.search(cp['chainPartName']):\n            return False\n    return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate_chain():", "def reject(self):\n pass", "def test_blind_sig_chain_wrong_intermediary(self): # pylint: disable=too-many-locals\n\n test_levels = 4\n msg = os.urandom(1024)\n wrong_level = 2\n\n ca = ECCBlind()\n signer_obj = ca\n fake_intermediary = ECCBlind()\n\n output = bytearray()\n\n for level in range(test_levels):\n if not level:\n output.extend(ca.pubkey())\n requester_obj = ECCBlind(pubkey=signer_obj.pubkey())\n child_obj = ECCBlind()\n point_r = signer_obj.signer_init()\n pubkey = child_obj.pubkey()\n\n if level == test_levels - 1:\n msg_blinded = requester_obj.create_signing_request(point_r,\n msg)\n else:\n msg_blinded = requester_obj.create_signing_request(point_r,\n pubkey)\n signature_blinded = signer_obj.blind_sign(msg_blinded)\n signature = requester_obj.unblind(signature_blinded)\n if level == wrong_level:\n output.extend(fake_intermediary.pubkey())\n elif level != test_levels - 1:\n output.extend(pubkey)\n output.extend(signature)\n signer_obj = child_obj\n verifychain = ECCBlindChain(ca=ca.pubkey(), chain=str(output))\n self.assertFalse(verifychain.verify(msg, 1))", "def _filter_committees_failing_weak_representation(self, profile: list[set[int]], committees: list[list[int]]) -> list[list[int]]:\n unique_approval_scores = self._compute_unique_approval_scores(profile)\n parties_deserving_representation = {party for party in self.parties if unique_approval_scores[party] >= self.n / self.k}\n possible_committees = [committee for committee in committees if parties_deserving_representation.issubset(set(committee))]\n return possible_committees", "def test_exact_nonsupercontrolled_decompose(self):\n with self.assertWarns(UserWarning, msg=\"Supposed to warn when basis non-supercontrolled\"):\n TwoQubitBasisDecomposer(UnitaryGate(Ud(np.pi / 4, 0.2, 0.1)))", "def test_blind_sig_chain_wrong_msg(self): # pylint: disable=too-many-locals\n\n test_levels = 4\n msg = os.urandom(1024)\n fake_msg = os.urandom(1024)\n\n ca = ECCBlind()\n signer_obj = ca\n\n output = bytearray()\n\n for level in range(test_levels):\n if not level:\n output.extend(ca.pubkey())\n requester_obj = ECCBlind(pubkey=signer_obj.pubkey())\n child_obj = ECCBlind()\n point_r = signer_obj.signer_init()\n pubkey = child_obj.pubkey()\n\n if level == test_levels - 1:\n msg_blinded = requester_obj.create_signing_request(point_r,\n msg)\n else:\n msg_blinded = requester_obj.create_signing_request(point_r,\n pubkey)\n signature_blinded = signer_obj.blind_sign(msg_blinded)\n signature = requester_obj.unblind(signature_blinded)\n if level != test_levels - 1:\n output.extend(pubkey)\n output.extend(signature)\n signer_obj = child_obj\n verifychain = ECCBlindChain(ca=ca.pubkey(), chain=str(output))\n self.assertFalse(verifychain.verify(fake_msg, 1))", "def valid_chain(chain):\n last_block = chain[0]\n current_index = 1\n\n while current_index < len(chain):\n block = chain[current_index]\n #print(last_block)\n #print(block)\n #print(\"\\n-----------\\n\")\n # Check that the hash of the block is correct\n if block['previous_hash'] != hash(last_block):\n return False\n\n # Check that the Proof of Work is correct\n #Delete the reward transaction\n transactions = block['transactions'][:-1]\n # Need to make sure that the dictionary is ordered. 
Otherwise we'll get a different hash\n transaction_elements = ['sender_address', 'recipient_address', 'value']\n transactions = [OrderedDict((k, transaction[k]) for k in transaction_elements) for transaction in transactions]\n\n if not valid_proof(transactions, block['previous_hash'], block['nonce'], MINING_DIFFICULTY):\n return False\n\n last_block = block\n current_index += 1\n\n return True", "def squeeze_accept(partition):\n Write a function that\n - Sort districts by most Democratic heavy and most Republican heavy\n\n - Assign a base value of competitiveness for each district\n - Run chain, accept only if districts satisfy values under or order\n \"\"\"\n\n#--- CONSTRAINTS\n\n\"\"\"", "def remove_incompatible_operations(pipelines):\n\n def find_duplicates(pipelines):\n for idx in range(len(pipelines)):\n for idx_ in range(idx + 1, len(pipelines)):\n if pipelines[idx] == pipelines[idx_]:\n return idx\n return -1\n\n\n def _remove_illegal_combination(pipelines, combination):\n illegal_pipes = []\n pipelines_ = []\n for idx, pipeline in enumerate(pipelines):\n combination_ = list(set.intersection(set(pipeline.keys()), set(combination)))\n actives = [pipeline[key] != None for key in pipeline if key in combination_]\n\n if sum(actives) > 1:\n illegal_pipes.append(idx) # Store the index of bad combination\n for param in combination_: # Generate substituting legal combinations\n if pipeline[param] != None: # we need to make new pipe\n pipeline_ = pipeline.copy()\n for param_ in combination_: # Set ALL conflicting parameters to None\n pipeline_[param_] = None\n pipeline_[param] = pipeline[param] # Set current parameter back to original value\n pipelines_.append(pipeline_)\n\n new_pipelines = [i for j, i in enumerate(pipelines) if j not in illegal_pipes]\n # new_pipelines.extend(pipelines_)\n return new_pipelines, pipelines_\n\n illegal_combinations = [['BASELINE', 'MSC', 'EMSC', 'RNV', 'SNV', 'LSNV'],\n ['SMOOTH', 'SAVGOL']]\n\n for combination in illegal_combinations:\n pipelines, new_pipes = _remove_illegal_combination(pipelines, combination)\n\n pipelines.extend(new_pipes)\n pipelines_set = {json.dumps(pipeline, sort_keys=True) for pipeline in pipelines}\n pipelines = [json.loads(item) for item in pipelines_set]\n\n\n return pipelines", "def extract_mixed_chains(raw_chains):\n chain_isolation_regex = re.compile(r'^\\w+\\s+\\d+\\s+(.*)')\n\n mixed_chains = [\n re.search(chain_isolation_regex,\n raw_chain).group(1).strip() # remove whitespace\n for raw_chain in raw_chains\n ]\n return mixed_chains", "def test_unrequired_chain_delete(self):\n self.txn.store_delete(\"felix-c\")\n self.assertEqual(self.txn.affected_chains, set([\"felix-c\"]))\n self.assertEqual(self.txn.chains_to_stub_out, set([]))\n self.assertEqual(self.txn.chains_to_delete, set([\"felix-c\"]))\n self.assertEqual(self.txn.referenced_chains,\n set([\"felix-b\", \"felix-stub\"]))\n self.assertEqual(\n self.txn.prog_chains,\n {\n \"felix-a\": [],\n \"felix-b\": [],\n })\n self.assertEqual(self.txn.required_chns,\n {\"felix-a\": set([\"felix-b\", \"felix-stub\"])})\n self.assertEqual(self.txn.requiring_chns,\n {\"felix-b\": set([\"felix-a\"]),\n \"felix-stub\": set([\"felix-a\"])})", "def valid_chain(chain):\n\n for i in range(len(chain) - 1):\n parent_edge = chain[i]\n child_edge = chain[i + 1]\n # verify that the child of the parent edge (second node) matches the parent of the child edge (first node)\n if not parent_edge[1] == child_edge[0]:\n # if this isn't\n return False\n return True", "def resolve_conflicts(self):\n 
neighbours = self.nodes\n new_chain = None\n # Look only for chains longer than this\n max_length = len(self.chain)\n # Get and verify the chains from all the nodes in the network\n for node in neighbours:\n response = requests.get(f'http://{node}/chain')\n if response.status_code == 200:\n length = response.json()['length']\n chain = response.json()['chain']\n # Check if chain is longer and valid\n if length > max_length and self.valid_chain(chain):\n max_length = length\n new_chain = chain\n # Replace this chain if a longer valid chain is discovered\n if new_chain:\n self.chain = new_chain\n return True\n return False", "def validate_blockchain(chain):\n assert isinstance(chain, list)\n\n for hook in chain[::-1]:\n pass", "def valid_chain(self, chain):\n last_block = chain[0]\n current_index = 1\n\n while current_index < len(chain):\n block = chain[current_index]\n print(f'{last_block}')\n print(f'{block}')\n print(\"\\n----------------\\n\")\n # verify hash integrity\n if block['previous_hash'] != self.hash(last_block):\n return False\n\n # verify proof integrity\n if not self.valid_proof(last_block['proof'], block['proof']):\n return False\n\n last_block = block\n current_index += 1\n\n return True", "def route_rejected(self, prefix, next_hop, as_path):", "def test_fail_missing_signature_fragment_underflow(self):\n # Adjust bundle balance, since we will also remove the change\n # transaction.\n self.bundle[0].value += self.bundle[-1].value\n\n # Remove the last input's second signature fragment, and the change\n # transaction.\n del self.bundle.transactions[-2:]\n for txn in self.bundle:\n txn.last_index -= 2\n\n validator = BundleValidator(self.bundle)\n\n self.assertFalse(validator.is_valid())\n\n self.assertListEqual(\n validator.errors,\n\n [\n 'Transaction 4 has invalid signature (using 2 fragments).',\n ],\n )", "def resolve_conflicts(self):\n neighbours = self.nodes\n new_chain = None\n\n # We are only looking for chains longer that ours\n max_length = len(self.chain)\n\n # Checking for the length of each chain in our network\n for node in neighbours:\n response = requests.get(f'http://{node}/chain')\n\n if response.status_code == 200:\n length = response.json()['length']\n chain = response.json()['chain']\n\n if length > max_length and self.validate_chain(chain):\n new_chain = chain\n max_length = length\n\n # Replace our chain with a new, longer, valid chain in our network (if present)\n if new_chain:\n self.chain = new_chain\n return True\n\n return False", "def chain(self):\n return ValueError(\"chain function not set.\")", "def chain_rangeValid(start, stop):\r\n for i in range(start, stop):\r\n chain = chain_153(i)\r\n if len(chain) > 1 or chain[0] == 153:\r\n for j in chain_153(i):\r\n print(j)", "def resolve_conflicts(self):\n\n neighbours = self.nodes\n new_chain = None\n\n # We're only looking for chains longer than ours\n max_length = len(self.chain)\n\n # Grab and verify the chains from all the nodes in our network\n for node in neighbours:\n response = requests.get(f'http://{node}:5000/chain')\n\n if response.status_code == 200:\n length = response.json()['length']\n chain = response.json()['chain']\n\n # Check if the length is longer and the chain is valid\n if length > max_length:\n max_length = length\n new_chain = chain\n\n # Replace our chain if we discovered a new, valid chain longer than ours\n if new_chain:\n self.chain = new_chain\n return True\n\n return False", "def test_blind_sig_chain_wrong_ca(self): # pylint: disable=too-many-locals\n\n test_levels = 
4\n msg = os.urandom(1024)\n\n ca = ECCBlind()\n fake_ca = ECCBlind()\n signer_obj = fake_ca\n\n output = bytearray()\n\n for level in range(test_levels):\n requester_obj = ECCBlind(pubkey=signer_obj.pubkey())\n child_obj = ECCBlind()\n if not level:\n # unlisted CA, but a syntactically valid pubkey\n output.extend(fake_ca.pubkey())\n point_r = signer_obj.signer_init()\n pubkey = child_obj.pubkey()\n\n if level == test_levels - 1:\n msg_blinded = requester_obj.create_signing_request(point_r,\n msg)\n else:\n msg_blinded = requester_obj.create_signing_request(point_r,\n pubkey)\n signature_blinded = signer_obj.blind_sign(msg_blinded)\n signature = requester_obj.unblind(signature_blinded)\n if level != test_levels - 1:\n output.extend(pubkey)\n output.extend(signature)\n signer_obj = child_obj\n verifychain = ECCBlindChain(ca=ca.pubkey(), chain=str(output))\n self.assertFalse(verifychain.verify(msg, 1))", "def valid_chain(self, chain):\n last_block = chain[0]\n current_index = 1\n\n while current_index < len(chain):\n block = chain[current_index]\n # print(f'{last_block}')\n # print(f'{block}')\n # print(\"\\n-----------\\n\")\n # Check that the hash of the block is correct\n last_block_hash = self.hash(last_block)\n if block['previous_hash'] != self.hash(last_block):\n return False\n\n # Check that the Proof of Work is correct\n if not self.valid_proof(last_block['proof'], block['proof'], last_block_hash):\n return False\n\n last_block = block\n current_index += 1\n\n return True", "def valid_chain(self, chain):\n\n last_block = chain[0]\n current_index = 1\n \n while current_index < len(chain):\n block = chain[current_index]\n # Check correctness of last block's hash\n if block['previous_hash'] != self.hash(last_block): \n return False\n # Check correctness of proof-of-work\n if not self.valid_proof(last_block['proof'], block['proof'], block['previous_hash']):\n return False\n last_block = block \n current_index += 1\n\n return True", "def valid_chain(self, chain):\n last_block = chain[0]\n current_index = 1\n\n while current_index < len(chain):\n block = chain[current_index]\n print(last_block)\n print(block)\n print(\"\\n--------\\n\")\n \n #check that the hash of the previous block is correct\n\n if block[\"previous_hash\"] != self.hash(last_block):\n print(\"Previous hash does not match\")\n return False\n\n if not self.valid_proof(block):\n print(\"Block proof of work is invalid\")\n return False\n\n last_block = block\n current_index += 1\n\n return True", "def resolve_conflicts(self):\n neighbors = self.nodes\n print(neighbors)\n new_chain = None\n\n # We only care about chains longer than our own\n max_length = len(self.chain)\n\n # Get and verify all neighbors chains\n for node in neighbors:\n try:\n url = 'http://{}/chain'.format(node)\n response = requests.get(url)\n if response.status_code == 200:\n length = response.json()['length']\n chain = response.json()['chain']\n\n # Check if longer and chain is valid\n if length > max_length and self.valid_chain(chain):\n max_length = length\n new_chain = chain\n except:\n return False\n # Replace our chain if necessary\n if new_chain:\n self.chain = new_chain\n self._write_chain()\n return True\n\n return False", "def resolve_conflicts(self):\n\n neighbours = self.nodes\n new_chain = None\n\n # We're only looking for chains longer than ours\n max_length = len(self.chain)\n\n # Grab and verify the chains from all the nodes in our network\n for node in neighbours:\n response = requests.get(f'http://{node}/chain')\n\n if 
response.status_code == 200:\n length = response.json()['length']\n chain = response.json()['chain']\n\n # Check if the length is longer and the chain is valid\n if length > max_length and self.valid_chain(chain):\n max_length = length\n new_chain = chain\n\n # Replace our chain if we discovered a new, valid chain longer than ours\n if new_chain:\n self.rewrite_chain(new_chain)\n return True\n\n return False", "def test_sort_chain_two_content_3():\n chain = N.Node(2, N.Node(2))\n result = A8.sort_chain(chain)\n\n assert result.data <= result.next.data, \"sort_chain returned chain out of order given input chain size 2 with dupicates\"", "def skip_sub_components(self, reason):\n pass", "def test_sort_chain_multiple_content_decreasing():\n n = 17\n data = range(n)\n chain = None\n for item in data:\n chain = N.Node(item, chain)\n\n result = A8.sort_chain(chain)\n\n walker = result\n prev = None\n seen = [False]*n\n for i in range(n):\n assert walker.data in data, \"sort_chain created extraneous data {} given chain with values decreasing\".format(walker.data)\n seen[walker.data] = True\n if prev is not None:\n assert prev.data <= walker.data, \"sort_chain placed {} before {} given chain with values decreasing\".format(prev.data, walker.data)\n prev = walker\n walker = walker.next\n\n for i,b in enumerate(seen):\n assert b, \"sort_chain omitted data value {} from returned chain given chain with values decreasing\".format(i)", "def test_specific_peer_prefixes_rejected(self):\n rejected_prefixes = self.pybird.get_peer_prefixes_rejected(\"PS1\")\n self.assertEquals(len(rejected_prefixes), 1)\n self.assertEquals(rejected_prefixes[0]['as_path'], '8954 20144')", "def test_block_bad_consensus(self):\n pass", "def valid_chain(self, chain):\n\n last_block = chain[0]\n current_index = 1\n\n while current_index < len(chain):\n block = chain[current_index]\n print(f'{last_block}')\n print(f'{block}')\n print(\"\\n-----------\\n\")\n # Check that the hash of the block is correct\n last_block_hash = self.hash(last_block)\n if block['previous_hash'] != last_block_hash:\n return False\n\n # Check that the Proof of Work is correct\n if not self.valid_proof(last_block['proof'], block['proof'], last_block_hash):\n return False\n\n last_block = block\n current_index += 1\n\n return True", "def pruneblockchain(self) -> None:\n return self.rpc_call(\"pruneblockchain\")", "def test_reject_proposal_demand(self):\n pass", "def chainIsValid(self):\n for i in range(1, len(self.blocks)):\n prev_block = self.blocks[i-1]\n cur_block = self.blocks[i]\n if cur_block.header['prevBlockH'] != getHashBlock(prev_block):\n return False\n return True", "def test_rewrite_existing_chain_remove_normal_dependency(self):\n self.txn.store_rewrite_chain(\"felix-a\", [\"foo\"], set([\"felix-stub\"]))\n self.assertEqual(self.txn.affected_chains, set([\"felix-a\"]))\n self.assertEqual(self.txn.chains_to_stub_out, set([]))\n self.assertEqual(self.txn.chains_to_delete, set([]))\n self.assertEqual(self.txn.referenced_chains, set([\"felix-stub\"]))\n self.assertEqual(\n self.txn.prog_chains,\n {\n \"felix-a\": [\"foo\"],\n \"felix-b\": [],\n \"felix-c\": [],\n })\n self.assertEqual(self.txn.required_chns,\n {\"felix-a\": set([\"felix-stub\"])})\n self.assertEqual(self.txn.requiring_chns,\n {\"felix-stub\": set([\"felix-a\"])})", "def verify_chain():\n for (index, block) in enumerate(blockchain):\n if index == 0:\n continue\n if block['previous_hash'] != hash_block(blockchain[index - 1]):\n return False\n # Here [:-1] excludes the reward from 
being a part of validation\n if not valid_proof(block['transactions'][:-1], block['previous_hash'], block['proof']):\n print('Proof of work is invalid.')\n return False\n return True", "def compose(\n # Left side positive filters\n chainLeftIn,resiNumLeftIn,resiNameLeftIn,atomSerialLeftIn,\n atomNameLeftIn,\n # Left side negative filters\n chainLeftOut,resiNumLeftOut,resiNameLeftOut, atomSerialLeftOut,\n atomNameLeftOut,\n # Right side positive filters\n chainRightIn,resiNumRightIn,resiNameRightIn,atomSerialRightIn,\n atomNameRightIn,\n # Right side negative filters\n chainRightOut,resiNumRightOut,resiNameRightOut,atomSerialRightOut,\n atomNameRightOut,\n # Contact Area\n contactAreaMin,contactAreaMax,\n # Minimal distance\n minimalDistanceMin,minimalDistanceMax,\n # Sequence separation\n seqSeparationMin,seqSeparationMax\n ):\n\n output=''\n\n match_first=''\n match_first=append_to_local_output(match_first, 'c', Generic(chainLeftIn))\n match_first=append_to_local_output(match_first, 'r', Generic(resiNumLeftIn))\n match_first=append_to_local_output(match_first, 'a', Generic(atomSerialLeftIn))\n match_first=append_to_local_output(match_first, 'R', Generic(resiNameLeftIn))\n match_first=append_to_local_output(match_first, 'A', Generic(atomNameLeftIn))\n output=append_to_global_output(output, '--match-first', match_first)\n\n match_first_not=''\n match_first_not=append_to_local_output(match_first_not, 'c', Generic(chainLeftOut))\n match_first_not=append_to_local_output(match_first_not, 'r', Generic(resiNumLeftOut))\n match_first_not=append_to_local_output(match_first_not, 'a', Generic(atomSerialLeftOut))\n match_first_not=append_to_local_output(match_first_not, 'R', Generic(resiNameLeftOut))\n match_first_not=append_to_local_output(match_first_not, 'A', Generic(atomNameLeftOut))\n output=append_to_global_output(output, '--match-first-not', match_first_not)\n\n match_second=''\n match_second=append_to_local_output(match_second, 'c', Generic(chainRightIn))\n match_second=append_to_local_output(match_second, 'r', Generic(resiNumRightIn))\n match_second=append_to_local_output(match_second, 'a', Generic(atomSerialRightIn))\n match_second=append_to_local_output(match_second, 'R', Generic(resiNameRightIn))\n match_second=append_to_local_output(match_second, 'A', Generic(atomNameRightIn))\n output=append_to_global_output(output, '--match-second', match_second)\n\n match_second_not=''\n match_second_not=append_to_local_output(match_second_not, 'c', Generic(chainRightOut))\n match_second_not=append_to_local_output(match_second_not, 'r', Generic(resiNumRightOut))\n match_second_not=append_to_local_output(match_second_not, 'a', Generic(atomSerialRightOut))\n match_second_not=append_to_local_output(match_second_not, 'R', Generic(resiNameRightOut))\n match_second_not=append_to_local_output(match_second_not, 'A', Generic(atomNameRightOut))\n output=append_to_global_output(output, '--match-second-not', match_second_not)\n\n output=append_to_global_output(output, '--match-min-area', Float(contactAreaMin))\n output=append_to_global_output(output, '--match-max-area', Float(contactAreaMax))\n\n output=append_to_global_output(output, '--match-min-dist', Float(minimalDistanceMin))\n output=append_to_global_output(output, '--match-max-dist', Float(minimalDistanceMax))\n\n output=append_to_global_output(output, '--match-min-seq-sep', Int(seqSeparationMin))\n output=append_to_global_output(output, '--match-max-seq-sep', Int(seqSeparationMax))\n\n return output", "def 
test_mediate_transfer_fails_if_intermediate_trustline_frozen(\n currency_network_contract_with_frozen_trustline, accounts\n):\n network = currency_network_contract_with_frozen_trustline\n\n path = [accounts[4], accounts[0], accounts[1], accounts[2]]\n\n with pytest.raises(eth_tester.exceptions.TransactionFailed):\n network.functions.transfer(10, 10, path, b\"\").transact({\"from\": accounts[4]})", "def resolve_conflict(self):\n neighbours = self.nodes\n new_chain = None\n #We're only looking for chains Longer than ours\n max_length = len(self.chain)\n #Grab and verify the chains from all the other nodes in our netwrok\n for node in neighbours:\n response = requests.get(f'http://{node}/chain')\n if response.status_code == 200:\n length = response.json()['length']\n chain = response.json()['chain']\n #check if the lentgh is longer and the cain is valid\n if length > max_length and self.valid_chain(chain):\n max_length = length\n new_chain = chain\n\n #replace our chain if we're discovered a new valid chain, Longer than ours\n if new_chain:\n self.chain = new_chain\n return True\n\n return False", "def filter_invalid_combos(m: 'Mods') -> 'Mods':\n if m & (Mods.DOUBLETIME | Mods.NIGHTCORE) and m & Mods.HALFTIME:\n m &= ~Mods.HALFTIME\n if m & Mods.EASY and m & Mods.HARDROCK:\n m &= ~Mods.HARDROCK\n if m & Mods.RELAX and m & Mods.AUTOPILOT:\n m &= ~Mods.AUTOPILOT\n if m & Mods.PERFECT and m & Mods.SUDDENDEATH:\n m &= ~Mods.SUDDENDEATH\n\n return m", "def valid_chain(self, chain):\n last_block = chain[0]\n current_index = 1\n\n while current_index < len(chain):\n block = chain[current_index]\n\n # Check that the hash of block is correct\n if block['previous_hash'] != self.hash(last_block):\n return False\n\n # Check the Proof of Work\n if not self.valid_proof(last_block['proof'], block['proof']):\n return False\n \n last_block = block\n current_index += 1\n return True", "def strip_loan(chain):\n while chain[-1]['action'] == 'LOAN':\n chain.pop()\n\n return chain", "def reject_fairness(experiment):\n num_heads = len([flip for flip in experiment if flip])\n return num_heads < 469 or num_heads > 531", "def reject_fairness(experiment):\n num_heads = len([flip for flip in experiment if flip])\n return num_heads < 469 or num_heads > 531", "def reject_fairness(experiment):\n num_heads = len([flip for flip in experiment if flip])\n return num_heads < 469 or num_heads > 531", "def badMuons(self, allmuons, allvertices):\n\n muons = list(m for m in allmuons) # make it a python list\n goodMuon = []\n\n if len(allvertices) < 1: raise RuntimeError\n PV = allvertices[0].position()\n \n out = [] \n for mu in muons:\n if (not(mu.isPFMuon()) or mu.innerTrack().isNull()):\n goodMuon.append(-1); # bad but we don't care\n continue;\n if (self.preselection(mu)):\n dxypv = abs(mu.innerTrack().dxy(PV));\n dzpv = abs(mu.innerTrack().dz(PV));\n if (self.tighterId(mu)):\n ipLoose = ((dxypv < 0.5 and dzpv < 2.0) or mu.innerTrack().hitPattern().pixelLayersWithMeasurement() >= 2);\n goodMuon.append(ipLoose or (not(self.selectClones_) and self.tightGlobal(mu)));\n elif (self.safeId(mu)):\n ipTight = (dxypv < 0.2 and dzpv < 0.5);\n goodMuon.append(ipTight);\n else:\n goodMuon.append(0);\n else:\n goodMuon.append(3); # maybe good, maybe bad, but we don't care\n\n n = len(muons)\n for i in range(n):\n if (muons[i].pt() < self.ptCut_ or goodMuon[i] != 0): continue;\n bad = True;\n if (self.selectClones_):\n bad = False; # unless proven otherwise\n n1 = muons[i].numberOfMatches(ROOT.reco.Muon.SegmentArbitration);\n for j in 
range(n):\n if (j == i or goodMuon[j] <= 0 or not(self.partnerId(muons[j]))): continue\n n2 = muons[j].numberOfMatches(ROOT.reco.Muon.SegmentArbitration);\n if (deltaR(muons[i],muons[j]) < 0.4 or (n1 > 0 and n2 > 0 and ROOT.muon.sharedSegments(muons[i],muons[j]) >= 0.5*min(n1,n2))):\n bad = True;\n break;\n if (bad):\n out.append(muons[i]);\n return out", "def filter_unknown_bases(self):\n self.failed[\"unknowns\"] = self.stats.index[\n self.stats[\"unknowns\"] > self.tolerance[\"unknowns\"]\n ]\n self.passed = self.stats.drop(self.failed[\"unknowns\"])", "def is_chain_valid(self, chain):\r\n previous_block = chain[0]\r\n block_index = 1\r\n while block_index < len(chain):\r\n block = chain[block_index]\r\n if block['previous_hash'] != self.hash(previous_block):\r\n return False\r\n previous_proof = previous_block['proof']\r\n proof = block['proof']\r\n hash_operation = self.hash(block)\r\n if hash_operation[:4] != '0000':\r\n return False\r\n previous_block = block\r\n block_index += 1\r\n return True", "def reject_fairness(experiment):\n num_heads = len([flip for flip in experiment if flip])\n return num_heads < 468 or num_heads > 531", "def validate_chain(self):\n chain_length = len(self.chain)\n isValid = None\n\n if(chain_length == 1):\n return \"Add blocks and then validate chain.\"\n\n for x in range(0, chain_length, 1):\n if(x < chain_length-1):\n # Hash the current block\n hash = self.hash(self.chain[x])\n next_block_prev_hash = self.chain[x+1]['prev_hash']\n\n if(hash == \"0\" and x == 0):\n isValid = True\n\n if(hash != next_block_prev_hash):\n isValid = False\n return isValid\n else:\n isValid = True\n\n return isValid", "def test_ensure_passage_is_not_removed(self):\n simple = self.TEI.getPassage(MyCapytain.common.reference.Reference(\"1.pr.1-1.2.5\"))\n orig_refs = self.TEI.getValidReff(level=3)\n self.assertIn(\"1.pr.1\", orig_refs)\n self.assertIn(\"1.1.1\", orig_refs)\n self.assertIn(\"1.2.4\", orig_refs)\n self.assertIn(\"1.2.5\", orig_refs)\n\n simple = self.TEI.getPassage(MyCapytain.common.reference.Reference(\"1.pr-1.2\"))\n orig_refs = self.TEI.getValidReff(level=3)\n self.assertIn(\"1.pr.1\", orig_refs)\n self.assertIn(\"1.1.1\", orig_refs)\n self.assertIn(\"1.2.4\", orig_refs)\n self.assertIn(\"1.2.5\", orig_refs)", "def check_auto_reject(self):\r\n for pr in self:\r\n if not pr.line_ids.filtered(lambda l: l.cancelled is False):\r\n pr.write({'state': 'rejected'})", "def repair(self):\n self.dot = self.dot.replace('()', '..').replace('(.)', '...').replace('(..)', '....').replace('(...)', '.....')\n self.matrix = pair_matrix(self)\n length = len(self.seq)\n for x in range(length):\n for y in range(x, length):\n if self.matrix[x, y] == 1:\n if not is_pair_allowed(self.seq[x], self.seq[y]):\n self.dot = self.dot[:x] + '.' + self.dot[x + 1:y] + '.' 
+ self.dot[y + 1:]\n return self", "def diagnose_chain(chain):\n if chain[0] == 'all':\n dir = data.meta_dir_base()\n if os.path.exists(dir):\n for chain_id in os.listdir(dir):\n if utils.valid_chain_id(chain_id):\n diagnose_server(chain_id)\n else:\n consoler.info(' No published chain exist, do nothing.')\n else:\n for i in range(len(chain)):\n chain_get = chain[i].split(':')\n if len(chain_get) == 1:\n if utils.valid_chain_id(chain_get[0]):\n diagnose_server(chain_get[0])\n else:\n consoler.info(\n ' skip, invalid chain_id, chain_id is %s', chain_get[0])\n elif len(chain_get) == 2:\n if utils.valid_chain_id(chain_get[0]):\n if utils.valid_ip(chain_get[1]):\n ansible.diagnose_module(\n chain_get[1], ansible.get_dir() + '/' + chain_get[0])\n else:\n consoler.info(\n ' skip, invalid host, chain_id is %s, host is %s', chain_get[0], chain_get[1])\n else:\n consoler.info(\n ' skip, invalid chain_id, chain_id is %s, host is %s', chain_get[0], chain_get[1])\n else:\n consoler.info(\n ' skip, invalid format, not chain_id:host, input %s', chain_get)", "def checkGuide(seq, plen, pam, rpam, is_upstream_pam):\n if is_upstream_pam:\n if pam.match(seq[:plen]):\n yield seq, \"+\"\n if rpam.match(seq[-plen:]):\n yield reverseComplement(seq), \"-\"\n else:\n if pam.match(seq[-plen:]):\n yield seq, \"+\"\n if rpam.match(seq[:plen]):\n yield reverseComplement(seq), \"-\"\n #yield \"\", \"\"", "def badExitPrevMolecule(self):\n if self.molecules > 0:\n # collect list of any atoms where num departed is not expected num per molecule\n departErrors = [(atom.name, count) for atom, count in self.departed.items() if self.departed[atom] != atom.value]\n if len(departErrors) > 0:\n print(\"too many or too few atoms exited between previous and this molecule creations.\")\n print( \"Exit counts:\", departErrors)\n return False\n return True", "def test_sort_chain_two_structure_3():\n chain = N.Node(2, N.Node(2))\n result = A8.sort_chain(chain)\n\n assert result is not None, \"sort_chain returned empty chain given input chain size 2 with dupicates\"\n assert result.next is not None, \"sort_chain returned singleton chain given input chain size 2 with dupicates\"\n assert result.next.next is None, \"sort_chain returned extended chain given input chain size 2 with dupicates\"", "def check_seps(seps):\n if seps:\n seps = seps.keys()\n seps.sort()\n NodeWarning.emit(\n \"Ignoring separator node(s) without accompanying content \"\n \"node: %s\" % (', '.join(map(repr, seps)),), stacklevel=5\n )", "def without(self, *args):\n return self.reject(lambda x: x in args)", "def pre_flight(self):\n\n def check_already_chain():\n \"\"\"In case a user runs the command twice on the same input\"\"\"\n all_are_already_rigids = True\n for transform, _ in self._pairs:\n if not transform.shape(type=\"rdRigid\"):\n all_are_already_rigids = False\n break\n\n assert not all_are_already_rigids, (\n \"Every transform is already dynamic\"\n )\n\n def check_hierarchy():\n \"\"\"Ensure incoming chain reflects a physical hierarchy\n\n For example, this is what a valid selection looks like,\n including a non-selected offset group that is still part\n of the hierarchy.\n\n o- clavicle\n |-o upperArm\n |-o lowerArm\n |-o ( offsetGroup )\n |-o hand\n\n \"\"\"\n\n pairs = self._pairs[:]\n while pairs:\n transform, _ = pairs.pop()\n expected_parent, _ = pairs[-1] if pairs else (None, None)\n\n if not expected_parent:\n break\n\n # Walk up the hierarchy until you find what\n # is supposed to be the parent.\n #\n # .\n # |--o a /|\\\n # |--o B |\n # |--o c 
|\n # |--o d |\n # |\n #\n valid = False\n for parent in transform.lineage():\n if parent == expected_parent:\n valid = True\n break\n\n problem = (\n \"%s was not a parent of %s\" % (\n expected_parent, transform)\n )\n\n # Ok, so the prior link isn't a parent, but we\n # also must make it isn't a child of the subsequent\n # link, as that would mean a cycle\n #\n # |\n # |--o a |\n # |--o B |\n # |--o c |\n # |--o d |\n # \\ /\n # `\n if not valid:\n is_child = False\n for child in transform.descendents():\n if child == expected_parent:\n is_child = True\n break\n\n # It's valid if the Maya parent isn't a Ragdoll child\n valid = not is_child\n\n # This flips the problem on its head\n problem = (\n \"%s cannot be a child of %s, that's a cycle\" % (\n expected_parent, transform)\n )\n\n assert valid, problem\n\n def pre_cache():\n \"\"\"Pre-cache attributes to avoid needless evaluation\"\"\"\n for transform, _ in self._pairs:\n world_matrix = transform[\"worldMatrix\"][0].asMatrix()\n parent_matrix = transform[\"parentMatrix\"][0].asMatrix()\n matrix = transform[\"matrix\"].asMatrix()\n translate = transform[\"translate\"].as_vector()\n rotate = transform[\"rotate\"].as_euler()\n\n if \"jointOrient\" in transform:\n joint_orient = transform[\"jointOrient\"].as_quaternion()\n else:\n # Only joints have these\n joint_orient = cmdx.Quaternion()\n\n self._cache[(transform, \"worldMatrix\")] = world_matrix\n self._cache[(transform, \"parentMatrix\")] = parent_matrix\n self._cache[(transform, \"matrix\")] = matrix\n self._cache[(transform, \"translate\")] = translate\n self._cache[(transform, \"rotate\")] = rotate\n self._cache[(transform, \"jointOrient\")] = joint_orient\n\n def remember_existing_inputs():\n # Remember existing animation\n for transform, _ in self._pairs:\n transform.data[\"priorConnections\"] = {}\n for channel in (\"translate\", \"rotate\"):\n for axis in \"XYZ\":\n src = transform[channel + axis]\n src = src.connection(plug=True, destination=False)\n\n if src is not None:\n anim = transform.data[\"priorConnections\"]\n dst = \"in%s%s1\" % (channel.title(), axis)\n anim[src] = dst\n\n check_already_chain()\n check_hierarchy()\n remember_existing_inputs()\n pre_cache()\n\n return True", "def resolve_conflict(self):\n neighbours = self.nodes\n new_chain = None\n\n max_length = len(self.chain)\n\n for node in neighbours:\n response = requests.get('http://{}/chain'.format(node))\n if response.status_code == 200:\n length = response.json()['length']\n chain = response.json()['chain']\n\n if length > max_length and self.valid_chain(chain):\n max_length = length\n new_chain = chain\n if new_chain:\n self.chain = new_chain\n return True\n return False", "def solve_part1(start):\n all_ilists = load_inputs()\n\n allergen_map = get_allergen_map(all_ilists)\n all_ingredients = get_all_ingredients(all_ilists)\n\n all_potential_bad_ingredients = set()\n\n for l in allergen_map.values():\n all_potential_bad_ingredients.update(l)\n\n safe_ingredients = [a for a in all_ingredients if a not in all_potential_bad_ingredients]\n\n safe_ingred_count = 0\n for ilist in all_ilists:\n this_ingredients = ilist.get_ingredients()\n this_safe_ingredients = [a for a in this_ingredients if a in safe_ingredients]\n safe_ingred_count += len(this_safe_ingredients)\n\n return safe_ingred_count", "def _filter_committees_failing_pareto_optimality(self, profile: list[set[int]], committees: list[list[int]]) -> list[list[int]]:\n pareto_dominated_parties = set()\n for party1 in self.parties:\n for party2 in 
self.parties:\n if self._pareto_dominance(party1, party2, profile):\n pareto_dominated_parties.add(party2)\n possible_committees = [committee for committee in committees if set(committee).isdisjoint(pareto_dominated_parties)]\n return possible_committees", "def _filter_committees_failing_weak_representation_clever(self, profile: list[set[int]], committees: list[list[int]],\n value_of_committee_for_ballot: dict) -> list[list[int]]:\n unique_approval_scores = self._compute_unique_approval_scores(profile)\n parties_deserving_representation = {party for party in self.parties if\n unique_approval_scores[party] >= self.n / self.k}\n\n required_number_of_approved_members = {}\n for ballot in profile:\n ballot_as_tuple = tuple(ballot)\n if len(ballot) == 1 and ballot_as_tuple[0] in parties_deserving_representation:\n required_number_of_approved_members[ballot_as_tuple] = 1\n else:\n required_number_of_approved_members[ballot_as_tuple] = 0\n\n reduced_profile = [ballot for ballot in profile if not ballot.issubset(parties_deserving_representation)]\n reduced_profile.reverse()\n 'By our construction, profiles are always ordered such that A_i subset A_j if and only if i <= j.'\n 'By reversing the profile, all subsets of a voters ballots are therefore right of it.'\n 'This is required for _profile_contains_subset_list as this profile looks from the start index to the right'\n 'side for find finding subset lists.'\n for index in range(0, len(reduced_profile)):\n if self._profile_contains_subset_list(reduced_profile, reduced_profile[index], index + 1, math.ceil(self.n / self.k)-1):\n required_number_of_approved_members[tuple(reduced_profile[index])] = \\\n len(parties_deserving_representation.intersection(reduced_profile[index])) + 1\n return [committee for committee in committees\n if all([value_of_committee_for_ballot[tuple([entry for item in [committee, ballot] for entry in item])]\n >= required_number_of_approved_members[tuple(ballot)] for ballot in profile])]", "def test_bad_curie_in_list():\n with pytest.raises(ValidationError):\n pub = Publication(id='PMID:123', mesh_terms=['foo:bar', 'bad_curie'])", "def remove_ill_matched_pair(phi1,S1,TU1,TV1): #---- remove ill matched pair\r\n #--- mark inlier= 1; outlier= 0 ---\r\n mask, phi0= pano_tools.remove_outlier(phi1);\r\n mask, S0 = pano_tools.remove_outlier(S1 ,Nstd=2, mask= mask);\r\n mask, TU0 = pano_tools.remove_outlier(TU1 ,Nstd=2, mask= mask);\r\n mask, TV0 = pano_tools.remove_outlier(TV1 ,Nstd=2, mask= mask); \r\n mask, phi0= pano_tools.remove_outlier(phi1,Nstd=3, mask= mask);\r\n mask, S0 = pano_tools.remove_outlier(S1 ,Nstd=3, mask= mask);\r\n mask, TU0 = pano_tools.remove_outlier(TU1 ,Nstd=3, mask= mask);\r\n #--- select reliable data pair ---\r\n # mask is M*M matrix: 1= reliable pair combination;\r\n M = phi1.shape[0];\r\n sumx= np.sum(mask,axis=0); # large number= reliable\r\n seq = []; # chosen reliable data\r\n for k in range(0, int(M*0.7)):\r\n maxx = np.argmax(sumx);\r\n seq.append(maxx);\r\n sumx[maxx]= 0; \r\n return seq, phi0, S0, TU0, TV0", "def test_stratis_bad_subcommand(self):\n for command_line in [\n [\"notasub\"],\n [\"daemon\", \"notasub\"],\n [\"pool\", \"notasub\"],\n [\"blockdev\", \"notasub\"],\n [\"filesystem\", \"notasub\"],\n ]:\n for prefix in [[], [\"--propagate\"]]:\n self.check_system_exit(prefix + command_line, _PARSE_ERROR)", "def test_sort_chain_multiple_structure_decreasing():\n n = 14\n data = range(n)\n chain = None\n for item in data:\n chain = N.Node(item, chain)\n\n result = A8.sort_chain(chain)\n\n 
walker = result\n for i in range(n):\n assert walker is not None, \"sort_chain returned chain of length {} given chain with values decreasing\".format(i)\n walker = walker.next\n\n assert walker is None, \"sort_chain returned chain longer than length {} given chain with values decreasing\".format(n)", "def clean_edges(self):", "def unprocessed(self):\n for v in self.iter():\n if v.intersect and not v.checked:\n yield True", "def test_sort_chain_two_content_2():\n chain = N.Node(3, N.Node(2))\n result = A8.sort_chain(chain)\n\n assert result.data <= result.next.data, \"sort_chain returned chain out of order given input chain size 2 in reverse order\"", "def purgeTrp(atoms):\n for a in atoms:\n found = False\n if getAtype(a) == \"N\":\n for c in atoms:\n if not c == a and dist(c,a) < COVALENT_BOND_DIST:\n found = True\n if not found:\n atoms.remove(a)\n return atoms\n if DEBUG: print \"Warning! Residue %s appears to be incomplete\" % (atoms[0][17:20]+atoms[0][22:26]+atoms[0][21])\n return False", "def normalize_chain(cls, chain):\n\n if isinstance(chain, cls):\n chain = (chain,)\n return tuple(x for x in chain if x is not None)", "def verify_chain():\n for (index,block) in enumerate(blockchain):\n if index ==0:\n continue\n if block['previous_hash'] != hash_block(blockchain[index-1]):\n return False\n if not valid_proof(block['transactions'][:-1],block['previous_hash'],block['proof']):\n print('Proof of Work is Invalid')\n return False\n return True", "def violated(self) -> bool:\n ...", "def fork_choice(chainA, chainB):\n\n if not chainA:\n if validate_all_transactions_and_blocks(chainB):\n print(\"There's no ChainA, and ChainB is valid!\")\n return chainB\n elif chainB.get_size() > chainA.get_size():\n if validate_all_transactions_and_blocks(chainB):\n print(\"ChainB is longer and valid!\")\n return chainB\n return chainA", "def test_specific_peer_prefixes_accepted_nonexistant_peer(self):\n accepted_prefixes = self.pybird.get_peer_prefixes_accepted(\"PS99\")\n self.assertEquals(len(accepted_prefixes), 0)", "def test_reject_negative(self):\n self.spawn(\"./binary\").stdin(\"-1\").reject()", "def test_transfer_blocked(chain, token, shareholder1, boogieman):\n\n set_state(chain, token, canTransferFlag=False)\n with pytest.raises(ValueError):\n token.transact({\"from\": shareholder1}).transfer(boogieman, 4000)", "def test_disallow_feature() -> None:\n # Given 'a', this FST will reject\n # Given 'b', this FST will reject\n # Given 'c', this FST will print both 'a', and 'b'\n fst = make_fst(\n # 1 -@D.x@-> 5; 5 -0:a-> (2)\n \"1 150 5 0\",\n \"5 0 97 2 0\",\n # 1 -@D.x@-> 6; 6 -0:b-> (2)\n \"1 150 6 0\",\n \"6 0 98 2 0\",\n )\n\n assert set(fst.generate(\"a\")) == set()\n assert set(fst.generate(\"b\")) == set()\n assert set(fst.generate(\"c\")) == {\"a\", \"b\"}", "def test_heuristic_abort(self):\n graph = {}\n for u in self.complete:\n graph[u] = set()\n for v in self.complete[u]:\n if u != v: # ignore self-loop\n graph[u].add(v)\n next_node = min_fill_in_heuristic(graph)\n if next_node is None:\n pass\n else:\n assert False", "def test_no_combine_with_fee(self):\n unspents_single = [Unspent(5000, 0, '', '', 0)]\n unspents_original = [Unspent(5000, 0, '', '', 0), Unspent(5000, 0, '', '', 0)]\n outputs_original = [(RETURN_ADDRESS, 1000, 'satoshi')]\n\n unspents, outputs = sanitize_tx_data(\n unspents_original,\n outputs_original,\n fee=1,\n leftover=RETURN_ADDRESS,\n combine=False,\n message=None,\n version='test',\n )\n\n unspents_single, outputs_single = sanitize_tx_data(\n unspents_single,\n 
outputs_original,\n fee=1,\n leftover=RETURN_ADDRESS,\n combine=False,\n message=None,\n version='test',\n )\n\n assert unspents == [Unspent(5000, 0, '', '', 0)]\n assert unspents_single == [Unspent(5000, 0, '', '', 0)]\n assert len(outputs) == 2\n assert len(outputs_single) == 2\n assert outputs[1][0] == RETURN_ADDRESS\n assert outputs_single[1][0] == RETURN_ADDRESS\n assert outputs[1][1] == outputs_single[1][1]", "def empty_chain(self, chain, wrap=True):\n chained_rules = [rule for rule in self.rules\n if rule.chain == chain and rule.wrap == wrap]\n if chained_rules:\n self.dirty = True\n for rule in chained_rules:\n self.rules.remove(rule)", "def test_sort_chain_multiple_content_increasing():\n n = 11\n data = range(n)\n chain = None\n for item in data:\n chain = N.Node(n-item-1, chain)\n\n result = A8.sort_chain(chain)\n\n walker = result\n prev = None\n seen = [False]*n\n for i in range(n):\n assert walker.data in data, \"sort_chain created extraneous data {} given chain with values increasing\".format(walker.data)\n seen[walker.data] = True\n if prev is not None:\n assert prev.data <= walker.data, \"sort_chain placed {} before {} given chain with values increasing\".format(prev.data, walker.data)\n prev = walker\n walker = walker.next\n\n for i,b in enumerate(seen):\n assert b, \"sort_chain omitted data value {} from returned chain given chain with values increasing\".format(i)", "def test_sort_chain_two_content():\n chain = N.Node(1, N.Node(2))\n result = A8.sort_chain(chain)\n\n assert result.data <= result.next.data, \"sort_chain returned chain out of order given input chain size 2 already in order\"", "def reject_fairness(experiment: List[bool]) -> bool:\n num_heads = len([flip for flip in experiment if flip])\n return num_heads < 469 or num_heads > 531", "def test_negate_tips_to_keep(self):\r\n t = DndParser(\"((S5:0.00014,S7:0.00015)0.752:0.45762,(S3:0.00014,\"\r\n \"seq6:0.00014)0.180:0.00015,(Seq1:0.00014,s2:0.00014)0.528:1.0466);\")\r\n\r\n tips_to_keep = [\"S5\", \"Seq1\", \"s2\"]\r\n expected = [\"S7\", \"S3\", \"seq6\"]\r\n self.assertItemsEqual(negate_tips_to_keep(tips_to_keep, t), expected)\r\n\r\n tips_to_keep = [\"S5\", \"Seq1\"]\r\n expected = [\"S7\", \"S3\", \"seq6\", \"s2\"]\r\n self.assertItemsEqual(negate_tips_to_keep(tips_to_keep, t), expected)\r\n\r\n tips_to_keep = []\r\n expected = [\"S7\", \"S3\", \"seq6\", \"s2\", \"S5\", \"Seq1\"]\r\n self.assertItemsEqual(negate_tips_to_keep(tips_to_keep, t), expected)\r\n\r\n tips_to_keep = [\"S7\", \"S3\", \"seq6\", \"s2\", \"S5\", \"Seq1\"]\r\n expected = []\r\n self.assertItemsEqual(negate_tips_to_keep(tips_to_keep, t), expected)", "def check_canonical(g):\n if not is_separated(g):\n raise ValueError(\"Non-separated grammar was given\")\n\n nonts = nonterminals(g)\n\n broken_rules = set()\n\n ends = {nont: set() for nont in nonts}\n counts = {nont: 0 for nont in nonts}\n\n for prod in g.productions():\n ends[prod.lhs()].add(prod.rhs()[-1])\n counts[prod.lhs()] += 1\n\n for item in prod.rhs():\n if item == g.start():\n broken_rules.add(1)\n\n for end in ends.values():\n if len(end) == 1:\n if is_nonterminal(end.pop()):\n broken_rules.add(6)\n else:\n broken_rules.add(4)\n\n for nont, num in counts.items():\n if nont == g.start():\n continue\n\n if num == 1:\n broken_rules.add(3)\n\n trash1 = unproductive(g)\n trash2 = unreachable(g)\n\n if trash1 or trash2:\n broken_rules.add(2)\n\n for n1, n2 in itertools.combinations(nonts, 2):\n if nonterm_equal(g, n1, n2):\n broken_rules.add(5)\n\n return broken_rules", "def 
test_heuristic_abort(self):\n graph = {}\n for u in self.complete:\n graph[u] = set()\n for v in self.complete[u]:\n if u != v: # ignore self-loop\n graph[u].add(v)\n\n deg_heuristic = MinDegreeHeuristic(graph)\n node = deg_heuristic.best_node(graph)\n if node is None:\n pass\n else:\n assert False", "def test_block_bad_signature(self):\n pass", "def filter_some_usages(EN):\n bad_markers = [\n # 'ecclesiastical', actually not a good idea:\n # reachtaire\n # - rector (ecclesiastical)\n # - master of ceremonies\n ]\n ret = '\\n'.join([line for line in EN.split('\\n') if\n (not line.endswith(')')\n or\n line.rsplit('(', 1)[1].rstrip(')')\n not in bad_markers)])\n if ret:\n return ret\n return EN", "def reject(self):\n self.new_rom = None\n self.new_reductor = None", "def test_invalid_overprovision_value(self):\n command_line = [\"pool\", \"overprovision\", \"thispool\", \"1.2\"]\n for prefix in [[], [\"-propagate\"]]:\n self.check_system_exit(prefix + command_line, _PARSE_ERROR)", "def unusedFromKDOTDataPreparation():", "def test_case_05_not_legal_triangle(self):\n self.__assert_equals_test_case([(4, 6, 11)], 'NotATriangle')", "def test_mark_all_OR_predecessors_dead(self):\n F = FaultDiagnosis(\"tests/TOY_graph.csv\")\n F.simulate_element_perturbation([\"8\", \"15\"])\n survived_nodes_mark = nx.get_node_attributes(F.G, 'mark')\n\n mark_all_OR_predecessors_dead = {\n '1': '1',\n '2': '2',\n '3': '3',\n '4': '4',\n '5': '5'\n }\n\n self.assertDictEqual(\n mark_all_OR_predecessors_dead,\n survived_nodes_mark,\n msg=\"MARK failure: all OR predecessors dead\")", "def test_fail_signed_in_wrong_order(self):\n # Swap the signatures from the first and second keys.\n # Note that the keys have security level of 3, so we need to swap\n # out a total of 6 signatures.\n sig_1_1 = self.bundle[1].signature_message_fragment\n sig_1_2 = self.bundle[2].signature_message_fragment\n sig_1_3 = self.bundle[3].signature_message_fragment\n\n sig_2_1 = self.bundle[4].signature_message_fragment\n sig_2_2 = self.bundle[5].signature_message_fragment\n sig_2_3 = self.bundle[6].signature_message_fragment\n\n self.bundle[1].signature_message_fragment = sig_2_1\n self.bundle[2].signature_message_fragment = sig_2_2\n self.bundle[3].signature_message_fragment = sig_2_3\n\n self.bundle[4].signature_message_fragment = sig_1_1\n self.bundle[5].signature_message_fragment = sig_1_2\n self.bundle[6].signature_message_fragment = sig_1_3\n\n validator = BundleValidator(self.bundle)\n\n self.assertFalse(validator.is_valid())\n\n self.assertListEqual(\n validator.errors,\n\n [\n 'Transaction 1 has invalid signature (using 8 fragments).',\n ],\n )", "def __eliminate_unused_constraits (self, objects):\n result = []\n for c in self.constraints_:\n if c [0] in objects and c [1] in objects:\n result.append (c)\n\n return result" ]
[ "0.67994744", "0.5958347", "0.5583729", "0.5497606", "0.54252285", "0.54134977", "0.53917265", "0.53915894", "0.53751105", "0.53494143", "0.53485787", "0.5343248", "0.5343216", "0.53384286", "0.5323294", "0.5297185", "0.5291411", "0.52887464", "0.5273759", "0.5243703", "0.52368015", "0.5216658", "0.5214974", "0.52129567", "0.5207504", "0.52067095", "0.52059996", "0.5198885", "0.51921374", "0.51851195", "0.5170005", "0.5168752", "0.51602995", "0.5146614", "0.51400197", "0.5122444", "0.5118202", "0.5109533", "0.5106572", "0.5101549", "0.50914806", "0.50712615", "0.5066726", "0.5065884", "0.5034874", "0.5034874", "0.5034874", "0.5028246", "0.5021224", "0.500627", "0.50020915", "0.49962646", "0.49921164", "0.49907815", "0.4978422", "0.49730694", "0.4963848", "0.49587452", "0.49571538", "0.49540332", "0.4938657", "0.4924031", "0.49206716", "0.4910158", "0.4903441", "0.4901376", "0.4896516", "0.48955765", "0.48938903", "0.4888983", "0.48837322", "0.48836434", "0.4879291", "0.48683658", "0.48626375", "0.48558304", "0.48491797", "0.48481908", "0.48427138", "0.4838914", "0.4832985", "0.48304164", "0.48285562", "0.4824232", "0.48204783", "0.4817906", "0.4809234", "0.48066613", "0.4799421", "0.47930953", "0.4789976", "0.47871166", "0.47828612", "0.47819713", "0.47816426", "0.47721747", "0.47689673", "0.47664788", "0.4763652", "0.47621733" ]
0.6198813
1
Marshal information deom the selected chainParts to create a
def _make_simple_label(chain_parts): if not _select_simple_chainparts(chain_parts): msg = 'Jet Configuration error: '\ 'chain fails substring selection: not "simple" ' raise NotImplementedError(msg) label = 'simple([' for cp in chain_parts: smcstr = str(cp['smc']) jvtstr = str(cp['jvt']) if smcstr == 'nosmc': smcstr = '' for i in range(int(cp['multiplicity'])): # condition_str = '(%set,%s,%s)' % (str(cp['threshold']), # str(cp['etaRange']), # smcstr,) condition_str = '(%set,%s' % (str(cp['threshold']), str(cp['etaRange']),) if smcstr: # Run 2 chains have "INF" in the SMC substring condition_str += ',%s)' % smcstr.replace('INF','') elif jvtstr: condition_str += ',%s)' % jvtstr else: condition_str += ')' label += condition_str label += '])' return label
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def makeBinaryChains():\n\t\n\t# retrieve the binding partner specifications\n\t(maxsize,types) = getTypes()\n\t\n\t# Do some basic argument checking for this model\n\tif (len(types) < 2):\n\t\tprint \"Number of defined types must equal two for binary chain calculations.\"\n\t\treturn\n\tif (maxsize == 0):\n\t\tprint \"Must specify a valid maximum number for one or more components.\"\n\t\treturn\n\n\tallChains = []\n\tnewChainsA = [[]]\n\tnewChainsB = []\n\t\n\ttypeA = types[0]\n\ttypeB = types[1]\n\t\n\t# start the chain with a single type A component\n\taddComponent(newChainsA[0],typeA,0,0)\n\n\tdepth = 0\n\tfor n in range(maxsize):\n\t\tdepth+=1\n\t\t\n\t\t# go through all the chains created last iteration and append B components\n\t\tnewChainsB = []\n\t\tfor thisChain in newChainsA:\n\n\t\t\t# get a list of new available sites in the provided chain\n\t\t\t# by setting depth -1, we will only add to components added last round\n\t\t\topenSites = makeSiteList(thisChain,typeB,depth-1)\n\t\t\t\n\t\t\t# make all the descendants from the current chain and append them to the pool\n\t\t\tif (n == 0) and (typeA['sym']): #if the starting binder is symmetric, no need to start chains at all its sites\n\t\t\t\tnewChainsB = newChainsB + fillSites(openSites,thisChain,typeB,-1)\n\t\t\telse:\n\t\t\t\tnewChainsB = newChainsB + fillSites(openSites,thisChain,typeB,depth)\n\t\t\n\t\tprint('n:'+str(n)+', '+str(len(newChainsB))+ ' chains created at depth '+str(depth))\n\t\t\n\t\tallChains = allChains + newChainsB\n\t\t\n\t\tdepth+=1\n\t\t\n\t\t# add an additional component to all the previously modified chains\n\t\tnewChainsA = []\n\t\tfor thisChain in newChainsB:\n\n\t\t\topenSites = makeSiteList(thisChain,typeA,depth-1)\n\t\t\tnewChainsA = newChainsA + fillSites(openSites,thisChain,typeA,depth)\n\t\t\t\n\t\tprint('n:'+str(n)+', '+str(len(newChainsA))+ ' chains created at depth '+str(depth))\n\t\t\n\t\tallChains = allChains + newChainsA\n\n\treturn allChains", "def serialize(self):\n return pickle.dumps([block.serialize() for block in self.chain])", "def _encode_structure(self):\n pass", "def _encode_structure(self):\n pass", "def marshal(self):\n ...", "def __rechaindict__(c):\n from TriggerMenu.menu.DictFromChainName import DictFromChainName\n dfcn = DictFromChainName()\n\n pl1 = []\n for pch in c['chainParts']:\n pl1.append(pch['L1item'])\n\n newname = c['chainName'].replace('dv_','').replace('TestChain','j')\n nchlist = [ newname ,c['chainCounter'],c['L1item'],pl1,c['stream'],\n c['groups'],c['EBstep'] ]\n \n return dfcn.getChainDict(nchlist)", "def add_chain_to_model(chain, model, atoms):\n\n if chain[\"type\"] == \"polymer\" or chain[\"type\"] == \"branched\":\n polymer = {\n \"internal_id\": chain[\"internal_id\"], \"sequence\": chain[\"sequence\"],\n \"helices\": [], \"strands\": [], \"residues\": {}\n }\n for i, group in enumerate(chain[\"groups\"], start=1):\n add_het_to_dict(group, chain, atoms, polymer[\"residues\"], number=i)\n add_ss_to_chain(polymer)\n model[\"polymer\"][chain[\"id\"]] = polymer\n else:\n for group in chain[\"groups\"]:\n add_het_to_dict(group, chain, atoms, model[chain[\"type\"]])", "def _serialise(self):\n # TODO (M Foley)\n pass", "def pack(self):\n data = {\n 'name': self._name,\n 'piece': self._piece,\n 'pos': self._pos,\n 'cash': self._cash,\n 'properties': []\n }\n\n for i in self._properties:\n data['properties'].append({'name': i.name, 'value': i.value})\n\n return data", "def marshal(self):\n raise NotImplementedError", "def adapt_chain(chain):\n type_chain = 
check_type(chain)\n name = chain.id\n if type_chain == \"nucleic_acid\":\n new_chain = Bio.PDB.Chain.Chain(name)\n chain = copy.copy(chain)\n for residue in chain:\n new_chain.add(residue.copy())\n\n for residue in new_chain:\n for atom in residue:\n if atom.id == \"C1'\":\n atom.id = \"CA\"\n residue.add(atom.copy())\n return new_chain\n else:\n return chain", "def create_from_segments(self, segment, origin=0):\r\n n = origin\r\n if segment[origin]['T'] != 'soma': # if it's a soma, only one compartment\r\n while (len(segment[n]['children']) == 1) and (segment[n]['T'] != 'soma'): # Go to the end of the branch\r\n n += 1\r\n # End of branch\r\n branch = segment[origin:n + 1]\r\n # Set attributes\r\n self.diameter, self.length, self.area, self.x, self.y, self.z = \\\r\n zip(*[(seg['diameter'], seg['length'], seg['area'], seg['x'], seg['y'], seg['z']) for seg in branch])\r\n self.diameter, self.length, self.area, self.x, self.y, self.z = array(self.diameter), array(self.length), \\\r\n array(self.area), array(self.x), array(self.y), array(self.z)\r\n self.type = segment[n]['T'] # normally same type for all compartments in the branch\r\n # Create children (list)\r\n self.children = [Morphology().create_from_segments(segment, origin=c) for c in segment[n]['children']]\r\n # Create dictionary of names (enumerates children from number 1)\r\n for i, child in enumerate(self.children):\r\n self._namedkid[str(i + 1)] = child\r\n # Name the child if possible\r\n if child.type in ['soma', 'axon', 'dendrite']:\r\n if child.type in self._namedkid:\r\n self._namedkid[child.type] = None # two children with the same name: erase (see next block)\r\n else:\r\n self._namedkid[child.type] = child\r\n # Erase useless names\r\n for k in self._namedkid.keys():\r\n if self._namedkid[k] is None:\r\n del self._namedkid[k]\r\n # If two kids, name them L (left) and R (right)\r\n if len(self.children) == 2:\r\n self._namedkid['L'] = self._namedkid['1']\r\n self._namedkid['R'] = self._namedkid['2']\r\n return self", "def save_chain(self):\n pprint('saving to file named bc_file.txt')\n with open('ddos_bc_file.txt', 'w') as output:\n output.write(serializer.serialize(self.chain))", "def make_mixture_info(parts, operation='+'):\n # type: (List[ModelInfo], str) -> ModelInfo\n # Build new parameter list\n combined_pars = []\n\n # When creating a mixture model that is a sum of product models (ie (1*2)+(3*4))\n # the parameters for models 1 & 2 will be prefixed with A & B respectively,\n # but so will the parameters for models 3 & 4. We need to rename models 3 & 4\n # so that they are prefixed with C & D to avoid overlap of parameter names.\n used_prefixes = []\n for part in parts:\n if part.composition and part.composition[0] == 'mixture':\n i = 0\n for submodel in part.composition[1]:\n npars = len(submodel.parameters.kernel_parameters)\n # List of params of one of the constituent models of part\n submodel_pars = part.parameters.kernel_parameters[i:i+npars]\n # Prefix of the constituent model\n prefix = submodel_pars[0].name[0]\n if prefix not in used_prefixes: # Haven't seen this prefix so far\n used_prefixes.append(prefix)\n i += npars\n continue\n # TODO: don't modify submodel --- it may be used elsewhere\n # Existing code probably doesn't keep a handle on the model\n # parts so its probably okay, but it's possible that a mix\n # on user defined mixture models models will change the\n # parameters used for the parts in the GUI. Even worse if the\n # same plugin is used twice. 
For example, twosphere.py\n # contains sphere+sphere and you create twosphere+twosphere.\n while prefix in used_prefixes:\n # This prefix has been already used, so change it to the\n # next letter that hasn't been used\n prefix = chr(ord(prefix) + 1)\n used_prefixes.append(prefix)\n prefix += \"_\"\n # Update the parameters of this constituent model to use the\n # new prefix\n for par in submodel_pars:\n # Strip {prefix}_ using par.name[2:], etc.\n # TODO: fails for AB_scale\n par.id = prefix + par.id[2:]\n par.name = prefix + par.name[2:]\n if par.length_control is not None:\n par.length_control = prefix + par.length_control[2:]\n i += npars\n\n for part in parts:\n # Parameter prefix per model, A_, B_, ...\n # Note that prefix must also be applied to id and length_control\n # to support vector parameters\n prefix = ''\n if not part.composition or part.composition[0] == 'product':\n # Model isn't a composition model, so its parameters don't have a\n # a prefix. Add the next available prefix\n prefix = chr(ord('A')+len(used_prefixes))\n used_prefixes.append(prefix)\n prefix += '_'\n\n if operation == '+':\n # If model is a sum model, each constituent model gets its own scale parameter\n scale_prefix = prefix\n if prefix == '' and getattr(part, \"operation\", '') == '*':\n # `part` is a composition product model. Find the prefixes of\n # its parameters to form a new prefix for the scale.\n # For example, a model with A*B*C will have ABC_scale.\n sub_prefixes = []\n for param in part.parameters.kernel_parameters:\n # Prefix of constituent model\n sub_prefix = param.id.split('_')[0]\n if sub_prefix not in sub_prefixes:\n sub_prefixes.append(sub_prefix)\n # Concatenate sub_prefixes to form prefix for the scale\n scale_prefix = ''.join(sub_prefixes) + '_'\n scale = Parameter(scale_prefix + 'scale', default=1.0,\n description=\"model intensity for \" + part.name)\n combined_pars.append(scale)\n for p in part.parameters.kernel_parameters:\n p = copy(p)\n p.name = prefix + p.name\n p.id = prefix + p.id\n if p.length_control is not None:\n p.length_control = prefix + p.length_control\n combined_pars.append(p)\n parameters = ParameterTable(combined_pars)\n # Allow for the scenario in which each component has all its PD parameters\n # active simultaneously. 
details.make_details() will throw an error if\n # too many are used from any one component.\n parameters.max_pd = sum(part.parameters.max_pd for part in parts)\n\n def random():\n \"\"\"Random set of model parameters for mixture model\"\"\"\n combined_pars = {}\n for k, part in enumerate(parts):\n prefix = chr(ord('A')+k) + '_'\n pars = part.random()\n combined_pars.update((prefix+k, v) for k, v in pars.items())\n return combined_pars\n\n model_info = ModelInfo()\n model_info.id = operation.join(part.id for part in parts)\n model_info.operation = operation\n model_info.name = '(' + operation.join(part.name for part in parts) + ')'\n model_info.filename = None\n model_info.title = 'Mixture model with ' + model_info.name\n model_info.description = model_info.title\n model_info.docs = model_info.title\n model_info.category = \"custom\"\n model_info.parameters = parameters\n model_info.random = random\n #model_info.single = any(part['single'] for part in parts)\n model_info.structure_factor = False\n #model_info.tests = []\n #model_info.source = []\n # Remember the component info blocks so we can build the model\n model_info.composition = ('mixture', parts)\n return model_info", "def writeBlocks(self):\n dataFile = open(\"chain.txt\", \"w\")\n chainData = []\n for eachBlock in self.chain:\n chainData.append(eachBlock.__dict__)\n dataFile.write(json.dumps(chainData, indent=4))\n dataFile.close()", "def construct_fragments(self):\n for frag_dict in self.fragment_dict_list:\n try:\n chain = self.model[frag_dict[\"chain_id\"]]\n frag = chain[frag_dict[\"frag_id\"]]\n except KeyError:\n self.add_fragment(frag_dict, None)\n continue\n\n self.add_fragment(frag_dict, frag)", "def _pack(self):\n pass", "def pack_goods(self, by=None):", "def pack_goods(self, by=None):", "def createInnerRepresentation(self):\n\n for idx, single_block in enumerate(self._block_list):\n del self._to_be_processed[:]\n del self._metastring_rest[:]\n self._metastring_rest.append(self._metastring[idx])\n self.addMetastringPointer(single_block)", "def create(data):\n \n return Part(\n part_id = data['part_num'],\n category_id = data['part_cat_id'],\n external_ids = data.get('external_ids', {}),\n name = data['name'],\n year_from = data.get('year_from', None),\n year_to = data.get('year_to', None),\n url = data.get('part_url', None),\n img_url = data.get('part_img_url', None),\n print_of = data.get('print_of', None),\n prints = data.get('prints', []),\n molds = data.get('molds', []),\n alternates = data.get('alternates', []))", "def assemble_parts(self):\n self.parts['whole'] = self.output\n self.parts['encoding'] = self.document.settings.output_encoding\n self.parts['version'] = docutils.__version__", "def serialize(self):", "def _marshal(self, pieces):\n payload = b''.join(pieces)\n return struct.pack('>BHI', self.frame_type, self.channel_number,\n len(payload)) + payload + bytes((spec.FRAME_END,))", "def _blob(self):\n self.__rewrite_sldIdLst()\n # # at least the following needs to be added before using\n # # _reltype_ordering again for Presentation\n # self.__rewrite_notesMasterIdLst()\n # self.__rewrite_handoutMasterIdLst()\n # self.__rewrite_sldMasterIdLst()\n return super(Presentation, self)._blob", "def get_structure(self):\n return self.chain.model.structure", "def marshal(self):\n return self._marshal(list())", "def marshal(self):\n return self._marshal([self.fragment])", "def chainsetup(filename, cation, facets, operation, end_radii, nradii,\n adensity):\n\n # Load the Cage from the file\n try:\n # If that fails, try 
other file formats supported by pymatgen\n anion = Cage.from_file(filename)\n except ValueError:\n # If that fails, try the VASP POSCAR format\n anion = Cage.from_poscar(filename)\n\n # Center the anion around the origin\n anion.center()\n\n # Find the chain edges, i.e. the paths between the edge sharing facets of\n # the chain of non-equivalent facets.\n anion.find_surface_facets(ignore=IGNORE)\n\n if not facets == tuple:\n chosen_facets = [anion.facets[index] for index in facets]\n edges = anion.find_noneq_chain_links(chosen_facets)\n else:\n edges = anion.find_noneq_chain_links()\n\n total_mol = anion.copy()\n\n chain_dir = 'chain_' + operation\n try:\n os.mkdir(chain_dir)\n except FileExistsError:\n pass\n\n # For each edge, set up the calculation input files\n edge_number = 1\n\n for edge in edges:\n\n # Set up the edge directory\n edge_dir = os.path.join(chain_dir, \"edge\" + str(edge_number))\n\n while os.path.exists(edge_dir):\n edge_number += 1\n edge_dir = os.path.join(chain_dir, \"edge\" + str(edge_number))\n\n os.mkdir(edge_dir)\n\n # Write out the molecule and path facets to the edge directory\n anion.to(fmt=\"json\", filename=os.path.join(edge_dir, \"molecule.json\"))\n edge[0].to(fmt=\"json\", filename=os.path.join(edge_dir,\n \"init_facet.json\"))\n edge[1].to(fmt=\"json\", filename=os.path.join(edge_dir,\n \"final_facet.json\"))\n\n # Get copies so the originals aren't mutated\n edge_mol = anion.copy()\n facet1 = edge[0].copy()\n facet2 = edge[1].copy()\n\n if edge == edges[-1]:\n remove_endline = False\n else:\n remove_endline = True\n\n # Set up the landscape\n landscape = set_up_edge_landscape(facet1, facet2,\n endpoint_radii=end_radii,\n number_of_radii=nradii,\n angle_density=adensity,\n remove_endline=remove_endline)\n\n # Get the molecule for each landscape point\n molecules = set_up_molecules(edge_mol, landscape, cation)\n\n # Set up an xyz file to visualize the edge and total landscape\n for point in landscape.points:\n try:\n total_mol.append(pmg.Specie(cation, 1), point,\n validate_proximity=False)\n edge_mol.append(pmg.Specie(cation, 1), point,\n validate_proximity=False)\n except ValueError:\n pass\n\n edge_mol.to(fmt=\"xyz\", filename=os.path.join(edge_dir, \"edge.xyz\"))\n\n # In case the molecules must be optimized, add the constraints and\n # optimization setup (DRIVER)\n if operation == \"optimize\":\n far_facet = anion.find_farthest_facet(landscape.center)\n constraints = find_constraints(anion, far_facet.sites)\n constraints['fix atom'] += ' ' + str(len(anion.sites) + 1)\n ALT_SETUP['constraints'] = constraints\n ALT_SETUP[\"driver\"] = DRIVER_SETUP\n\n # Set up the task for the calculations\n tasks = [nwchem.NwTask(molecules[0].charge, None, BASIS,\n theory=\"dft\",\n operation=operation,\n theory_directives=THEORY_SETUP,\n alternate_directives=ALT_SETUP)]\n\n # Set up the input files\n study = Study(molecules, tasks)\n study.set_up_input(edge_dir, sort_comp=False,\n geometry_options=GEO_SETUP)\n\n edge_number += 1\n\n # Set up an xyz file with all the paths\n total_mol.to(fmt=\"xyz\", filename=os.path.join(chain_dir, \"total_mol.xyz\"))", "def build_serializer(self):\n self._add_child_elements_recursive(self.get_root_element())", "def add_chain_signature(\n self, prop: str, key: JWK, alg: Optional[AlgorithmName] = None,\n header: Optional[JsonObject] = None) -> None:\n top_level_signature = self._payload.get(prop)\n for k in top_level_signature.keys():\n if k != _CHAIN:\n del top_level_signature[k]\n chain = top_level_signature.get(_CHAIN, [])\n 
self._add_signature(prop, key, alg, header,\n lambda h: {_CHAIN: chain + [h]},\n lambda h: (self._payload\n .setdefault(prop, {})\n .setdefault(_CHAIN, [])\n .append(h)))", "def trip_chain(self):\n pass", "def __init__(self, objects=()):\n\n vtk.vtkPropAssembly.__init__(self)\n\n self.name = \"\"\n self.created = \"\"\n self.trail = None\n self.trail_points = []\n self.trail_segment_size = 0\n self.trail_offset = None\n self.shadows = []\n self.info = {}\n self.rendered_at = set()\n self.transform = None\n self.scalarbar = None\n\n for a in vedo.utils.flatten(objects):\n if a:\n self.AddPart(a)\n\n self.PickableOff()", "def parse_part(self):\n parts = []\n for part in re.split(r'\\*\\*\\* ([A-Z- ]+) \\*\\*\\*', self.hand_file): # return [ 'part1', 'splitter1', 'part2',..\n parts.append(part)\n\n for i in range(0, len(parts)):\n if i == 0:\n self.part_dict['HEADER'] = parts[i]\n if i % 2 != 0: # number is odd\n self.part_dict[parts[i]] = parts[i + 1]", "def read_chain(self, _id):\n chain_def = {'class' : 'chain', 'type' : ''}\n for i in self.config_reader.options(_id):\n chain_def[i] = self.config_reader.get(_id, i)\n logging.debug(\"Registering chain %s\", _id)\n self.instances.register(_id,chain_def,self.create_chain)", "def marshal_departments(result):\n return result", "def dump_parts(self, io):\n\n # XXX refactor with Tempita\n title = \"Parts created by the docutils writer '%s'\" % self.strategy.name\n io.say(title + os.linesep)\n io.say(len(title) * '-')\n io.say(2 * os.linesep)\n io.say('Part keys: ' + 2 * os.linesep)\n\n parts = self.publish_parts(io)\n io.say(os.linesep.join(sorted(parts.keys())))\n io.say(2 * os.linesep)\n for part in parts:\n io.say(\"Value of part '%s':%s\" % (part, os.linesep))\n io.say(parts[part].encode('utf-8') + os.linesep)\n io.say(80*'-'+os.linesep)\n io.say(os.linesep)", "def addChain(self, chain):\n\n\t\tself.chain.append(chain)\n\t\tchain.parentMolecule = self", "def normalize_chain(cls, chain):\n\n if isinstance(chain, cls):\n chain = (chain,)\n return tuple(x for x in chain if x is not None)", "def serialize(self):\n return {\n 'id': self.id,\n 'name': self.name,\n 'ingredients': self.ingredients,\n 'directions': self.directions,\n 'type': self.type,\n }", "def toPartners(self):\n num_bases = len(self) #number of bases\n result = [None] * len(self) #array of None, one for each base\n stack = []\n start = self.StartSymbols\n end = self.EndSymbols\n for i, symbol in enumerate(self):\n if symbol in start: #open a pair\n stack.append(i)\n elif symbol in end: #close a pair\n curr = stack.pop() #return and delete last element\n result[i] = curr #make i pair with the last element...\n result[curr] = i #...and the last element pair with i\n \n #test whether there are any open pairs left unaccounted for \n if stack:\n raise IndexError, \\\n \"Too many open pairs in structure:\\n%s\" % self\n return Partners(result)", "def __init__(self, fab=None, heavy_chains=None, light_chains=None, names=None):\n # check if it's a Chain object\n if heavy_chains is None and light_chains is None and fab is None:\n raise ValueError('Provide a list of Chain objects or an ChainCollection object')\n\n # check if fab object is a list and if all object are abpytools.Fab objects\n if isinstance(fab, list) and all(isinstance(fab_i, Fab) for fab_i in fab):\n self._fab = fab\n self._light_chains = ChainCollection([x[0] for x in self._fab])\n self._heavy_chains = ChainCollection([x[1] for x in self._fab])\n\n if fab is None and (heavy_chains is not None and light_chains is not 
None):\n\n if isinstance(heavy_chains, list):\n self._heavy_chains = ChainCollection(antibody_objects=heavy_chains)\n\n elif isinstance(heavy_chains, ChainCollection):\n self._heavy_chains = heavy_chains\n\n else:\n raise ValueError('Provide a list of Chain objects or an ChainCollection object')\n\n if isinstance(light_chains, list):\n self._light_chains = ChainCollection(antibody_objects=light_chains)\n\n elif isinstance(light_chains, ChainCollection):\n self._light_chains = light_chains\n\n else:\n raise ValueError('Provide a list of Chain objects or an ChainCollection object')\n\n if len(self._light_chains.loading_status()) == 0:\n self._light_chains.load()\n\n if len(self._heavy_chains.loading_status()) == 0:\n self._heavy_chains.load()\n\n if self._light_chains.n_ab != self._heavy_chains.n_ab:\n raise ValueError('Number of heavy chains must be the same of light chains')\n\n if isinstance(names, list) and all(isinstance(name, str) for name in names):\n if len(names) == self._heavy_chains.n_ab:\n self._names = names\n else:\n raise ValueError(\n 'Length of name list must be the same as length of heavy_chains/light chains lists')\n\n elif names is None:\n self._names = ['{} - {}'.format(heavy, light) for heavy, light in zip(self._heavy_chains.names,\n self._light_chains.names)]\n\n else:\n raise ValueError(\"Names expected a list of strings, instead got {}\".format(type(names)))\n\n self._n_ab = self._light_chains.n_ab\n self._pair_sequences = [heavy + light for light, heavy in zip(self._heavy_chains.sequences,\n self._light_chains.sequences)]\n\n # keep the name of the heavy and light chains internally to keep everything in the right order\n self._internal_heavy_name = self._heavy_chains.names\n self._internal_light_name = self._light_chains.names", "def get_serializer(self, *args, **kwargs):\n kwargs['part_detail'] = True\n kwargs['location_detail'] = True\n kwargs['supplier_part_detail'] = True\n kwargs['context'] = self.get_serializer_context()\n\n return self.serializer_class(*args, **kwargs)", "def _encode_supplement(self):", "def _encode_supplement(self):", "def partid2nids(self, partid, ntype): # -> None:\n ...", "def serialize(self, data):", "def from_binary(self, d):\n p = MsgCertificateChainDep._parser.parse(d)\n for n in self.__class__.__slots__:\n setattr(self, n, getattr(p, n))", "def __init__(self, chain_instance, *args, **kwargs):\n protocol_logger('Intializing protocol processor')\n self.chain_instance = chain_instance", "def build_parts_from_dict(self, data, skip_power_controls=False):\n \n # Validate Objects information.\n if \"Objects\" not in data:\n return\n\n # Start creating parts.\n parts = []\n for part_data in data[\"Objects\"]:\n part = part_data[\"ObjectID\"].replace(\"^\", \"\")\n timestamp = part_data[\"Timestamp\"]\n user_data = part_data[\"UserData\"]\n part_position = part_data[\"Position\"]\n up_vec = part_data[\"Up\"]\n at_vec = part_data[\"At\"]\n # Build the item.\n item = self.build_item(\n part,\n timestamp,\n user_data,\n part_position,\n up_vec,\n at_vec,\n skip_power_controls\n )\n parts.append(item)\n\n return parts", "def preparing(fasta_list, pdb_dict):\n for item1 in fasta_list:\n matchObj = re.search( '^(.*)_([a-zA-Z0-9])$', item1[0])\n fasta1= item1[1]\n if matchObj:\n original_name1= matchObj.group(1)\n original_structure1=pdb_dict[original_name1]\n chain_1= matchObj.group(2) \n yield fasta1, [original_structure1, chain_1]", "def _get_chain_repr(self, chain):\n chain_repr = []\n for module in chain:\n if isinstance(module, 
collections.Iterable): # module is a chain\n chain_repr.append(self._get_chain_repr(module))\n elif hasattr(module, 'process'): # module is an object\n chain_repr.extend(\n (str(module.__class__), repr(vars(module))))\n else: # module is a function\n if isinstance(module, partial): # partial function\n chain_repr.extend((str(module.__class__), repr(module.func),\n repr(module.keywords)))\n else:\n chain_repr.append(repr(module))\n return ' '.join(chain_repr)", "def _make_partitionsTest_label(chain_parts):\n\n assert len(chain_parts) == 1\n scenario = chain_parts[0]['hypoScenario']\n \n assert scenario == 'partitionsTest'\n\n \n\n return \"\"\"\n partgen(\n [(20et, 0eta320)]\n \n simple([(40et, 0eta320) (50et, 0eta320)])\n simple([(35et, 0eta240) (55et, 0eta240)])\n )\"\"\"", "def __init__(self):\n self.chain = [Block.genesis()]", "def partid2nids(self, partid, ntype=...):\n ...", "def partid2nids(self, partid, ntype=...):\n ...", "def serialize(self, root):", "def _decode(self, parts: typing.List[int]) -> typing.Dict:\n info = {field.name: field.decode(parts[i]) for i, field in enumerate(self.fields)}\n return info", "def compose(self):\r\n return_lib = self.other\r\n return_lib.update({\r\n 'ID' : self.id,\r\n 'Name' : self.name,\r\n 'Alias' : self.alias,\r\n 'Parent' : self.parent,\r\n 'Target' : self.target,\r\n 'Gap' : self.gap,\r\n 'Derives_from' : self.derives_from,\r\n 'Note' : self.note,\r\n 'Dbxref' : self.dbxref,\r\n 'Ontology_term' : self.ontology_term,\r\n 'Is_circular' : self.is_circular,\r\n 'allele' : ','.join([a.strip() for a in self.allele])\r\n })\r\n if self.discovered != None:\r\n if self.discovered:\r\n return_lib[self.discovered_key] = self.enabled[0]\r\n else:\r\n return_lib[self.discovered_key] = self.disabled[0]\r\n if self.validated != None:\r\n if self.validated:\r\n return_lib[self.validated_key] = self.enabled[0]\r\n else:\r\n return_lib[self.validated_key] = self.disabled[0]\r\n if self.active != None:\r\n if self.active:\r\n return_lib[self.active_key] = self.disabled[0]\r\n else:\r\n return_lib[self.active_key] = self.enabled[0]\r\n for i,item in enumerate(self.history):\r\n return_lib[\"{}_{}\".format(self.history_key,i)] = ','.join(item)\r\n return_str = ';'.join([key + '=' + value for key, value in return_lib.items() if value])\r\n return return_str", "def _encode_parts(self, messages, encode_empty=False):\n if messages or encode_empty:\n return self.signer.sign_object(\n messages, serializer=MessagePartGatherSerializer, compress=True\n )", "def _create_chain(class_type_list, kwargs_list):\n chain = None # module with preprocessing chain\n modules = [] # list of modules (not connected via preprocessing)\n for i, pre_id in enumerate(class_type_list):\n chain = CModule.create(\n pre_id, preprocess=chain, **kwargs_list[i])\n modules.append(CModule.create(pre_id, **kwargs_list[i]))\n return chain, modules", "def skeleton_getHandleChain(self, typeModifier = None, jointHelpers = True, mOrientHelper = None):\n _short = self.mNode\n _str_func = 'skeleton_getHandleChain'\n #start = time.clock()\t\n log.debug(cgmGEN.logString_start(_str_func))\n \n mRigNull = self.moduleTarget.rigNull\n ml_fkJoints = mRigNull.msgList_get('fkJoints')\n \n if not ml_fkJoints:\n log.debug(\"|{0}| >> Generating handleJoints\".format(_str_func))\n \n ml_formHandles = self.msgList_get('formHandles',asMeta = True)\n if not ml_formHandles:\n raise ValueError,\"No formHandles connected\" \n \n ml_prerigHandles = self.msgList_get('prerigHandles',asMeta = True)\n if not ml_prerigHandles:\n 
raise ValueError,\"No prerigHandles connected\"\n \n if mOrientHelper is None:\n mOrientHelper = ml_formHandles[0].orientHelper or ml_prerigHandles[0].orientHelper\n \n #_d = skeleton_getCreateDict(self)\n #pprint.pprint(_d)\n l_pos = []\n for mObj in ml_prerigHandles:\n l_pos.append(mObj.p_position)\n \n ml_fkJoints = COREJOINTS.build_chain(posList = l_pos,\n axisAim='z+',\n axisUp='y+',\n parent=True,\n worldUpAxis= mOrientHelper.getAxisVector('y+'))\n \n for i,mJnt in enumerate(ml_fkJoints):\n mJnt.doCopyNameTagsFromObject(ml_prerigHandles[i].mNode, ignore = ['cgmType'])\n if not typeModifier:\n mJnt.doName()\n \n if typeModifier:\n for mJnt in ml_fkJoints:\n mJnt.addAttr('cgmTypeModifier',typeModifier,attrType='string',lock=True)\n mJnt.addAttr('cgmType','frame',attrType='string',lock=True) \n mJnt.doName()\n \n ml_fkJoints[0].p_parent = False\n else:\n log.debug(\"|{0}| >> Found fkJoints\".format(_str_func))\n \n #log.debug(\"%s >> Time >> = %0.3f seconds \" % (_str_func,(time.clock()-start)) + \"-\"*75)\t\n return ml_fkJoints", "def serialize(self):\n pass", "def __store_part(self, definition, pnum, multisubtype):\n pnum = \"1\" if pnum is None else pnum\n params = {\n \"pnum\": pnum,\n \"params\": definition[2],\n \"cid\": definition[3],\n \"description\": definition[4],\n \"encoding\": definition[5],\n \"size\": definition[6]\n }\n mtype = definition[0].lower()\n subtype = definition[1].lower()\n ftype = \"%s/%s\" % (definition[0].lower(), subtype)\n if ftype in (\"text/plain\", \"text/html\"):\n if subtype not in self.contents:\n self.contents[subtype] = [params]\n else:\n self.contents[subtype].append(params)\n return\n elif multisubtype in [\"related\"]:\n self.inlines[params[\"cid\"].strip(\"<>\")] = params\n return\n\n params[\"Content-Type\"] = ftype\n if len(definition) > 7:\n extensions = [\"md5\", \"disposition\", \"language\", \"location\"]\n if mtype == \"text\":\n extensions = [\"textlines\"] + extensions\n elif ftype == \"message/rfc822\":\n extensions = [\n \"envelopestruct\",\n \"bodystruct\",\n \"textlines\"] + extensions\n for idx, value in enumerate(definition[7:]):\n params[extensions[idx]] = value\n\n self.attachments += [params]", "def serialize(self):\n return {\n 'name' : self.name,\n 'ingredients' : self.ingredients,\n 'id' : self.id,\n 'preparation' : self.preparation,\n 'image' : self.image,\n }", "def serialize(self, pipe, pid):\n for trsp in self.transports:\n pipe.send(trsp.__class__)\n trsp.serialize(pipe, pid)", "def save_data(self):\n try:\n with open('blockchain-{}.txt'.format(self.node_id), mode='w') as f:\n saveable_chain = [block.__dict__ for block in [Block(block_el.index, block_el.previous_hash, \n [tx.__dict__ for tx in block_el.transactions], \n [tx.__dict__ for tx in block_el.chipsactions],\n [tx.__dict__ for tx in block_el.messsactions],\n block_el.proof, block_el.timestamp) for block_el in self.__chain]]\n f.write(json.dumps(saveable_chain))\n f.write('\\n')\n saveable_tx = [tx.__dict__ for tx in self.__open_transactions]\n f.write(json.dumps(saveable_tx))\n f.write('\\n')\n saveable_chip = [tx.__dict__ for tx in self.__open_chipsactions]\n f.write(json.dumps(saveable_chip))\n f.write('\\n')\n saveable_chip = [tx.__dict__ for tx in self.__open_messsactions]\n f.write(json.dumps(saveable_chip))\n f.write('\\n')\n f.write(json.dumps(list(self.__peer_nodes)))\n except IOError:\n print('Saving failed!')", "def to_binary(self):\n c = containerize(exclude_fields(self))\n self.payload = MsgCertificateChainDep._parser.build(c)\n return 
self.pack()", "def _make_information_storable( self, data ):\n\t\tpass", "def create_chain_instances(self):\n for section in self.config_reader.sections():\n self.read_chain(section)", "def chain():\n chain_identifier, url = get_vars(request, [\"id\", \"data\"])\n info('chain=%s' % chain_identifier)\n chain = LAPPS_SERVICE_CHAINS.get_chain(chain_identifier)\n info('source-url=%s' % url)\n data = requests.get(url).text\n result = chain.run({\n \"discriminator\": \"http://vocab.lappsgrid.org/ns/media/text\", \n \"payload\": data})\n info(\"discriminator=%s\" % result.get('discriminator'))\n return render_template(\"chain.html\",\n chain=chain,\n fname=url,\n result=result,\n builder=HtmlBuilder())", "def create(data):\n \n return Partlist(\n list_id = data['id'],\n name = data['name'],\n pieces = data['num_parts'])", "def make_drs_tree(self):\n pass", "def produce_chain_dict (inPath, outPath):\n with open(inPath, 'r') as fin:\n chainIDs = list(fin.read().split())\n chains = {}\n for chainid in chainIDs:\n pdbid = (chainid[ : chainid.find('_') ] if '_' in chainid else chainid)\n if pdbid in chains:\n chains[pdbid].add(chainid)\n else:\n chains[pdbid] = {chainid}\n with open(outPath, 'wb') as fOut:\n pickle.dump(chains, fOut)", "def wire_chains(self):\n allChains = self.instances.getAllChainInstances()\n for chain in allChains:\n logging.debug(\"%s\", chain)\n allChains[chain].setup_event_path()", "def setPartsToRegister(self, parts):\n internals.blpapi_ServiceRegistrationOptions_setPartsToRegister(\n self.__handle, parts)", "def _encode(self, boxes, anchors):\n pass", "def __init__(self):\n self.chain = {}\n self.blocks = {}\n self.blocks_spending_input = {}\n self.blocks_containing_tx = {}\n self.all_transactions = {}", "def trans_setup():\n # slot7 slot6 slot5 slot4 slot3 slot2 slot1 <------ beam direction (slot 8 is currently B-fiber only)\n # Be Be Be Be Be Be Be lens material\n # 1.5 1.5 0.5 0.5 0.5 0.5 0.5 lens radius [mm]\n # 1 1 5 8 4 2 1 number of lenses\n lens_R=[0.5,0.5,0.5,0.5,0.5,1.5,1.5]\n lens_mat=['Be','Be','Be','Be','Be','Be','Be']\n lens_N=[1,2,4,8,5,1,1]\n trans_pos=[35.2,35.8]\n return {'lens_material':lens_mat,'lens_number':lens_N,'lens_radius':lens_R,'trans_position':trans_pos}", "def prepare_order(acct, order):\n myaddr = (acct.address).lower()\n order[\"makerAddress\"] = myaddr\n order_struct = jsdict_order_to_struct(order) \n sig = _sign_order(acct, order_struct)\n order_struct[\"signature\"] = sig\n js_order = order_to_jsdict(order_struct)\n js_order[\"exchangeAddress\"] = exchangeAddress\n return js_order", "def _finishConstruction(self, obj):\n return obj", "def _finishConstruction(self, obj):\n return obj", "def json(self):\n blocks = [block.to_json() for block in self.chain[1:]]\n return json.dumps({'blocks': blocks})", "def pdbChain_to_mdtrajChainid_li(chain,seg_to_chain,struc):\n chain = chain.upper()\n chain_segments=[seg for seg,chainval in seg_to_chain.items() if chainval==chain]\n if chain_segments:\n structable, bonds=struc.topology.to_dataframe()\n chainid_li=[]\n for segname in chain_segments:\n seg_chainid_li=structable.loc[structable['segmentID'] == segname].chainID.unique()\n chainid_li+=list(seg_chainid_li)\n chainid_li=list(set(chainid_li))\n return chainid_li\n else:\n return False", "def genereate_echo_picklist(self):\n sample_names = []\n sample_wells = []\n indices = {'i5 name': {}, 'i5 plate': {}, 'i5 sequence': {},\n 'i5 well': {}, 'i7 name': {}, 'i7 plate': {},\n 'i7 sequence': {}, 'i7 well': {}, 'index combo': {},\n 'index combo seq': 
{}}\n\n for idx, well in enumerate(chain.from_iterable(self.plates[0].layout)):\n # Add the sample well\n sample_wells.append(well.well_id)\n # Get the sample name - we need to go back to the SampleComposition\n lib_comp = well.composition\n sample_comp = lib_comp.normalized_gdna_composition\\\n .gdna_composition.sample_composition\n sample_names.append(sample_comp.content)\n # Retrieve all the information about the indices\n i5_comp = lib_comp.i5_composition.primer_set_composition\n i5_well = i5_comp.container\n indices['i5 name'][idx] = i5_comp.external_id\n indices['i5 plate'][idx] = i5_well.plate.external_id\n indices['i5 sequence'][idx] = i5_comp.barcode\n indices['i5 well'][idx] = i5_well.well_id\n\n i7_comp = lib_comp.i7_composition.primer_set_composition\n i7_well = i7_comp.container\n indices['i7 name'][idx] = i7_comp.external_id\n indices['i7 plate'][idx] = i7_well.plate.external_id\n indices['i7 sequence'][idx] = i7_comp.barcode\n indices['i7 well'][idx] = i7_well.well_id\n\n indices['index combo seq'][idx] = '%s%s' % (\n indices['i5 sequence'][idx], indices['i7 sequence'][idx])\n\n sample_names = np.asarray(sample_names)\n sample_wells = np.asarray(sample_wells)\n indices = pd.DataFrame(indices)\n\n return LibraryPrepShotgunProcess._format_picklist(\n sample_names, sample_wells, indices)", "def marshal(self):\n pieces = self.properties.encode()\n pieces.insert(\n 0, struct.pack('>HxxQ', self.properties.INDEX, self.body_size))\n return self._marshal(pieces)", "def init(self):\n\n logger.info(mm_chain.ackn_str)\n self.acknowledgements = mm_chain.ackn_str\n self.references = mm_chain.refs['chain']\n\n return", "def __init__(self):\n self.unconfirmed_transactions = [] \n self.chain = []\n self.create_genesis_block()", "def connectPart(self,\n hsNondes: list,\n part: Union[TransPart, ChoicesOfFrameParts],\n en: Union[RtlSignal, bool],\n exclusiveEn: Optional[RtlSignal]=hBit(1)):\n busVld = self.dataIn.valid\n tToIntf = self.dataOut._fieldsToInterfaces\n\n if isinstance(part, ChoicesOfFrameParts):\n parentIntf = tToIntf[part.origin.parent.origin]\n try:\n sel = self._tmpRegsForSelect[parentIntf]\n except KeyError:\n sel = HsBuilder(self, parentIntf._select).buff().end\n self._tmpRegsForSelect[parentIntf] = sel\n unionGroup = ExclusieveListOfHsNodes(sel)\n\n # for unions\n for choice in part:\n # connect data signals of choices and collect info about\n # streams\n intfOfChoice = tToIntf[choice.tmpl.origin]\n selIndex, isSelected, isSelectValid = self.choiceIsSelected(\n intfOfChoice)\n _exclusiveEn = isSelectValid & isSelected & exclusiveEn\n\n unionMemberPart = ListOfOutNodeInfos()\n for p in choice:\n self.connectPart(unionMemberPart, p, en, _exclusiveEn)\n unionGroup.append(selIndex, unionMemberPart)\n\n hsNondes.append(unionGroup)\n\n if part.isLastPart():\n # synchronization of reading from _select register for unions\n selNode = InNodeInfo(sel, en)\n else:\n selNode = InNodeReadOnlyInfo(sel, en)\n hsNondes.append(selNode)\n return\n\n if part.isPadding:\n return\n\n fPartSig = self.getInDataSignal(part)\n fieldInfo = part.tmpl.origin\n\n try:\n signalsOfParts = self._signalsOfParts[part.tmpl]\n except KeyError:\n signalsOfParts = []\n self._signalsOfParts[part.tmpl] = signalsOfParts\n\n if part.isLastPart():\n # connect all parts in this group to output stream\n signalsOfParts.append(fPartSig)\n intf = self.dataOut._fieldsToInterfaces[fieldInfo]\n intf.data(self.byteOrderCare(\n Concat(\n *reversed(signalsOfParts)\n ))\n )\n on = OutNodeInfo(self, intf, en, exclusiveEn)\n 
hsNondes.append(on)\n else:\n dataVld = busVld & en & exclusiveEn\n # part is in some word as last part, we have to store its value to register\n # until the last part arrive\n fPartReg = self._reg(\"%s_part_%d\" % (fieldInfo.name,\n len(signalsOfParts)),\n fPartSig._dtype)\n If(dataVld,\n fPartReg(fPartSig)\n )\n signalsOfParts.append(fPartReg)", "def build_item(\n self,\n part,\n timestamp=1539023700,\n userdata=0,\n position=[0, 0, 0],\n up_vec=[0, 1, 0],\n at_vec=[0, 0, 1],\n skip_power_controls=False):\n # Get the obj path.\n item = self.retrieve_part(part)\n\n # Lock Everything if it's the BASE_FLAG or U_POWERLINE.\n # BASE_FLAG can break things if user moves it around.\n # As it acts as the \"origin\" of the base.\n locked_parts = [\"BASE_FLAG\", \"U_POWERLINE\", \"U_PIPELINE\", \"U_PORTALLINE\"]\n line_parts = [\"U_POWERLINE\", \"U_PIPELINE\", \"U_PORTALLINE\"]\n if part in locked_parts:\n item.lock_location[0] = True\n item.lock_location[1] = True\n item.lock_location[2] = True\n item.lock_rotation[0] = True\n item.lock_rotation[1] = True\n item.lock_rotation[2] = True\n item.lock_scale[0] = True\n item.lock_scale[1] = True\n item.lock_scale[2] = True\n \n # Add custom attributes.\n item[\"ObjectID\"] = part\n item[\"SnapID\"] = part\n item[\"Timestamp\"] = timestamp\n item[\"belongs_to_preset\"] = False\n # Add an order flag to retain order when generating data..\n item[\"Order\"] = self.part_order\n self.part_order += 1\n # Apply Colour\n is_powerline = part in line_parts\n is_pipeline = part in [\"U_PIPELINE\"]\n material.assign_material(\n item,\n userdata,\n powerline=is_powerline,\n pipeline=is_pipeline\n )\n\n # Move\n utils.move_to(item, position=position, up=up_vec, at=at_vec)\n\n # If the object is a powerline, we should create additional controls\n # for it.\n if is_powerline and not skip_power_controls:\n power.create_power_controls(item)\n # Select the new object.\n item.select_set(True)\n return item", "def nid2partid(self, nids, ntype=...):\n ...", "def nid2partid(self, nids, ntype=...):\n ...", "def test_hierarchy_perceived_information_propagation(self):\n from openff.toolkit._tests.create_molecules import (\n dipeptide_hierarchy_added as create_dipeptide,\n )\n\n dipeptide_hierarchy_perceived = create_dipeptide()\n\n for atom in dipeptide_hierarchy_perceived.atoms:\n atom.metadata[\"chain_id\"] = \"A\"\n assert (\"A\", \"1\", \" \", \"ACE\") != dipeptide_hierarchy_perceived.residues[\n 0\n ].identifier\n dipeptide_hierarchy_perceived.update_hierarchy_schemes()\n assert (\"A\", \"1\", \" \", \"ACE\") == dipeptide_hierarchy_perceived.residues[\n 0\n ].identifier", "def createInnerRepresentation(self):\n\n while self._metastring_rest:\n self.addMetastringPointer(self._metastring_rest.pop(0))", "def _serialize(self, state, handle):\n raise NotImplementedError", "def serialize(cleaned_data):\n result = cleaned_data.copy()\n for fieldname in Step1Data.REFERENCE_FIELDS_NAMES:\n if result[fieldname]:\n result[fieldname] = result[fieldname].pk\n result[\"data\"] = result[\"data\"].isoformat()\n # print \"serialize to:\", result\n return result", "def encode(self):\r\n # Create dict from attributes. 
Maintain added order\r\n #jd = {'txpk': collections.OrderedDict()}\r\n jd = {'txpk':{}}\r\n\r\n for key in self.keys:\r\n val = getattr(self, key)\r\n\r\n if val is not None:\r\n if key == 'data':\r\n jd['txpk'][key] = val.decode('utf-8')\r\n else:\r\n jd['txpk'][key] = val\r\n #print('key',key)\r\n #print('valtype',type(val),val) \r\n #print(jd)\r\n \r\n return dumps(jd, separators=(',', ':'))", "async def b_chain() -> dict:\n authority_chain = await chain.consensus()\n return {\"chain\": authority_chain[\"chain\"]}", "def _create_chained_picking(self, cr, uid, pick_name, picking, purchase_type, move, context=None):\n res = super(stock_move, self)._create_chained_picking(cr, uid, pick_name, picking, purchase_type, move, context=context)\n if picking.purchase_id:\n self.pool.get('stock.picking').write(cr, uid, [res], {'purchase_id': picking.purchase_id.id})\n self.pool.get('stock.picking').write(cr, uid, [res], {'invoice_state': picking.invoice_state})\n return res", "def fetchParts(jdb, melId, instrument, signature):\n chorusDict = collections.OrderedDict()\n bs = jdb.beats(melId)\n formId = ''\n for b in bs:\n beat = Beat(*b[1:])\n if not beat.chorus in chorusDict:\n chorus = Section(u'CHORUS', beat.onset, None, beat.chorus)\n chorusDict[beat.chorus] = chorus\n chorus = chorusDict[beat.chorus]\n if (beat.form != ''):\n formId = beat.form\n if not formId in chorus.children:\n form = Section(u'FORM', beat.onset, None, formId)\n chorus.children[formId] = form\n form = chorus.children[formId]\n form.beats.append(beat)\n #print [c.value for c in chorusDict.values()]\n return makeParts(chorusDict.values(), instrument, jdb.shiftTime(melId), int(signature.split('/')[0]))", "def serialize(self):\n\t\treturn { 'type': self.type, 'parameters' : self.parameters}" ]
[ "0.5191058", "0.516895", "0.5164095", "0.5164095", "0.50821406", "0.5015414", "0.50149983", "0.49796292", "0.49461022", "0.49153453", "0.48709112", "0.48381686", "0.483411", "0.48162767", "0.48084295", "0.4796053", "0.4777216", "0.4726833", "0.4726833", "0.47081837", "0.46883693", "0.46671337", "0.4659657", "0.46214417", "0.4620025", "0.46192482", "0.46013826", "0.4593191", "0.4578713", "0.45755225", "0.45629892", "0.4562384", "0.45505276", "0.45496094", "0.45312953", "0.4530276", "0.44867477", "0.44717628", "0.44700804", "0.44686908", "0.4464296", "0.44605577", "0.445083", "0.44423646", "0.44423646", "0.44404605", "0.44372246", "0.44330332", "0.4432397", "0.44275826", "0.4422517", "0.4420428", "0.44194275", "0.44129044", "0.44119993", "0.44119993", "0.44106048", "0.4409376", "0.44068092", "0.44056058", "0.44040802", "0.44017375", "0.4398603", "0.4398233", "0.43976173", "0.43958825", "0.43893126", "0.43872815", "0.43871835", "0.4382691", "0.4382665", "0.43800005", "0.437984", "0.43758628", "0.4373381", "0.43725127", "0.43637726", "0.43602848", "0.43537498", "0.43526798", "0.43526134", "0.43526134", "0.43515876", "0.43513954", "0.43512222", "0.43434176", "0.4340548", "0.43402886", "0.43324253", "0.43324223", "0.43310797", "0.43310797", "0.43297496", "0.4321092", "0.43161824", "0.43142584", "0.43115595", "0.43098602", "0.43076512", "0.43067062", "0.43060982" ]
0.0
-1
Marshal information deom the selected chainParts to create a 'simple_partition' label.
def _make_simple_partition_label(chain_dict): cps = chain_dict['chainParts'] if not (_select_simple_chainparts(cps)): raise NotImplementedError( 'chain fails substring selection: not "simple": %s' % ( chain_dict['chainName'])) label = 'simplepartition([' for cp in cps: smcstr = str(cp['smc']) if smcstr == 'nosmc': smcstr = '' for i in range(int(cp['multiplicity'])): # condition_str = '(%set,%s,%s)' % (str(cp['threshold']), # str(cp['etaRange']), # smcstr,) condition_str = '(%set,%s' % (str(cp['threshold']), str(cp['etaRange']),) if smcstr: condition_str += ',%s)' else: condition_str += ')' label += condition_str label += '])' return label
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _make_partitionsTest_label(chain_parts):\n\n assert len(chain_parts) == 1\n scenario = chain_parts[0]['hypoScenario']\n \n assert scenario == 'partitionsTest'\n\n \n\n return \"\"\"\n partgen(\n [(20et, 0eta320)]\n \n simple([(40et, 0eta320) (50et, 0eta320)])\n simple([(35et, 0eta240) (55et, 0eta240)])\n )\"\"\"", "def provide_partition_info(self):\n self.partition_info = True", "def __str__(self) -> str:\n return str(self.my_partition)", "def _make_simple_label(chain_parts):\n \n if not _select_simple_chainparts(chain_parts):\n msg = 'Jet Configuration error: '\\\n 'chain fails substring selection: not \"simple\" '\n\n raise NotImplementedError(msg)\n \n label = 'simple(['\n for cp in chain_parts:\n smcstr = str(cp['smc'])\n jvtstr = str(cp['jvt'])\n if smcstr == 'nosmc':\n smcstr = ''\n for i in range(int(cp['multiplicity'])):\n # condition_str = '(%set,%s,%s)' % (str(cp['threshold']),\n # str(cp['etaRange']),\n # smcstr,)\n condition_str = '(%set,%s' % (str(cp['threshold']),\n str(cp['etaRange']),)\n if smcstr: # Run 2 chains have \"INF\" in the SMC substring\n condition_str += ',%s)' % smcstr.replace('INF','')\n elif jvtstr:\n condition_str += ',%s)' % jvtstr\n else:\n condition_str += ')'\n label += condition_str\n label += '])'\n return label", "def FormatPartition(self, partition):\n\n fstab = self.fstab\n if fstab:\n p = fstab[partition]\n self.script.append('format(\"%s\", \"%s\", %s, \"%s\", \"%s\");' %\n (p.fs_type, common.PARTITION_TYPES[p.fs_type],\n self._GetSlotSuffixDeviceForEntry(p),\n p.length, p.mount_point))", "def __str__(self):\n \n return \"Part ID: %s, %s\" % (self.part_id, self.name)", "def create(self, disk):\n logging.info('Adding type %d partition to disk image: %s' % (self.type, disk.filename))\n run_cmd('parted', '--script', '--', disk.filename, 'mkpart', 'primary', self.parted_fstype(), self.begin, self.end)", "def print_partition(t, par=[]):\n\n if is_leaf(t):\n if label(t) == True:\n print(' + '.join(par))\n else:\n left, right = branches(t)[0], branches(t)[1]\n print_partition(left, [str(label(t))] + par)\n print_partition(right, par)\n #print(\"total partitions: \", str(count_leaves(t)))", "def partid2nids(self, partid, ntype=...):\n ...", "def partid2nids(self, partid, ntype=...):\n ...", "def _setPartedPartition(self, partition):\n log_method_call(self, self.name)\n\n if partition is not None and not isinstance(partition, parted.Partition):\n raise ValueError(\"partition must be None or a parted.Partition instance\")\n\n log.debug(\"device %s new partedPartition %s\", self.name, partition)\n self._partedPartition = partition\n self.updateName()", "def dump_parts(self, io):\n\n # XXX refactor with Tempita\n title = \"Parts created by the docutils writer '%s'\" % self.strategy.name\n io.say(title + os.linesep)\n io.say(len(title) * '-')\n io.say(2 * os.linesep)\n io.say('Part keys: ' + 2 * os.linesep)\n\n parts = self.publish_parts(io)\n io.say(os.linesep.join(sorted(parts.keys())))\n io.say(2 * os.linesep)\n for part in parts:\n io.say(\"Value of part '%s':%s\" % (part, os.linesep))\n io.say(parts[part].encode('utf-8') + os.linesep)\n io.say(80*'-'+os.linesep)\n io.say(os.linesep)", "def _make_simple_comb_label(chain_dict):\n\n cps = chain_dict['chainParts']\n if not (_select_simple_chainparts(cps)):\n raise NotImplementedError(\n 'chain fails substring selection: not \"simple\": %s' % (\n chain_dict['chainName']))\n \n simple_strs = []\n\n for cp in cps:\n print(cp)\n simple_strs.append(_make_simple_label([cp]))\n\n label = 'combgen([(%d)]' % 
len(cps)\n for s in simple_strs:\n label += ' %s ' % s\n label += ')'\n return label", "def partid(self): # -> Unknown:\n ...", "def get_partition():\n if selection is None:\n warning(\"You need to pick something first.\")\n return\n if not selection.obj_type in ['partition']:\n warning(\"You need to partition the selection first.\")\n return\n res = askItems([['property',[1]]],\n caption='Partition property')\n if res:\n prop = res['property']\n getPartition(selection,prop)\n highlightPartitions(selection)", "def _wrap_partitions(self, partitions):\n return [\n self.partition_type(object_id, length, width, ip)\n for (object_id, length, width, ip) in zip(*[iter(partitions)] * 4)\n ]", "def bootpart(disks):\n return path_to_partition(disks, '/boot/foo')", "def choose_partition():\n # Ask the user wether the partitions should be taken from the original partitions, or from the home-made partitions\n file_name = selector([\"The original partition given by the instructor\", \"The homemade partition file\"], [\"ORIGINAL\", \"HOMEMADE\"])\n\n # Open the corresponding file\n if file_name == \"1\" or file_name == \"ORIGINAL\":\n file = open(\"./assets/partitions.txt\", \"r\")\n elif file_name == \"2\" or file_name == \"HOMEMADE\":\n file = open(\"./assets/homemade_partitions.txt\", \"r\")\n\n skip_lines(-1)\n\n # Print all song's names in the partitions\n lines = file.readlines()\n file.close()\n for i in range(0, len(lines), 2):\n print(lines[i][:-1])\n\n # Ask the user to choose for a song\n song_index = choose_number(len(lines) / 2)\n\n # Get the corresponding song's partition and convert notes to Note instances\n partition = lines[song_index * 2 - 1][:-1].replace(' ', '')\n raw_notes = get_notes_from_line(partition)\n parsed_notes = [Note(note) for note in raw_notes]\n return parsed_notes", "def partid2nids(self, partid, ntype): # -> None:\n ...", "def create(data):\n \n return Part(\n part_id = data['part_num'],\n category_id = data['part_cat_id'],\n external_ids = data.get('external_ids', {}),\n name = data['name'],\n year_from = data.get('year_from', None),\n year_to = data.get('year_to', None),\n url = data.get('part_url', None),\n img_url = data.get('part_img_url', None),\n print_of = data.get('print_of', None),\n prints = data.get('prints', []),\n molds = data.get('molds', []),\n alternates = data.get('alternates', []))", "def _wrap_partitions(self, partitions):\n return [\n self.partition_type(future, length, width, ip)\n for (future, length, width, ip) in zip(*[iter(partitions)] * 4)\n ]", "def parse_part(self):\n parts = []\n for part in re.split(r'\\*\\*\\* ([A-Z- ]+) \\*\\*\\*', self.hand_file): # return [ 'part1', 'splitter1', 'part2',..\n parts.append(part)\n\n for i in range(0, len(parts)):\n if i == 0:\n self.part_dict['HEADER'] = parts[i]\n if i % 2 != 0: # number is odd\n self.part_dict[parts[i]] = parts[i + 1]", "def preCommitFixup(self):\n log_method_call(self, self.name)\n if not self.exists or not self.disklabelSupported:\n return\n\n # find the correct partition on the original parted.Disk since the\n # name/number we're now using may no longer match\n _disklabel = self.disk.originalFormat\n\n if self.isExtended:\n # getPartitionBySector doesn't work on extended partitions\n _partition = _disklabel.extendedPartition\n log.debug(\"extended lookup found partition %s\",\n devicePathToName(getattr(_partition, \"path\", None) or \"(none)\"))\n else:\n # lookup the partition by sector to avoid the renumbering\n # nonsense entirely\n _sector = 
self.partedPartition.geometry.start\n _partition = _disklabel.partedDisk.getPartitionBySector(_sector)\n log.debug(\"sector-based lookup found partition %s\",\n devicePathToName(getattr(_partition, \"path\", None) or \"(none)\"))\n\n self.partedPartition = _partition", "def prep_disk_for_formatting(disk=None):\n disk['Format Warnings'] = '\\n'\n width = len(str(len(disk['Partitions'])))\n\n # Bail early\n if disk is None:\n raise Exception('Disk not provided.')\n\n # Set boot method and partition table type\n disk['Use GPT'] = True\n if (get_boot_mode() == 'UEFI'):\n if (not ask(\"Setup Windows to use UEFI booting?\")):\n disk['Use GPT'] = False\n else:\n if (ask(\"Setup Windows to use BIOS/Legacy booting?\")):\n disk['Use GPT'] = False\n\n # Set Display and Warning Strings\n if len(disk['Partitions']) == 0:\n disk['Format Warnings'] += 'No partitions found\\n'\n for partition in disk['Partitions']:\n display = '{size} {fs}'.format(\n num = partition['Number'],\n width = width,\n size = partition['Size'],\n fs = partition['FileSystem'])\n\n if is_bad_partition(partition):\n # Set display string using partition description & OS type\n display += '\\t\\t{q}{name}{q}\\t{desc} ({os})'.format(\n display = display,\n q = '\"' if partition['Name'] != '' else '',\n name = partition['Name'],\n desc = partition['Description'],\n os = partition['OS'])\n else:\n # List space used instead of partition description & OS type\n display += ' (Used: {used})\\t{q}{name}{q}'.format(\n used = partition['Used Space'],\n q = '\"' if partition['Name'] != '' else '',\n name = partition['Name'])\n # For all partitions\n partition['Display String'] = display", "def parse_partition(partition):\n partition_data = partition.split(\":\")\n if len(partition_data) != 2:\n raise ValueError(\"Partitions line parts format is 'size:mount'\")\n return partition_data", "def nid2partid(self, nids, ntype): # -> None:\n ...", "def nid2partid(self, nids, ntype=...):\n ...", "def nid2partid(self, nids, ntype=...):\n ...", "def usableparts(self):\n # First get the partition type-id for all hard disk partitions\n partid = {}\n for pline in self.fdiskl():\n partid[pline[0]] = pline[4]\n ups = {}\n for s in self.xlist(\"get-blkinfo\")[1]:\n mo = re.match(r'(/dev/[^:]*):(?: LABEL=\"([^\"]*)\")?(?:'\n ' UUID=\"([^\"]*)\")?(?: TYPE=\"([^\"]*)\")?', s)\n if mo:\n dev, label, uuid, fstype = mo.groups()\n if fstype in (None, \"linux_raid_member\", \"LVM2_member\"):\n continue\n if dev.startswith(\"/dev/loop\"):\n continue\n rem = None\n if dev.startswith(\"/dev/sd\"):\n if partid.get(dev) == \"fd\":\n # This test seems to be necessary because blkid\n # sometimes returns an fs-type, rather than\n # linux_raid_member\", for the the first device\n # in a formatted raid array\n continue\n rem = self.xlist(\"removable\", dev)[1][0].strip() == \"1\"\n ups[dev] = (fstype, label, uuid, rem)\n return ups", "def partition_pair_to_spart(part_pair):\n part_star = list(part_pair[0])\n part_circ_star = list(part_pair[1])\n add_zeros = len(part_circ_star) - len(part_star)\n if add_zeros != 0:\n new_star = part_star + [0]\n else:\n new_star = part_star\n diff_list = [a - b for a, b in zip(part_circ_star, new_star)]\n fermionic_parts = []\n bosonic_parts = []\n for k in range(len(diff_list)):\n if diff_list[k] == 0:\n bosonic_parts += [part_circ_star[k]]\n elif diff_list[k] == 1:\n fermionic_parts += [new_star[k]]\n else:\n raise Exception(\"This should not happen.\")\n # sparts = Superpartitions()\n return _Superpartitions([fermionic_parts, bosonic_parts])", 
"def newpart(self, device, primary, ncyls, swap=False):\n # This is a simple partitioning tool, which only supports\n # adding partitions sequentially, with all primary partitions\n # being before the extended partition, so once a logical\n # partition has been added, it is not possible to add further\n # primary ones.\n di = DiskInfo(device)\n pmax = 0 # Record highest partition number\n lim = -1 # Used for seeking last used cylinder\n exp = 0 # Number of extended partition\n ex0, ex1 = 0, -1 # Extended partition start and end\n log0, log1 = 0, -1 # Start and end of area used by logical partitions\n for p in di.parts:\n pn = int(p[0][len(device):])\n scyl, ecyl = p[1:3]\n if pn <= 4:\n if exp:\n run_error(_(\"Not supported: primary partition (%s%d)\\n\"\n \"has higher partition number than extended \"\n \"partition\") % (device, pn))\n return \"\"\n if scyl <= lim:\n run_error(_(\"Partitions must be ordered on the device.\\n\"\n \"%s%d is out of order.\") % (device, pn))\n return \"\"\n if p[3] in (\"5\", \"f\"):\n # extended\n exp = pn\n ex0, ex1 = scyl, ecyl\n continue\n pmax = pn\n lim = ecyl\n\n startcyl = lim + 1\n endcyl = lim + ncyls\n if endcyl >= di.drvcyls:\n run_error(_(\"Too little space at end of drive for new partition\"))\n return \"\"\n if exp and (pmax <= 4):\n # Remove the extended partition, which is empty anyway\n if not self.rmpart(device, exp):\n return \"\"\n pmax = exp - 1\n if primary:\n if pmax >= 4:\n run_error(_(\"Cannot add primary partition to %s\") % device)\n return \"\"\n t = \"primary\"\n else:\n t = \"logical\"\n if pmax > 4:\n # resize extended partition\n if not self.xcheck(\"resizepart\", device, str(exp),\n str(ex0), str(endcyl),\n onfail=_(\"Couldn't resize extended partition %s%d\")\n % (device, exp)):\n return False\n else:\n # create extended partition\n if not self.xcheck(\"newpart\", device,\n str(startcyl), str(endcyl), \"extended\",\n onfail=_(\"Couldn't create extended partition on %s\")\n % device):\n return False\n if pmax < 4:\n pmax = 4\n\n if self.xcheck(\"newpart\", device, str(startcyl), str(endcyl),\n t, \"linux-swap\" if swap else \"ext2\"):\n return \"%s%d\" % (device, pmax + 1)\n else:\n run_error(_(\"Couldn't add new partition to %s\") % device)\n return \"\"", "def get_partitioning(disk):\n\n #TODO\n return \"Unknown\"", "def partition(self):\n return self.tag(\"partition\")", "def _encode_structure(self):\n pass", "def _encode_structure(self):\n pass", "def _get_partition_list(self):\n raise NotImplementedError('Must be implemented in subclasses.')", "def _map_part_plnt(entry):\n dmt_entry = {}\n part_type = config.part_type_mapping.get(entry[config.classkey], 'P')\n\n dmt_entry['Plant'] = config.plant\n dmt_entry['PrimWhse'] = config.prim_whse\n dmt_entry['SourceType'] = part_type\n\n return dmt_entry", "def _make_combinationsTest_label(chain_parts):\n\n assert len(chain_parts) == 1\n scenario = chain_parts[0]['hypoScenario']\n \n assert scenario == 'combinationsTest'\n\n \n\n return \"\"\"\n combgen(\n [(2)(20et, 0eta320)]\n \n simple([(40et, 0eta320) (50et, 0eta320)])\n simple([(35et, 0eta240) (55et, 0eta240)])\n )\"\"\"", "def get_partition_cfg(partition_type: str) -> Dict:\n raise NotImplementedError('Not supported yet.')", "def create(data):\n \n return Partlist(\n list_id = data['id'],\n name = data['name'],\n pieces = data['num_parts'])", "def partition_selection():\n if selection is None:\n warning(\"You need to pick something first.\")\n return\n if not selection.obj_type in ['actor','element']:\n warning(\"You need 
to pick actors or elements.\")\n return\n for A in GD.canvas.actors:\n if not A.atype() == 'TriSurface':\n warning(\"Currently I can only partition TriSurfaces.\" )\n return\n partitionCollection(selection)\n highlightPartitions(selection)", "def get_partition_cfg(partition_type: str, **kwargs) -> Dict:\n raise NotImplementedError", "def createPartition(self, mp, mtype, fs, size, vg, nr):\n startSector = 0\n endSector = 0\n\n # primary partition: calculate the space according instructions below\n if mtype == 'Pri':\n\n # calculate the start sector\n startSector = self.__primaryStartPoint\n\n # calculate the end sector\n sectorLen = startSector + int(size * MEGABYTE / float(self.__sectorSize))\n endSector = sectorLen - 1\n self.__primaryStartPoint = sectorLen\n\n # decrease disk size\n self.__diskSize -= size\n\n # extended partition: update primary and logical pointers\n # when a extended partition is given, its size is not taken into account\n elif mtype == 'Ext':\n\n # calculate the start sector\n startSector = self.__primaryStartPoint\n\n # calculate end sector pointer\n endSector = int(self.__diskSize * MEGABYTE / float(self.__sectorSize)) + startSector - 1\n if endSector > MAX_SECTOR_POSSIBLE:\n endSector = MAX_SECTOR_POSSIBLE\n\n self.__extEndSector = endSector\n\n # decrease disk size\n self.__diskSize -= EXTENT_SIZE - 1\n\n # logical partition: calculate the space according instructions below\n elif mtype == 'Log':\n\n # FIXME, need to improve\n # just for zkvm without extended partition\n self.__extEndSector = endSector\n # refresh start sector pointer\n startSector = self.__primaryStartPoint + self.__sectorOffset\n\n if size == ALL_AVAILABLE:\n endSector = self.__extEndSector\n size = self.__diskSize - 1\n self.__diskSize = 0\n\n else: \n # calculate end sector pointer\n sectorLen = startSector + int(size * MEGABYTE / float(self.__sectorSize))\n endSector = sectorLen - 1\n self.__primaryStartPoint = sectorLen\n\n # decrease disk size\n self.__diskSize -= size\n\n\n part = {}\n part['command'] = 'create:partition'\n part['id'] = \"%s-part%s\" % (self.__diskId, str(nr))\n part['name'] = self.__disk + str(nr)\n part['mount_point'] = mp\n part['type'] = mtype\n part['fs'] = fs\n part['multipath'] = self.__hasMultipath\n part['raid_name'] = None\n part['disk_name'] = '/dev/%s' % self.__disk\n part['size'] = size\n part['vg'] = vg\n part['nr'] = nr\n part['format'] = True\n part['start'] = startSector\n part['end'] = endSector\n\n if self.__hasMultipath:\n part['disk_name'] = '/dev/mapper/%s' % self.__disk\n\n # extended partition: do not format\n if mtype == 'Ext':\n part['format'] = False\n\n return part", "def partitionData(data, labels, partition):\n\treturn [s[partition] for s in data], labels[partition]", "def part_id(self):\n ...", "def split(self):\n out = []\n if self.section != \"\":\n out.append(self.section)\n out.append(self.topic)\n if self.sub_topic != \"\":\n out.append(self.sub_topic)\n if self.cutter != \"\":\n out.append(self.cutter)\n if self.version != 0:\n out.append(\"v.\" + str(self.version))\n if self.year != 0:\n out.append(str(self.year) + self.work_letter)\n if self.other != \"\":\n out.append(self.other)\n if self.copy != 0:\n out.append(\"c.\" + str(self.copy))\n return out", "def partid(self): # -> None:\n ...", "def partition_network(self, *args):\n Blockade.blockade_create_partition(*args)", "def assemble_parts(self):\n self.parts['whole'] = self.output\n self.parts['encoding'] = self.document.settings.output_encoding\n self.parts['version'] = 
docutils.__version__", "def create_from_segments(self, segment, origin=0):\r\n n = origin\r\n if segment[origin]['T'] != 'soma': # if it's a soma, only one compartment\r\n while (len(segment[n]['children']) == 1) and (segment[n]['T'] != 'soma'): # Go to the end of the branch\r\n n += 1\r\n # End of branch\r\n branch = segment[origin:n + 1]\r\n # Set attributes\r\n self.diameter, self.length, self.area, self.x, self.y, self.z = \\\r\n zip(*[(seg['diameter'], seg['length'], seg['area'], seg['x'], seg['y'], seg['z']) for seg in branch])\r\n self.diameter, self.length, self.area, self.x, self.y, self.z = array(self.diameter), array(self.length), \\\r\n array(self.area), array(self.x), array(self.y), array(self.z)\r\n self.type = segment[n]['T'] # normally same type for all compartments in the branch\r\n # Create children (list)\r\n self.children = [Morphology().create_from_segments(segment, origin=c) for c in segment[n]['children']]\r\n # Create dictionary of names (enumerates children from number 1)\r\n for i, child in enumerate(self.children):\r\n self._namedkid[str(i + 1)] = child\r\n # Name the child if possible\r\n if child.type in ['soma', 'axon', 'dendrite']:\r\n if child.type in self._namedkid:\r\n self._namedkid[child.type] = None # two children with the same name: erase (see next block)\r\n else:\r\n self._namedkid[child.type] = child\r\n # Erase useless names\r\n for k in self._namedkid.keys():\r\n if self._namedkid[k] is None:\r\n del self._namedkid[k]\r\n # If two kids, name them L (left) and R (right)\r\n if len(self.children) == 2:\r\n self._namedkid['L'] = self._namedkid['1']\r\n self._namedkid['R'] = self._namedkid['2']\r\n return self", "def create_partition(mesh,polygons,enforce_exact=False):", "def get_partname(self):\n return '{0:03.0f}{1}'.format(self.lon1, self.part)", "def build_output_partitions(cls, name='inputTablePartitions', output_name='output'):\n obj = cls(name)\n obj.exporter = 'get_output_table_partition'\n obj.output_name = output_name\n return obj", "def build_parts(self, simple_sentence, kana_sentence):\r\n parts = parse_simple_sentence_into_parts(simple_sentence)\r\n \r\n remaining_sentence = kana_sentence\r\n \r\n for i, part in enumerate(parts):\r\n if part.kanji:\r\n if i+1 == len(parts):\r\n part.kana = remaining_sentence\r\n else:\r\n start_of_next_part = remaining_sentence.find(parts[i+1].kana)\r\n part.kana = remaining_sentence[:start_of_next_part]\r\n remaining_sentence = remaining_sentence[start_of_next_part:]\r\n else:\r\n remaining_sentence = remaining_sentence.replace(part.kana, '', 1)\r\n \r\n return parts", "def test_create_part(self):\n pass", "def make_mixture_info(parts, operation='+'):\n # type: (List[ModelInfo], str) -> ModelInfo\n # Build new parameter list\n combined_pars = []\n\n # When creating a mixture model that is a sum of product models (ie (1*2)+(3*4))\n # the parameters for models 1 & 2 will be prefixed with A & B respectively,\n # but so will the parameters for models 3 & 4. 
We need to rename models 3 & 4\n # so that they are prefixed with C & D to avoid overlap of parameter names.\n used_prefixes = []\n for part in parts:\n if part.composition and part.composition[0] == 'mixture':\n i = 0\n for submodel in part.composition[1]:\n npars = len(submodel.parameters.kernel_parameters)\n # List of params of one of the constituent models of part\n submodel_pars = part.parameters.kernel_parameters[i:i+npars]\n # Prefix of the constituent model\n prefix = submodel_pars[0].name[0]\n if prefix not in used_prefixes: # Haven't seen this prefix so far\n used_prefixes.append(prefix)\n i += npars\n continue\n # TODO: don't modify submodel --- it may be used elsewhere\n # Existing code probably doesn't keep a handle on the model\n # parts so its probably okay, but it's possible that a mix\n # on user defined mixture models models will change the\n # parameters used for the parts in the GUI. Even worse if the\n # same plugin is used twice. For example, twosphere.py\n # contains sphere+sphere and you create twosphere+twosphere.\n while prefix in used_prefixes:\n # This prefix has been already used, so change it to the\n # next letter that hasn't been used\n prefix = chr(ord(prefix) + 1)\n used_prefixes.append(prefix)\n prefix += \"_\"\n # Update the parameters of this constituent model to use the\n # new prefix\n for par in submodel_pars:\n # Strip {prefix}_ using par.name[2:], etc.\n # TODO: fails for AB_scale\n par.id = prefix + par.id[2:]\n par.name = prefix + par.name[2:]\n if par.length_control is not None:\n par.length_control = prefix + par.length_control[2:]\n i += npars\n\n for part in parts:\n # Parameter prefix per model, A_, B_, ...\n # Note that prefix must also be applied to id and length_control\n # to support vector parameters\n prefix = ''\n if not part.composition or part.composition[0] == 'product':\n # Model isn't a composition model, so its parameters don't have a\n # a prefix. Add the next available prefix\n prefix = chr(ord('A')+len(used_prefixes))\n used_prefixes.append(prefix)\n prefix += '_'\n\n if operation == '+':\n # If model is a sum model, each constituent model gets its own scale parameter\n scale_prefix = prefix\n if prefix == '' and getattr(part, \"operation\", '') == '*':\n # `part` is a composition product model. Find the prefixes of\n # its parameters to form a new prefix for the scale.\n # For example, a model with A*B*C will have ABC_scale.\n sub_prefixes = []\n for param in part.parameters.kernel_parameters:\n # Prefix of constituent model\n sub_prefix = param.id.split('_')[0]\n if sub_prefix not in sub_prefixes:\n sub_prefixes.append(sub_prefix)\n # Concatenate sub_prefixes to form prefix for the scale\n scale_prefix = ''.join(sub_prefixes) + '_'\n scale = Parameter(scale_prefix + 'scale', default=1.0,\n description=\"model intensity for \" + part.name)\n combined_pars.append(scale)\n for p in part.parameters.kernel_parameters:\n p = copy(p)\n p.name = prefix + p.name\n p.id = prefix + p.id\n if p.length_control is not None:\n p.length_control = prefix + p.length_control\n combined_pars.append(p)\n parameters = ParameterTable(combined_pars)\n # Allow for the scenario in which each component has all its PD parameters\n # active simultaneously. 
details.make_details() will throw an error if\n # too many are used from any one component.\n parameters.max_pd = sum(part.parameters.max_pd for part in parts)\n\n def random():\n \"\"\"Random set of model parameters for mixture model\"\"\"\n combined_pars = {}\n for k, part in enumerate(parts):\n prefix = chr(ord('A')+k) + '_'\n pars = part.random()\n combined_pars.update((prefix+k, v) for k, v in pars.items())\n return combined_pars\n\n model_info = ModelInfo()\n model_info.id = operation.join(part.id for part in parts)\n model_info.operation = operation\n model_info.name = '(' + operation.join(part.name for part in parts) + ')'\n model_info.filename = None\n model_info.title = 'Mixture model with ' + model_info.name\n model_info.description = model_info.title\n model_info.docs = model_info.title\n model_info.category = \"custom\"\n model_info.parameters = parameters\n model_info.random = random\n #model_info.single = any(part['single'] for part in parts)\n model_info.structure_factor = False\n #model_info.tests = []\n #model_info.source = []\n # Remember the component info blocks so we can build the model\n model_info.composition = ('mixture', parts)\n return model_info", "def create_net_partition(self, body=None):\r\n return self.post(self.net_partitions_path, body=body)", "def list_partitions(self, partitioning):\n return []", "def gen_partition_statement(partition_tuples, target_root, run_id=None):\n if run_id is not None:\n partition_tuples = [('run_id', run_id)] + partition_tuples\n # todo: part_a1, part_a2, part_b, part_c, part_what? you lost me.\n part_a1 = \", \".join(\n [\"{label}='{value}'\".format(label=i[0], value=i[1]) for i in partition_tuples]\n )\n part_a2 = \"/\".join(\n [\"{label}={value}\".format(label=i[0], value=i[1]) for i in partition_tuples]\n )\n part_b = \"partition ({partitions_str})\".format(partitions_str=part_a1)\n part_c = \"location '{location}'\".format(location=os.path.join(target_root, part_a2))\n return part_b + ' ' + part_c", "def get_partition(self, partid):\n #TODO(zhengda) add implementation later.", "def get_partition(self, partid):\n #TODO(zhengda) add implementation later.", "def set_partition(self, partition=0):\n if not isinstance(partition, int):\n raise TypeError('partition must be an integer')\n if partition <= 0:\n raise ValueError('partition must be positive')\n if self.connected:\n self.producer.send(\"PART:\"+str(partition))", "def do_prepare_partition(cls, part, source_params, creator, cr_workdir,\n oe_builddir, bootimg_dir, kernel_dir, rootfs_dir,\n native_sysroot):\n logger.debug(\"SourcePlugin: do_prepare_partition: part: %s\", part)", "def partition_description(self) -> pulumi.Output[Optional[Any]]:\n return pulumi.get(self, \"partition_description\")", "def names_to_parts(self, pnames, must_exist=True):\n meta = self.mesh.meta\n for pname in pnames:\n part = meta.get_part(pname, must_exist)\n yield part", "def do_stage_partition(cls, part, source_params, creator, cr_workdir,\n oe_builddir, bootimg_dir, kernel_dir,\n native_sysroot):\n logger.debug(\"SourcePlugin: do_stage_partition: part: %s\", part)", "def defineTasks(self,partition):\n recv_slots = partition.recvSlices()\n strm_slots = partition.streamSlices()\n recvNodes = partition.recvNodesFromSlots()\n strmNodes = partition.streamNodesFromSlots()\n opt = '/'+self.manager.hostName()+'/'+partition.manager.name()+'/'+partition.name+'/'\n cl0 = '/Class0'+opt\n cl1 = '/Class1'+opt\n\n partition.setDataSources([])\n tasks = []\n pn = self.partitionName()\n print 
'---------------------- Partition name is:',pn\n for i in xrange(len(recv_slots)):\n slot = recv_slots[i]\n node = slot[:slot.find(':')]\n sub_farm = 'SF%02d'%(i,)\n short_name = sub_farm+'_SND' # Keep this name to ensure storageMon is working!\n task = pn+'_'+node+'_'+short_name\n tasks.append(node+'/'+task+'/'+short_name+'/RecStorageSend'+cl1+'(\"'+sub_farm+'\",'+str(i)+',)')\n partition.setRecvSenders(tasks)\n tasks = []\n for i in xrange(len(strm_slots)):\n slot = strm_slots[i]\n node = slot[:slot.find(':')]\n sub_farm = 'SF%02d'%(i,)\n short_name = sub_farm+'_HLT' # Keep this name to ensure storageMon is working!\n task = pn+'_'+node+'_'+short_name\n tasks.append(node+'/'+task+'/'+short_name+'/RecStorageRecv'+cl1+'(\"'+sub_farm+'\",'+str(i)+',)')\n partition.setStreamReceivers(tasks)\n cnt = 0\n tasks = []\n infra = []\n for j in recvNodes:\n for itm in self.rcvInfra.data:\n i,cl=itm.split('/')\n infra.append(j+'/'+pn+'_'+j+'_'+i+'/'+i+'/'+i+'/'+cl+opt+'(\"'+str(cnt)+'\",)')\n cnt = cnt + 1\n partition.setRecvInfrastructure(infra)\n partition.setRecvReceivers(tasks)\n cnt = 0\n tasks = []\n infra = []\n for j in strmNodes:\n for itm in self.strInfra.data:\n i,cl=itm.split('/')\n infra.append(j+'/'+pn+'_'+j+'_'+i+'/'+i+'/'+i+'/'+cl+opt+'(\"'+str(cnt)+'\",)')\n cnt = cnt + 1\n partition.setStreamInfrastructure(infra)\n partition.setStreamSenders(tasks)\n if partition.saveTasks():\n tasks = partition.collectTasks(tasks={},with_data_sources=0)\n return tasks\n return None", "def to_partid(self, id_tensor):\n ...", "def test_splitPartiesOnlyOneParty(self):\n parties_str = \"Applicant Tenant(s): Tabata A, Caio N\"\n expected = {\n \"applicant\": [{\"role\": \"tenant\", \"name\": \"tabata a\"},\n {\"role\": \"tenant\", \"name\": \"caio n\"}],\n \"respondent\": []\n }\n result = split_parties.splitCaseParties('adjudication', parties_str)\n self.assertEqual(expected, result)", "def export_part_list(self, filetype='xlsx'):\n if filetype == 'csv':\n enum = 48649\n else:\n enum = 48642\n path = self.export_dir.joinpath(self.partcode).joinpath('part_list.xlsx')\n self.doc.Sheets(1).PartsLists(1).Export(str(path), enum)", "def do_configure_partition(cls, part, source_params, creator, cr_workdir,\n oe_builddir, bootimg_dir, kernel_dir,\n native_sysroot):\n logger.debug(\"SourcePlugin: do_configure_partition: part: %s\", part)", "def get_partition_info(disk, host_disk):\n\n volume = \"/dev/\"+disk\n DISKINFO[volume] = {}\n DISKINFO[volume][\"Name\"] = volume\n DISKINFO[volume][\"Type\"] = \"Partition\"\n DISKINFO[volume][\"HostDevice\"] = host_disk\n DISKINFO[volume][\"Partitions\"] = []\n DISKINFO[host_disk][\"Partitions\"].append(volume)\n DISKINFO[volume][\"Vendor\"] = get_vendor(disk)\n DISKINFO[volume][\"Product\"] = \"Host Device: \"+DISKINFO[host_disk][\"Product\"]\n DISKINFO[volume][\"RawCapacity\"], DISKINFO[volume][\"Capacity\"] = get_capacity()\n DISKINFO[volume][\"Description\"] = get_description(disk)\n DISKINFO[volume][\"Flags\"] = get_capabilities(disk)\n DISKINFO[volume][\"FileSystem\"] = get_file_system(disk)\n DISKINFO[volume][\"Partitioning\"] = \"N/A\"\n DISKINFO[volume][\"UUID\"] = get_uuid(disk)\n DISKINFO[volume][\"ID\"] = get_id(disk)\n DISKINFO[volume][\"BootRecord\"], DISKINFO[volume][\"BootRecordStrings\"] = get_boot_record(disk)\n\n return volume", "def fillQuickList():\n global quickList\n cmd = \"/sbin/blkid\"\n proc = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)\n for line in proc.stdout:\n line = line.replace(':', '').strip()\n propList = line.split()\n 
devName = label = uuid = fsType = ''\n devName = propList[0]\n for property in propList:\n if property.startswith('UUID'):\n uuid = property.replace('UUID=', '').replace('\"', '')\n quickList[devName] = uuid", "def getPartitionNameByUUID(part):\n global quickList\n part = part.replace('UUID=', '')\n if len(quickList) == 0:\n fillQuickList()\n for devName, uuid in quickList.items():\n if uuid == part:\n return devName\n return ''", "def test_get_parts(self):\n pass", "def genereate_echo_picklist(self):\n sample_names = []\n sample_wells = []\n indices = {'i5 name': {}, 'i5 plate': {}, 'i5 sequence': {},\n 'i5 well': {}, 'i7 name': {}, 'i7 plate': {},\n 'i7 sequence': {}, 'i7 well': {}, 'index combo': {},\n 'index combo seq': {}}\n\n for idx, well in enumerate(chain.from_iterable(self.plates[0].layout)):\n # Add the sample well\n sample_wells.append(well.well_id)\n # Get the sample name - we need to go back to the SampleComposition\n lib_comp = well.composition\n sample_comp = lib_comp.normalized_gdna_composition\\\n .gdna_composition.sample_composition\n sample_names.append(sample_comp.content)\n # Retrieve all the information about the indices\n i5_comp = lib_comp.i5_composition.primer_set_composition\n i5_well = i5_comp.container\n indices['i5 name'][idx] = i5_comp.external_id\n indices['i5 plate'][idx] = i5_well.plate.external_id\n indices['i5 sequence'][idx] = i5_comp.barcode\n indices['i5 well'][idx] = i5_well.well_id\n\n i7_comp = lib_comp.i7_composition.primer_set_composition\n i7_well = i7_comp.container\n indices['i7 name'][idx] = i7_comp.external_id\n indices['i7 plate'][idx] = i7_well.plate.external_id\n indices['i7 sequence'][idx] = i7_comp.barcode\n indices['i7 well'][idx] = i7_well.well_id\n\n indices['index combo seq'][idx] = '%s%s' % (\n indices['i5 sequence'][idx], indices['i7 sequence'][idx])\n\n sample_names = np.asarray(sample_names)\n sample_wells = np.asarray(sample_wells)\n indices = pd.DataFrame(indices)\n\n return LibraryPrepShotgunProcess._format_picklist(\n sample_names, sample_wells, indices)", "def process_new_subpart(notice, amd_label, par):\n subpart_changes = {}\n subpart_xml = find_subpart(par)\n subpart = reg_text.build_subpart(amd_label.label[0], subpart_xml)\n\n for change in changes.create_subpart_amendment(subpart):\n subpart_changes.update(change)\n return subpart_changes", "def partition(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"partition\")", "def __get_partition__(self, account, container, obj, part_shift):\n \n key = hash_path(account, container, obj, raw_digest=True)\n part = unpack_from('>I', key)[0] >> part_shift\n return part", "def get_partfstype(self, part):\n t = self.xlist(\"get-blkinfo\", part, \"TYPE\")\n return t[1][0] if t[0] and (len(t[1]) != 0) else \"\"", "def insert_parts(self, parts):\r\n self.board.insert_parts(parts)\r\n self.set_changed(parts)", "def __init__(self, section, config):\n self.disk = section.getSectionName()\n\n # Grab our partition map\n # If it doesn't exist, complain loudly\n self.diskLabelConfig = None\n for map in config.Partitions.PartitionMap:\n if (section.partitionmap.lower() == map.getSectionName()):\n # Set up the disk labels. 
Always s1!\n self.diskLabelConfig = DiskLabelConfig(map, self.disk + 's1')\n break", "def chainDict2jetLabel(chain_dict):\n\n # suported scenarios \n router = {\n 'simple': _make_simple_label,\n 'HT': _make_ht_label,\n 'vbenf': _make_vbenf_label,\n 'dijet': _make_dijet_label,\n 'combinationsTest': _make_combinationsTest_label,\n 'partitionsTest': _make_partitionsTest_label,\n }\n\n # chain_part - scenario association\n cp_sorter = {}\n for k in router: cp_sorter[k] = []\n\n for cp in chain_dict['chainParts']:\n if cp['signature'] != 'Jet' and cp['signature'] != 'Bjet': \n continue\n for k in cp_sorter:\n if cp['hypoScenario'].startswith(k):\n cp_sorter[k].append(cp)\n break\n\n # obtain labels by scenario.\n labels = []\n for k, chain_parts in cp_sorter.items():\n if chain_parts: labels.append(router[k](chain_parts))\n\n assert labels\n nlabels = len(labels)\n if nlabels == 1: return labels[0]\n if nlabels == 2:\n alabel = \"\"\"\\\nand([]\n %s\n %s)\"\"\" % (tuple(labels))\n return alabel\n\n # more than 2 labels is not expected\n assert False", "def partid(self): # -> int:\n ...", "def convert_partitions_to_list(partition):\n parts = list()\n for part in partition:\n parts.append(part)\n return parts", "def nodeSeparate(self,compInfo, ifSub, subname, subcktName,numNodesSub):\n node = []\n nodeTemp = []\n nodeDic = {}\n pinInit = 'Modelica.Electrical.Analog.Interfaces.Pin '\n pinProtectedInit = 'Modelica.Electrical.Analog.Interfaces.Pin '\n protectedNode = []\n print \"CompInfo coming to nodeSeparate function: compInfo\",compInfo\n \n #Removing '[' and ']' from compInfo for Digital node\n for i in range(0,len(compInfo),1):\n compInfo[i] = compInfo[i].replace(\"[\",\"\").replace(\"]\",\"\")\n \n \n for eachline in compInfo:\n words = eachline.split()\n if eachline[0] in ['m', 'e', 'g', 't','M','E','G','T']:\n nodeTemp.append(words[1])\n nodeTemp.append(words[2])\n nodeTemp.append(words[3])\n nodeTemp.append(words[4])\n elif eachline[0] in ['q', 'j','J','Q']:\n nodeTemp.append(words[1])\n nodeTemp.append(words[2])\n nodeTemp.append(words[3])\n elif eachline[0]=='x' or eachline[0]=='X':\n templine = eachline.split()\n for i in range(0,len(templine),1):\n if templine[i] in subcktName:\n point = i \n nodeTemp.extend(words[1:point])\n else:\n nodeTemp.append(words[1])\n nodeTemp.append(words[2])\n for i in nodeTemp:\n if i not in node:\n node.append(i)\n \n for i in range(0, len(node),1):\n nodeDic[node[i]] = 'n' + node[i]\n if ifSub == '0':\n if i != len(node)-1:\n pinInit = pinInit + nodeDic[node[i]] + ', '\n else:\n pinInit = pinInit + nodeDic[node[i]]\n else:\n nonprotectedNode = self.getSubInterface(subname, numNodesSub) \n if node[i] in nonprotectedNode:\n continue\n else:\n protectedNode.append(node[i])\n if ifSub == '1':\n if len(nonprotectedNode) > 0:\n for i in range(0, len(nonprotectedNode),1):\n if i != len(nonprotectedNode)-1:\n pinProtectedInit = pinProtectedInit + nodeDic[nonprotectedNode[i]] + ','\n else:\n pinProtectedInit = pinProtectedInit + nodeDic[nonprotectedNode[i]]\n if len(protectedNode) > 0:\n for i in range(0, len(protectedNode),1):\n if i != len(protectedNode)-1: \n pinInit = pinInit + nodeDic[protectedNode[i]] + ','\n else:\n pinInit = pinInit + nodeDic[protectedNode[i]]\n pinInit = pinInit + ';'\n pinProtectedInit = pinProtectedInit + ';'\n print \"Node---->\",node\n print \"nodeDic----->\",nodeDic\n print \"PinInit----->\",pinInit\n print \"pinProtectedinit--->\",pinProtectedInit\n return node, nodeDic, pinInit, pinProtectedInit", "def get_partition_details(disk, 
partition):\n details = {}\n script = [\n 'select disk {}'.format(disk['Number']),\n 'select partition {}'.format(partition['Number']),\n 'detail partition']\n\n # Diskpart details\n try:\n # Run script\n result = run_diskpart(script)\n except subprocess.CalledProcessError:\n pass\n else:\n # Get volume letter or RAW status\n output = result.stdout.decode().strip()\n tmp = re.search(r'Volume\\s+\\d+\\s+(\\w|RAW)\\s+', output)\n if tmp:\n if tmp.group(1).upper() == 'RAW':\n details['FileSystem'] = RAW\n else:\n details['Letter'] = tmp.group(1)\n # Remove empty lines from output\n tmp = [s.strip() for s in output.splitlines() if s.strip() != '']\n # Split each line on ':' skipping those without ':'\n tmp = [s.split(':') for s in tmp if ':' in s]\n # Add key/value pairs to the details variable and return dict\n details.update({key.strip(): value.strip() for (key, value) in tmp})\n\n # Get MBR type / GPT GUID for extra details on \"Unknown\" partitions\n guid = PARTITION_UIDS.get(details.get('Type').upper(), {})\n if guid:\n details.update({\n 'Description': guid.get('Description', '')[:29],\n 'OS': guid.get('OS', 'Unknown')[:27]})\n\n if 'Letter' in details:\n # Disk usage\n try:\n tmp = psutil.disk_usage('{}:\\\\'.format(details['Letter']))\n except OSError as err:\n details['FileSystem'] = 'Unknown'\n details['Error'] = err.strerror\n else:\n details['Used Space'] = human_readable_size(tmp.used)\n\n # fsutil details\n cmd = [\n 'fsutil',\n 'fsinfo',\n 'volumeinfo',\n '{}:'.format(details['Letter'])\n ]\n try:\n result = run_program(cmd)\n except subprocess.CalledProcessError:\n pass\n else:\n output = result.stdout.decode().strip()\n # Remove empty lines from output\n tmp = [s.strip() for s in output.splitlines() if s.strip() != '']\n # Add \"Feature\" lines\n details['File System Features'] = [s.strip() for s in tmp\n if ':' not in s]\n # Split each line on ':' skipping those without ':'\n tmp = [s.split(':') for s in tmp if ':' in s]\n # Add key/value pairs to the details variable and return dict\n details.update({key.strip(): value.strip() for (key, value) in tmp})\n\n # Set Volume Name\n details['Name'] = details.get('Volume Name', '')\n\n # Set FileSystem Type\n if details.get('FileSystem', '') not in ['RAW', 'Unknown']:\n details['FileSystem'] = details.get('File System Name', 'Unknown')\n\n return details", "def test_splitPartiesStringWithoutDivider(self):\n parties_str = \"Appellant: Denise O'B Respondent: Eugene D, Mary C\"\n expected = {\n \"applicant\": [{\"name\": \"denise o'b\"}],\n \"respondent\": [{\"name\": \"eugene d\"},\n {\"name\": \"mary c\"}]\n }\n result = split_parties.splitCaseParties(\"tribunal\", parties_str)\n self.assertEqual(expected, result)", "def voices_in_part (part):\n part.interpret ()\n part.extract_voices ()\n voices = part.get_voices ()\n part_info = part.get_staff_attributes ()\n\n return (voices, part_info)", "def __init__(self, *elems, **kwargs) -> None:\r\n self.sequence = tuple(kwargs['partition']) if kwargs.get('partition') else elems", "def partition_book(self):\n ...", "def _record_specific_partition(r_d, numnodes, cur):\n # No partitioning has been specified. Create the appropriate entries.\n if r_d['partmtd'] == 0:\n for i in range(1, numnodes + 1):\n Database.execute(cur, 'UPDATE dtables '\n 'SET partmtd = 0 '\n 'WHERE nodeid = ? AND tname = ?',\n ErrorHandle.raise_handler, (i, r_d['tname']))\n\n # Range partitioning has been specified. 
Create the appropriate entries.\n elif r_d['partmtd'] == 1:\n for i in range(1, numnodes + 1):\n Database.execute(cur, 'UPDATE dtables '\n 'SET partcol = ?, partparam1 = ?, '\n 'partparam2 = ?, partmtd = 1 '\n 'WHERE nodeid = ? AND tname = ?',\n ErrorHandle.raise_handler,\n (r_d['partcol'], r_d['param1'][i - 1], r_d['param2'][i - 1], i,\n r_d['tname']))\n\n # Hash partitioning has been specified. Create the appropriate entries.\n elif r_d['partmtd'] == 2:\n for i in range(1, numnodes + 1):\n Database.execute(cur, 'UPDATE dtables '\n 'SET partcol = ?, partparam1 = ?, partmtd = 2 '\n 'WHERE nodeid = ? AND tname = ?',\n ErrorHandle.raise_handler,\n (r_d['partcol'], r_d['param1'], i, r_d['tname']))", "def get_body_part_names(self):\n self.x_cols, self.y_cols, self.p_cols = [], [], []\n for bp in self.body_parts_lst:\n self.x_cols.append(f\"{bp}_x\")\n self.y_cols.append(f\"{bp}_y\")\n self.p_cols.append(f\"{bp}_p\")", "def on_add_clicked(self,button):\n\t\tself.list_partitions.add_partition()", "def true_partition(self):\n if 'NA' in self.mothers or 'NA' in self.fathers:\n warn('Warning: one or more individuals has at least one parent of unkown identity.')\n warn('All such individuals will be assigned to the same sibship group.')\n\n # concatenate mother and father names to create a vector of parent pairs.\n #parentage = np.array([str(self.mothers[o]) + '/' + str(self.fathers[o]) for o in range(noffs)])\n possible_families = np.unique(self.parents) # get a list of all unique parent pairs\n\n partitions = np.zeros(self.size).astype('int') # empty vector of zeros.\n for o in range(self.nfamilies):\n # for every possible family label individuals with an identical integer.\n partitions[self.parents == possible_families[o]] += o\n\n return partitions", "def mkswap(self, partition, check):\n args = [partition]\n if check:\n args.insert(0, \"-c\")\n return self.xlist(\"swap-format\", *args)[0]", "def partid2nids(self, partid):\n return self._partid2nids[partid]", "def label_block_get_serializer(get_info):\n serializer = sl_mpls_pb2.SLMplsLabelBlockGetMsg()\n if \"start_label\" in get_info:\n serializer.Key.StartLabel = get_info[\"start_label\"]\n if \"block_size\" in get_info:\n serializer.Key.LabelBlockSize = get_info[\"block_size\"]\n if \"count\" in get_info:\n serializer.EntriesCount = get_info[\"count\"]\n if \"get_next\" in get_info:\n serializer.GetNext = (get_info[\"get_next\"] != 0)\n return serializer", "def probe(self):\n log_method_call(self, self.name, exists=self.exists)\n if not self.exists or not self.disklabelSupported:\n return\n\n self._size = Size(self.partedPartition.getLength(unit=\"B\"))\n self.targetSize = self._size\n\n self._partType = self.partedPartition.type\n\n self._bootable = self.getFlag(parted.PARTITION_BOOT)", "def draw_partition(self, box, context, bounding_box):\n assert self.canvas\n\n cr = context.cairo\n cr.set_line_width(box.style(\"line-width\"))\n\n if self.subject and not self.subject.isDimension and self._toplevel:\n cr.move_to(0, 0)\n cr.line_to(bounding_box.width, 0)\n\n h = self._header_size[1]\n\n # draw outside lines if this item is toplevel partition\n if self._toplevel:\n cr.move_to(0, bounding_box.height)\n cr.line_to(0, h)\n cr.line_to(bounding_box.width, h)\n cr.line_to(bounding_box.width, bounding_box.height)\n\n if self._subpart:\n # header line for all subparitions\n hd = h + self._hdmax\n cr.move_to(0, hd)\n cr.line_to(bounding_box.width, hd)\n\n if self._subpart:\n # draw inside lines for all children but last one\n dp = 0\n for sl in 
self.canvas.get_children(self)[:-1]:\n dp += sl.width\n cr.move_to(dp, h)\n cr.line_to(dp, bounding_box.height)\n\n cr.stroke()\n\n if context.hovered or context.dropzone:\n cr.save()\n cr.set_dash((1.0, 5.0), 0)\n cr.set_line_width(1.0)\n cr.rectangle(0, 0, bounding_box.width, bounding_box.height)\n draw_highlight(context)\n cr.stroke()\n cr.restore()" ]
[ "0.65343463", "0.53522164", "0.52719545", "0.52471644", "0.51894504", "0.5177348", "0.5096431", "0.5087608", "0.4973218", "0.4973218", "0.4970022", "0.4951215", "0.49493623", "0.49468526", "0.49310818", "0.492718", "0.49157664", "0.49112102", "0.49103266", "0.48851392", "0.4877146", "0.48584715", "0.48566926", "0.48450437", "0.48346877", "0.482512", "0.4820094", "0.4820094", "0.47875524", "0.47756955", "0.47598547", "0.47330543", "0.47265384", "0.4719906", "0.4719906", "0.46988344", "0.46983254", "0.46896246", "0.46693867", "0.46632653", "0.46558505", "0.46128348", "0.46121785", "0.46105745", "0.46092212", "0.46080282", "0.46048173", "0.4574543", "0.45707077", "0.45687282", "0.45561484", "0.45477712", "0.45349917", "0.4528485", "0.4522739", "0.45122588", "0.44895494", "0.44875002", "0.44862923", "0.44753525", "0.44753525", "0.4475197", "0.446557", "0.44547406", "0.4451641", "0.44513708", "0.44402844", "0.4428629", "0.44193205", "0.44138128", "0.44039822", "0.43905866", "0.43837956", "0.43809974", "0.4378019", "0.43777174", "0.43745434", "0.43745103", "0.4366638", "0.4362995", "0.43581665", "0.4347518", "0.43441078", "0.4343912", "0.43435016", "0.43427646", "0.43418676", "0.43268716", "0.43181443", "0.4316197", "0.43119234", "0.43058017", "0.43017617", "0.42950252", "0.42902675", "0.42888504", "0.4285727", "0.4285506", "0.42787486", "0.42769438" ]
0.6581646
0
Marshal information from the selected chainParts to create a
def _make_simple_comb_label(chain_dict): cps = chain_dict['chainParts'] if not (_select_simple_chainparts(cps)): raise NotImplementedError( 'chain fails substring selection: not "simple": %s' % ( chain_dict['chainName'])) simple_strs = [] for cp in cps: print(cp) simple_strs.append(_make_simple_label([cp])) label = 'combgen([(%d)]' % len(cps) for s in simple_strs: label += ' %s ' % s label += ')' return label
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def makeBinaryChains():\n\t\n\t# retrieve the binding partner specifications\n\t(maxsize,types) = getTypes()\n\t\n\t# Do some basic argument checking for this model\n\tif (len(types) < 2):\n\t\tprint \"Number of defined types must equal two for binary chain calculations.\"\n\t\treturn\n\tif (maxsize == 0):\n\t\tprint \"Must specify a valid maximum number for one or more components.\"\n\t\treturn\n\n\tallChains = []\n\tnewChainsA = [[]]\n\tnewChainsB = []\n\t\n\ttypeA = types[0]\n\ttypeB = types[1]\n\t\n\t# start the chain with a single type A component\n\taddComponent(newChainsA[0],typeA,0,0)\n\n\tdepth = 0\n\tfor n in range(maxsize):\n\t\tdepth+=1\n\t\t\n\t\t# go through all the chains created last iteration and append B components\n\t\tnewChainsB = []\n\t\tfor thisChain in newChainsA:\n\n\t\t\t# get a list of new available sites in the provided chain\n\t\t\t# by setting depth -1, we will only add to components added last round\n\t\t\topenSites = makeSiteList(thisChain,typeB,depth-1)\n\t\t\t\n\t\t\t# make all the descendants from the current chain and append them to the pool\n\t\t\tif (n == 0) and (typeA['sym']): #if the starting binder is symmetric, no need to start chains at all its sites\n\t\t\t\tnewChainsB = newChainsB + fillSites(openSites,thisChain,typeB,-1)\n\t\t\telse:\n\t\t\t\tnewChainsB = newChainsB + fillSites(openSites,thisChain,typeB,depth)\n\t\t\n\t\tprint('n:'+str(n)+', '+str(len(newChainsB))+ ' chains created at depth '+str(depth))\n\t\t\n\t\tallChains = allChains + newChainsB\n\t\t\n\t\tdepth+=1\n\t\t\n\t\t# add an additional component to all the previously modified chains\n\t\tnewChainsA = []\n\t\tfor thisChain in newChainsB:\n\n\t\t\topenSites = makeSiteList(thisChain,typeA,depth-1)\n\t\t\tnewChainsA = newChainsA + fillSites(openSites,thisChain,typeA,depth)\n\t\t\t\n\t\tprint('n:'+str(n)+', '+str(len(newChainsA))+ ' chains created at depth '+str(depth))\n\t\t\n\t\tallChains = allChains + newChainsA\n\n\treturn allChains", "def serialize(self):\n return pickle.dumps([block.serialize() for block in self.chain])", "def _encode_structure(self):\n pass", "def _encode_structure(self):\n pass", "def marshal(self):\n ...", "def add_chain_to_model(chain, model, atoms):\n\n if chain[\"type\"] == \"polymer\" or chain[\"type\"] == \"branched\":\n polymer = {\n \"internal_id\": chain[\"internal_id\"], \"sequence\": chain[\"sequence\"],\n \"helices\": [], \"strands\": [], \"residues\": {}\n }\n for i, group in enumerate(chain[\"groups\"], start=1):\n add_het_to_dict(group, chain, atoms, polymer[\"residues\"], number=i)\n add_ss_to_chain(polymer)\n model[\"polymer\"][chain[\"id\"]] = polymer\n else:\n for group in chain[\"groups\"]:\n add_het_to_dict(group, chain, atoms, model[chain[\"type\"]])", "def __rechaindict__(c):\n from TriggerMenu.menu.DictFromChainName import DictFromChainName\n dfcn = DictFromChainName()\n\n pl1 = []\n for pch in c['chainParts']:\n pl1.append(pch['L1item'])\n\n newname = c['chainName'].replace('dv_','').replace('TestChain','j')\n nchlist = [ newname ,c['chainCounter'],c['L1item'],pl1,c['stream'],\n c['groups'],c['EBstep'] ]\n \n return dfcn.getChainDict(nchlist)", "def _serialise(self):\n # TODO (M Foley)\n pass", "def pack(self):\n data = {\n 'name': self._name,\n 'piece': self._piece,\n 'pos': self._pos,\n 'cash': self._cash,\n 'properties': []\n }\n\n for i in self._properties:\n data['properties'].append({'name': i.name, 'value': i.value})\n\n return data", "def marshal(self):\n raise NotImplementedError", "def adapt_chain(chain):\n type_chain = 
check_type(chain)\n name = chain.id\n if type_chain == \"nucleic_acid\":\n new_chain = Bio.PDB.Chain.Chain(name)\n chain = copy.copy(chain)\n for residue in chain:\n new_chain.add(residue.copy())\n\n for residue in new_chain:\n for atom in residue:\n if atom.id == \"C1'\":\n atom.id = \"CA\"\n residue.add(atom.copy())\n return new_chain\n else:\n return chain", "def create_from_segments(self, segment, origin=0):\r\n n = origin\r\n if segment[origin]['T'] != 'soma': # if it's a soma, only one compartment\r\n while (len(segment[n]['children']) == 1) and (segment[n]['T'] != 'soma'): # Go to the end of the branch\r\n n += 1\r\n # End of branch\r\n branch = segment[origin:n + 1]\r\n # Set attributes\r\n self.diameter, self.length, self.area, self.x, self.y, self.z = \\\r\n zip(*[(seg['diameter'], seg['length'], seg['area'], seg['x'], seg['y'], seg['z']) for seg in branch])\r\n self.diameter, self.length, self.area, self.x, self.y, self.z = array(self.diameter), array(self.length), \\\r\n array(self.area), array(self.x), array(self.y), array(self.z)\r\n self.type = segment[n]['T'] # normally same type for all compartments in the branch\r\n # Create children (list)\r\n self.children = [Morphology().create_from_segments(segment, origin=c) for c in segment[n]['children']]\r\n # Create dictionary of names (enumerates children from number 1)\r\n for i, child in enumerate(self.children):\r\n self._namedkid[str(i + 1)] = child\r\n # Name the child if possible\r\n if child.type in ['soma', 'axon', 'dendrite']:\r\n if child.type in self._namedkid:\r\n self._namedkid[child.type] = None # two children with the same name: erase (see next block)\r\n else:\r\n self._namedkid[child.type] = child\r\n # Erase useless names\r\n for k in self._namedkid.keys():\r\n if self._namedkid[k] is None:\r\n del self._namedkid[k]\r\n # If two kids, name them L (left) and R (right)\r\n if len(self.children) == 2:\r\n self._namedkid['L'] = self._namedkid['1']\r\n self._namedkid['R'] = self._namedkid['2']\r\n return self", "def save_chain(self):\n pprint('saving to file named bc_file.txt')\n with open('ddos_bc_file.txt', 'w') as output:\n output.write(serializer.serialize(self.chain))", "def make_mixture_info(parts, operation='+'):\n # type: (List[ModelInfo], str) -> ModelInfo\n # Build new parameter list\n combined_pars = []\n\n # When creating a mixture model that is a sum of product models (ie (1*2)+(3*4))\n # the parameters for models 1 & 2 will be prefixed with A & B respectively,\n # but so will the parameters for models 3 & 4. We need to rename models 3 & 4\n # so that they are prefixed with C & D to avoid overlap of parameter names.\n used_prefixes = []\n for part in parts:\n if part.composition and part.composition[0] == 'mixture':\n i = 0\n for submodel in part.composition[1]:\n npars = len(submodel.parameters.kernel_parameters)\n # List of params of one of the constituent models of part\n submodel_pars = part.parameters.kernel_parameters[i:i+npars]\n # Prefix of the constituent model\n prefix = submodel_pars[0].name[0]\n if prefix not in used_prefixes: # Haven't seen this prefix so far\n used_prefixes.append(prefix)\n i += npars\n continue\n # TODO: don't modify submodel --- it may be used elsewhere\n # Existing code probably doesn't keep a handle on the model\n # parts so its probably okay, but it's possible that a mix\n # on user defined mixture models models will change the\n # parameters used for the parts in the GUI. Even worse if the\n # same plugin is used twice. 
For example, twosphere.py\n # contains sphere+sphere and you create twosphere+twosphere.\n while prefix in used_prefixes:\n # This prefix has been already used, so change it to the\n # next letter that hasn't been used\n prefix = chr(ord(prefix) + 1)\n used_prefixes.append(prefix)\n prefix += \"_\"\n # Update the parameters of this constituent model to use the\n # new prefix\n for par in submodel_pars:\n # Strip {prefix}_ using par.name[2:], etc.\n # TODO: fails for AB_scale\n par.id = prefix + par.id[2:]\n par.name = prefix + par.name[2:]\n if par.length_control is not None:\n par.length_control = prefix + par.length_control[2:]\n i += npars\n\n for part in parts:\n # Parameter prefix per model, A_, B_, ...\n # Note that prefix must also be applied to id and length_control\n # to support vector parameters\n prefix = ''\n if not part.composition or part.composition[0] == 'product':\n # Model isn't a composition model, so its parameters don't have a\n # a prefix. Add the next available prefix\n prefix = chr(ord('A')+len(used_prefixes))\n used_prefixes.append(prefix)\n prefix += '_'\n\n if operation == '+':\n # If model is a sum model, each constituent model gets its own scale parameter\n scale_prefix = prefix\n if prefix == '' and getattr(part, \"operation\", '') == '*':\n # `part` is a composition product model. Find the prefixes of\n # its parameters to form a new prefix for the scale.\n # For example, a model with A*B*C will have ABC_scale.\n sub_prefixes = []\n for param in part.parameters.kernel_parameters:\n # Prefix of constituent model\n sub_prefix = param.id.split('_')[0]\n if sub_prefix not in sub_prefixes:\n sub_prefixes.append(sub_prefix)\n # Concatenate sub_prefixes to form prefix for the scale\n scale_prefix = ''.join(sub_prefixes) + '_'\n scale = Parameter(scale_prefix + 'scale', default=1.0,\n description=\"model intensity for \" + part.name)\n combined_pars.append(scale)\n for p in part.parameters.kernel_parameters:\n p = copy(p)\n p.name = prefix + p.name\n p.id = prefix + p.id\n if p.length_control is not None:\n p.length_control = prefix + p.length_control\n combined_pars.append(p)\n parameters = ParameterTable(combined_pars)\n # Allow for the scenario in which each component has all its PD parameters\n # active simultaneously. 
details.make_details() will throw an error if\n # too many are used from any one component.\n parameters.max_pd = sum(part.parameters.max_pd for part in parts)\n\n def random():\n \"\"\"Random set of model parameters for mixture model\"\"\"\n combined_pars = {}\n for k, part in enumerate(parts):\n prefix = chr(ord('A')+k) + '_'\n pars = part.random()\n combined_pars.update((prefix+k, v) for k, v in pars.items())\n return combined_pars\n\n model_info = ModelInfo()\n model_info.id = operation.join(part.id for part in parts)\n model_info.operation = operation\n model_info.name = '(' + operation.join(part.name for part in parts) + ')'\n model_info.filename = None\n model_info.title = 'Mixture model with ' + model_info.name\n model_info.description = model_info.title\n model_info.docs = model_info.title\n model_info.category = \"custom\"\n model_info.parameters = parameters\n model_info.random = random\n #model_info.single = any(part['single'] for part in parts)\n model_info.structure_factor = False\n #model_info.tests = []\n #model_info.source = []\n # Remember the component info blocks so we can build the model\n model_info.composition = ('mixture', parts)\n return model_info", "def writeBlocks(self):\n dataFile = open(\"chain.txt\", \"w\")\n chainData = []\n for eachBlock in self.chain:\n chainData.append(eachBlock.__dict__)\n dataFile.write(json.dumps(chainData, indent=4))\n dataFile.close()", "def construct_fragments(self):\n for frag_dict in self.fragment_dict_list:\n try:\n chain = self.model[frag_dict[\"chain_id\"]]\n frag = chain[frag_dict[\"frag_id\"]]\n except KeyError:\n self.add_fragment(frag_dict, None)\n continue\n\n self.add_fragment(frag_dict, frag)", "def _pack(self):\n pass", "def pack_goods(self, by=None):", "def pack_goods(self, by=None):", "def createInnerRepresentation(self):\n\n for idx, single_block in enumerate(self._block_list):\n del self._to_be_processed[:]\n del self._metastring_rest[:]\n self._metastring_rest.append(self._metastring[idx])\n self.addMetastringPointer(single_block)", "def create(data):\n \n return Part(\n part_id = data['part_num'],\n category_id = data['part_cat_id'],\n external_ids = data.get('external_ids', {}),\n name = data['name'],\n year_from = data.get('year_from', None),\n year_to = data.get('year_to', None),\n url = data.get('part_url', None),\n img_url = data.get('part_img_url', None),\n print_of = data.get('print_of', None),\n prints = data.get('prints', []),\n molds = data.get('molds', []),\n alternates = data.get('alternates', []))", "def assemble_parts(self):\n self.parts['whole'] = self.output\n self.parts['encoding'] = self.document.settings.output_encoding\n self.parts['version'] = docutils.__version__", "def serialize(self):", "def _marshal(self, pieces):\n payload = b''.join(pieces)\n return struct.pack('>BHI', self.frame_type, self.channel_number,\n len(payload)) + payload + bytes((spec.FRAME_END,))", "def _blob(self):\n self.__rewrite_sldIdLst()\n # # at least the following needs to be added before using\n # # _reltype_ordering again for Presentation\n # self.__rewrite_notesMasterIdLst()\n # self.__rewrite_handoutMasterIdLst()\n # self.__rewrite_sldMasterIdLst()\n return super(Presentation, self)._blob", "def get_structure(self):\n return self.chain.model.structure", "def marshal(self):\n return self._marshal(list())", "def marshal(self):\n return self._marshal([self.fragment])", "def build_serializer(self):\n self._add_child_elements_recursive(self.get_root_element())", "def chainsetup(filename, cation, facets, operation, 
end_radii, nradii,\n adensity):\n\n # Load the Cage from the file\n try:\n # If that fails, try other file formats supported by pymatgen\n anion = Cage.from_file(filename)\n except ValueError:\n # If that fails, try the VASP POSCAR format\n anion = Cage.from_poscar(filename)\n\n # Center the anion around the origin\n anion.center()\n\n # Find the chain edges, i.e. the paths between the edge sharing facets of\n # the chain of non-equivalent facets.\n anion.find_surface_facets(ignore=IGNORE)\n\n if not facets == tuple:\n chosen_facets = [anion.facets[index] for index in facets]\n edges = anion.find_noneq_chain_links(chosen_facets)\n else:\n edges = anion.find_noneq_chain_links()\n\n total_mol = anion.copy()\n\n chain_dir = 'chain_' + operation\n try:\n os.mkdir(chain_dir)\n except FileExistsError:\n pass\n\n # For each edge, set up the calculation input files\n edge_number = 1\n\n for edge in edges:\n\n # Set up the edge directory\n edge_dir = os.path.join(chain_dir, \"edge\" + str(edge_number))\n\n while os.path.exists(edge_dir):\n edge_number += 1\n edge_dir = os.path.join(chain_dir, \"edge\" + str(edge_number))\n\n os.mkdir(edge_dir)\n\n # Write out the molecule and path facets to the edge directory\n anion.to(fmt=\"json\", filename=os.path.join(edge_dir, \"molecule.json\"))\n edge[0].to(fmt=\"json\", filename=os.path.join(edge_dir,\n \"init_facet.json\"))\n edge[1].to(fmt=\"json\", filename=os.path.join(edge_dir,\n \"final_facet.json\"))\n\n # Get copies so the originals aren't mutated\n edge_mol = anion.copy()\n facet1 = edge[0].copy()\n facet2 = edge[1].copy()\n\n if edge == edges[-1]:\n remove_endline = False\n else:\n remove_endline = True\n\n # Set up the landscape\n landscape = set_up_edge_landscape(facet1, facet2,\n endpoint_radii=end_radii,\n number_of_radii=nradii,\n angle_density=adensity,\n remove_endline=remove_endline)\n\n # Get the molecule for each landscape point\n molecules = set_up_molecules(edge_mol, landscape, cation)\n\n # Set up an xyz file to visualize the edge and total landscape\n for point in landscape.points:\n try:\n total_mol.append(pmg.Specie(cation, 1), point,\n validate_proximity=False)\n edge_mol.append(pmg.Specie(cation, 1), point,\n validate_proximity=False)\n except ValueError:\n pass\n\n edge_mol.to(fmt=\"xyz\", filename=os.path.join(edge_dir, \"edge.xyz\"))\n\n # In case the molecules must be optimized, add the constraints and\n # optimization setup (DRIVER)\n if operation == \"optimize\":\n far_facet = anion.find_farthest_facet(landscape.center)\n constraints = find_constraints(anion, far_facet.sites)\n constraints['fix atom'] += ' ' + str(len(anion.sites) + 1)\n ALT_SETUP['constraints'] = constraints\n ALT_SETUP[\"driver\"] = DRIVER_SETUP\n\n # Set up the task for the calculations\n tasks = [nwchem.NwTask(molecules[0].charge, None, BASIS,\n theory=\"dft\",\n operation=operation,\n theory_directives=THEORY_SETUP,\n alternate_directives=ALT_SETUP)]\n\n # Set up the input files\n study = Study(molecules, tasks)\n study.set_up_input(edge_dir, sort_comp=False,\n geometry_options=GEO_SETUP)\n\n edge_number += 1\n\n # Set up an xyz file with all the paths\n total_mol.to(fmt=\"xyz\", filename=os.path.join(chain_dir, \"total_mol.xyz\"))", "def add_chain_signature(\n self, prop: str, key: JWK, alg: Optional[AlgorithmName] = None,\n header: Optional[JsonObject] = None) -> None:\n top_level_signature = self._payload.get(prop)\n for k in top_level_signature.keys():\n if k != _CHAIN:\n del top_level_signature[k]\n chain = top_level_signature.get(_CHAIN, [])\n 
self._add_signature(prop, key, alg, header,\n lambda h: {_CHAIN: chain + [h]},\n lambda h: (self._payload\n .setdefault(prop, {})\n .setdefault(_CHAIN, [])\n .append(h)))", "def trip_chain(self):\n pass", "def __init__(self, objects=()):\n\n vtk.vtkPropAssembly.__init__(self)\n\n self.name = \"\"\n self.created = \"\"\n self.trail = None\n self.trail_points = []\n self.trail_segment_size = 0\n self.trail_offset = None\n self.shadows = []\n self.info = {}\n self.rendered_at = set()\n self.transform = None\n self.scalarbar = None\n\n for a in vedo.utils.flatten(objects):\n if a:\n self.AddPart(a)\n\n self.PickableOff()", "def parse_part(self):\n parts = []\n for part in re.split(r'\\*\\*\\* ([A-Z- ]+) \\*\\*\\*', self.hand_file): # return [ 'part1', 'splitter1', 'part2',..\n parts.append(part)\n\n for i in range(0, len(parts)):\n if i == 0:\n self.part_dict['HEADER'] = parts[i]\n if i % 2 != 0: # number is odd\n self.part_dict[parts[i]] = parts[i + 1]", "def marshal_departments(result):\n return result", "def read_chain(self, _id):\n chain_def = {'class' : 'chain', 'type' : ''}\n for i in self.config_reader.options(_id):\n chain_def[i] = self.config_reader.get(_id, i)\n logging.debug(\"Registering chain %s\", _id)\n self.instances.register(_id,chain_def,self.create_chain)", "def dump_parts(self, io):\n\n # XXX refactor with Tempita\n title = \"Parts created by the docutils writer '%s'\" % self.strategy.name\n io.say(title + os.linesep)\n io.say(len(title) * '-')\n io.say(2 * os.linesep)\n io.say('Part keys: ' + 2 * os.linesep)\n\n parts = self.publish_parts(io)\n io.say(os.linesep.join(sorted(parts.keys())))\n io.say(2 * os.linesep)\n for part in parts:\n io.say(\"Value of part '%s':%s\" % (part, os.linesep))\n io.say(parts[part].encode('utf-8') + os.linesep)\n io.say(80*'-'+os.linesep)\n io.say(os.linesep)", "def serialize(self):\n return {\n 'id': self.id,\n 'name': self.name,\n 'ingredients': self.ingredients,\n 'directions': self.directions,\n 'type': self.type,\n }", "def addChain(self, chain):\n\n\t\tself.chain.append(chain)\n\t\tchain.parentMolecule = self", "def normalize_chain(cls, chain):\n\n if isinstance(chain, cls):\n chain = (chain,)\n return tuple(x for x in chain if x is not None)", "def toPartners(self):\n num_bases = len(self) #number of bases\n result = [None] * len(self) #array of None, one for each base\n stack = []\n start = self.StartSymbols\n end = self.EndSymbols\n for i, symbol in enumerate(self):\n if symbol in start: #open a pair\n stack.append(i)\n elif symbol in end: #close a pair\n curr = stack.pop() #return and delete last element\n result[i] = curr #make i pair with the last element...\n result[curr] = i #...and the last element pair with i\n \n #test whether there are any open pairs left unaccounted for \n if stack:\n raise IndexError, \\\n \"Too many open pairs in structure:\\n%s\" % self\n return Partners(result)", "def __init__(self, fab=None, heavy_chains=None, light_chains=None, names=None):\n # check if it's a Chain object\n if heavy_chains is None and light_chains is None and fab is None:\n raise ValueError('Provide a list of Chain objects or an ChainCollection object')\n\n # check if fab object is a list and if all object are abpytools.Fab objects\n if isinstance(fab, list) and all(isinstance(fab_i, Fab) for fab_i in fab):\n self._fab = fab\n self._light_chains = ChainCollection([x[0] for x in self._fab])\n self._heavy_chains = ChainCollection([x[1] for x in self._fab])\n\n if fab is None and (heavy_chains is not None and light_chains is not 
None):\n\n if isinstance(heavy_chains, list):\n self._heavy_chains = ChainCollection(antibody_objects=heavy_chains)\n\n elif isinstance(heavy_chains, ChainCollection):\n self._heavy_chains = heavy_chains\n\n else:\n raise ValueError('Provide a list of Chain objects or an ChainCollection object')\n\n if isinstance(light_chains, list):\n self._light_chains = ChainCollection(antibody_objects=light_chains)\n\n elif isinstance(light_chains, ChainCollection):\n self._light_chains = light_chains\n\n else:\n raise ValueError('Provide a list of Chain objects or an ChainCollection object')\n\n if len(self._light_chains.loading_status()) == 0:\n self._light_chains.load()\n\n if len(self._heavy_chains.loading_status()) == 0:\n self._heavy_chains.load()\n\n if self._light_chains.n_ab != self._heavy_chains.n_ab:\n raise ValueError('Number of heavy chains must be the same of light chains')\n\n if isinstance(names, list) and all(isinstance(name, str) for name in names):\n if len(names) == self._heavy_chains.n_ab:\n self._names = names\n else:\n raise ValueError(\n 'Length of name list must be the same as length of heavy_chains/light chains lists')\n\n elif names is None:\n self._names = ['{} - {}'.format(heavy, light) for heavy, light in zip(self._heavy_chains.names,\n self._light_chains.names)]\n\n else:\n raise ValueError(\"Names expected a list of strings, instead got {}\".format(type(names)))\n\n self._n_ab = self._light_chains.n_ab\n self._pair_sequences = [heavy + light for light, heavy in zip(self._heavy_chains.sequences,\n self._light_chains.sequences)]\n\n # keep the name of the heavy and light chains internally to keep everything in the right order\n self._internal_heavy_name = self._heavy_chains.names\n self._internal_light_name = self._light_chains.names", "def get_serializer(self, *args, **kwargs):\n kwargs['part_detail'] = True\n kwargs['location_detail'] = True\n kwargs['supplier_part_detail'] = True\n kwargs['context'] = self.get_serializer_context()\n\n return self.serializer_class(*args, **kwargs)", "def _encode_supplement(self):", "def _encode_supplement(self):", "def serialize(self, data):", "def partid2nids(self, partid, ntype): # -> None:\n ...", "def from_binary(self, d):\n p = MsgCertificateChainDep._parser.parse(d)\n for n in self.__class__.__slots__:\n setattr(self, n, getattr(p, n))", "def __init__(self, chain_instance, *args, **kwargs):\n protocol_logger('Intializing protocol processor')\n self.chain_instance = chain_instance", "def build_parts_from_dict(self, data, skip_power_controls=False):\n \n # Validate Objects information.\n if \"Objects\" not in data:\n return\n\n # Start creating parts.\n parts = []\n for part_data in data[\"Objects\"]:\n part = part_data[\"ObjectID\"].replace(\"^\", \"\")\n timestamp = part_data[\"Timestamp\"]\n user_data = part_data[\"UserData\"]\n part_position = part_data[\"Position\"]\n up_vec = part_data[\"Up\"]\n at_vec = part_data[\"At\"]\n # Build the item.\n item = self.build_item(\n part,\n timestamp,\n user_data,\n part_position,\n up_vec,\n at_vec,\n skip_power_controls\n )\n parts.append(item)\n\n return parts", "def preparing(fasta_list, pdb_dict):\n for item1 in fasta_list:\n matchObj = re.search( '^(.*)_([a-zA-Z0-9])$', item1[0])\n fasta1= item1[1]\n if matchObj:\n original_name1= matchObj.group(1)\n original_structure1=pdb_dict[original_name1]\n chain_1= matchObj.group(2) \n yield fasta1, [original_structure1, chain_1]", "def _make_partitionsTest_label(chain_parts):\n\n assert len(chain_parts) == 1\n scenario = 
chain_parts[0]['hypoScenario']\n \n assert scenario == 'partitionsTest'\n\n \n\n return \"\"\"\n partgen(\n [(20et, 0eta320)]\n \n simple([(40et, 0eta320) (50et, 0eta320)])\n simple([(35et, 0eta240) (55et, 0eta240)])\n )\"\"\"", "def _get_chain_repr(self, chain):\n chain_repr = []\n for module in chain:\n if isinstance(module, collections.Iterable): # module is a chain\n chain_repr.append(self._get_chain_repr(module))\n elif hasattr(module, 'process'): # module is an object\n chain_repr.extend(\n (str(module.__class__), repr(vars(module))))\n else: # module is a function\n if isinstance(module, partial): # partial function\n chain_repr.extend((str(module.__class__), repr(module.func),\n repr(module.keywords)))\n else:\n chain_repr.append(repr(module))\n return ' '.join(chain_repr)", "def serialize(self, root):", "def partid2nids(self, partid, ntype=...):\n ...", "def partid2nids(self, partid, ntype=...):\n ...", "def _decode(self, parts: typing.List[int]) -> typing.Dict:\n info = {field.name: field.decode(parts[i]) for i, field in enumerate(self.fields)}\n return info", "def __init__(self):\n self.chain = [Block.genesis()]", "def compose(self):\r\n return_lib = self.other\r\n return_lib.update({\r\n 'ID' : self.id,\r\n 'Name' : self.name,\r\n 'Alias' : self.alias,\r\n 'Parent' : self.parent,\r\n 'Target' : self.target,\r\n 'Gap' : self.gap,\r\n 'Derives_from' : self.derives_from,\r\n 'Note' : self.note,\r\n 'Dbxref' : self.dbxref,\r\n 'Ontology_term' : self.ontology_term,\r\n 'Is_circular' : self.is_circular,\r\n 'allele' : ','.join([a.strip() for a in self.allele])\r\n })\r\n if self.discovered != None:\r\n if self.discovered:\r\n return_lib[self.discovered_key] = self.enabled[0]\r\n else:\r\n return_lib[self.discovered_key] = self.disabled[0]\r\n if self.validated != None:\r\n if self.validated:\r\n return_lib[self.validated_key] = self.enabled[0]\r\n else:\r\n return_lib[self.validated_key] = self.disabled[0]\r\n if self.active != None:\r\n if self.active:\r\n return_lib[self.active_key] = self.disabled[0]\r\n else:\r\n return_lib[self.active_key] = self.enabled[0]\r\n for i,item in enumerate(self.history):\r\n return_lib[\"{}_{}\".format(self.history_key,i)] = ','.join(item)\r\n return_str = ';'.join([key + '=' + value for key, value in return_lib.items() if value])\r\n return return_str", "def _encode_parts(self, messages, encode_empty=False):\n if messages or encode_empty:\n return self.signer.sign_object(\n messages, serializer=MessagePartGatherSerializer, compress=True\n )", "def serialize(self):\n pass", "def _create_chain(class_type_list, kwargs_list):\n chain = None # module with preprocessing chain\n modules = [] # list of modules (not connected via preprocessing)\n for i, pre_id in enumerate(class_type_list):\n chain = CModule.create(\n pre_id, preprocess=chain, **kwargs_list[i])\n modules.append(CModule.create(pre_id, **kwargs_list[i]))\n return chain, modules", "def __store_part(self, definition, pnum, multisubtype):\n pnum = \"1\" if pnum is None else pnum\n params = {\n \"pnum\": pnum,\n \"params\": definition[2],\n \"cid\": definition[3],\n \"description\": definition[4],\n \"encoding\": definition[5],\n \"size\": definition[6]\n }\n mtype = definition[0].lower()\n subtype = definition[1].lower()\n ftype = \"%s/%s\" % (definition[0].lower(), subtype)\n if ftype in (\"text/plain\", \"text/html\"):\n if subtype not in self.contents:\n self.contents[subtype] = [params]\n else:\n self.contents[subtype].append(params)\n return\n elif multisubtype in [\"related\"]:\n 
self.inlines[params[\"cid\"].strip(\"<>\")] = params\n return\n\n params[\"Content-Type\"] = ftype\n if len(definition) > 7:\n extensions = [\"md5\", \"disposition\", \"language\", \"location\"]\n if mtype == \"text\":\n extensions = [\"textlines\"] + extensions\n elif ftype == \"message/rfc822\":\n extensions = [\n \"envelopestruct\",\n \"bodystruct\",\n \"textlines\"] + extensions\n for idx, value in enumerate(definition[7:]):\n params[extensions[idx]] = value\n\n self.attachments += [params]", "def serialize(self):\n return {\n 'name' : self.name,\n 'ingredients' : self.ingredients,\n 'id' : self.id,\n 'preparation' : self.preparation,\n 'image' : self.image,\n }", "def skeleton_getHandleChain(self, typeModifier = None, jointHelpers = True, mOrientHelper = None):\n _short = self.mNode\n _str_func = 'skeleton_getHandleChain'\n #start = time.clock()\t\n log.debug(cgmGEN.logString_start(_str_func))\n \n mRigNull = self.moduleTarget.rigNull\n ml_fkJoints = mRigNull.msgList_get('fkJoints')\n \n if not ml_fkJoints:\n log.debug(\"|{0}| >> Generating handleJoints\".format(_str_func))\n \n ml_formHandles = self.msgList_get('formHandles',asMeta = True)\n if not ml_formHandles:\n raise ValueError,\"No formHandles connected\" \n \n ml_prerigHandles = self.msgList_get('prerigHandles',asMeta = True)\n if not ml_prerigHandles:\n raise ValueError,\"No prerigHandles connected\"\n \n if mOrientHelper is None:\n mOrientHelper = ml_formHandles[0].orientHelper or ml_prerigHandles[0].orientHelper\n \n #_d = skeleton_getCreateDict(self)\n #pprint.pprint(_d)\n l_pos = []\n for mObj in ml_prerigHandles:\n l_pos.append(mObj.p_position)\n \n ml_fkJoints = COREJOINTS.build_chain(posList = l_pos,\n axisAim='z+',\n axisUp='y+',\n parent=True,\n worldUpAxis= mOrientHelper.getAxisVector('y+'))\n \n for i,mJnt in enumerate(ml_fkJoints):\n mJnt.doCopyNameTagsFromObject(ml_prerigHandles[i].mNode, ignore = ['cgmType'])\n if not typeModifier:\n mJnt.doName()\n \n if typeModifier:\n for mJnt in ml_fkJoints:\n mJnt.addAttr('cgmTypeModifier',typeModifier,attrType='string',lock=True)\n mJnt.addAttr('cgmType','frame',attrType='string',lock=True) \n mJnt.doName()\n \n ml_fkJoints[0].p_parent = False\n else:\n log.debug(\"|{0}| >> Found fkJoints\".format(_str_func))\n \n #log.debug(\"%s >> Time >> = %0.3f seconds \" % (_str_func,(time.clock()-start)) + \"-\"*75)\t\n return ml_fkJoints", "def serialize(self, pipe, pid):\n for trsp in self.transports:\n pipe.send(trsp.__class__)\n trsp.serialize(pipe, pid)", "def _make_information_storable( self, data ):\n\t\tpass", "def save_data(self):\n try:\n with open('blockchain-{}.txt'.format(self.node_id), mode='w') as f:\n saveable_chain = [block.__dict__ for block in [Block(block_el.index, block_el.previous_hash, \n [tx.__dict__ for tx in block_el.transactions], \n [tx.__dict__ for tx in block_el.chipsactions],\n [tx.__dict__ for tx in block_el.messsactions],\n block_el.proof, block_el.timestamp) for block_el in self.__chain]]\n f.write(json.dumps(saveable_chain))\n f.write('\\n')\n saveable_tx = [tx.__dict__ for tx in self.__open_transactions]\n f.write(json.dumps(saveable_tx))\n f.write('\\n')\n saveable_chip = [tx.__dict__ for tx in self.__open_chipsactions]\n f.write(json.dumps(saveable_chip))\n f.write('\\n')\n saveable_chip = [tx.__dict__ for tx in self.__open_messsactions]\n f.write(json.dumps(saveable_chip))\n f.write('\\n')\n f.write(json.dumps(list(self.__peer_nodes)))\n except IOError:\n print('Saving failed!')", "def to_binary(self):\n c = containerize(exclude_fields(self))\n 
self.payload = MsgCertificateChainDep._parser.build(c)\n return self.pack()", "def create_chain_instances(self):\n for section in self.config_reader.sections():\n self.read_chain(section)", "def make_drs_tree(self):\n pass", "def create(data):\n \n return Partlist(\n list_id = data['id'],\n name = data['name'],\n pieces = data['num_parts'])", "def chain():\n chain_identifier, url = get_vars(request, [\"id\", \"data\"])\n info('chain=%s' % chain_identifier)\n chain = LAPPS_SERVICE_CHAINS.get_chain(chain_identifier)\n info('source-url=%s' % url)\n data = requests.get(url).text\n result = chain.run({\n \"discriminator\": \"http://vocab.lappsgrid.org/ns/media/text\", \n \"payload\": data})\n info(\"discriminator=%s\" % result.get('discriminator'))\n return render_template(\"chain.html\",\n chain=chain,\n fname=url,\n result=result,\n builder=HtmlBuilder())", "def setPartsToRegister(self, parts):\n internals.blpapi_ServiceRegistrationOptions_setPartsToRegister(\n self.__handle, parts)", "def produce_chain_dict (inPath, outPath):\n with open(inPath, 'r') as fin:\n chainIDs = list(fin.read().split())\n chains = {}\n for chainid in chainIDs:\n pdbid = (chainid[ : chainid.find('_') ] if '_' in chainid else chainid)\n if pdbid in chains:\n chains[pdbid].add(chainid)\n else:\n chains[pdbid] = {chainid}\n with open(outPath, 'wb') as fOut:\n pickle.dump(chains, fOut)", "def wire_chains(self):\n allChains = self.instances.getAllChainInstances()\n for chain in allChains:\n logging.debug(\"%s\", chain)\n allChains[chain].setup_event_path()", "def _encode(self, boxes, anchors):\n pass", "def __init__(self):\n self.chain = {}\n self.blocks = {}\n self.blocks_spending_input = {}\n self.blocks_containing_tx = {}\n self.all_transactions = {}", "def prepare_order(acct, order):\n myaddr = (acct.address).lower()\n order[\"makerAddress\"] = myaddr\n order_struct = jsdict_order_to_struct(order) \n sig = _sign_order(acct, order_struct)\n order_struct[\"signature\"] = sig\n js_order = order_to_jsdict(order_struct)\n js_order[\"exchangeAddress\"] = exchangeAddress\n return js_order", "def trans_setup():\n # slot7 slot6 slot5 slot4 slot3 slot2 slot1 <------ beam direction (slot 8 is currently B-fiber only)\n # Be Be Be Be Be Be Be lens material\n # 1.5 1.5 0.5 0.5 0.5 0.5 0.5 lens radius [mm]\n # 1 1 5 8 4 2 1 number of lenses\n lens_R=[0.5,0.5,0.5,0.5,0.5,1.5,1.5]\n lens_mat=['Be','Be','Be','Be','Be','Be','Be']\n lens_N=[1,2,4,8,5,1,1]\n trans_pos=[35.2,35.8]\n return {'lens_material':lens_mat,'lens_number':lens_N,'lens_radius':lens_R,'trans_position':trans_pos}", "def _finishConstruction(self, obj):\n return obj", "def _finishConstruction(self, obj):\n return obj", "def genereate_echo_picklist(self):\n sample_names = []\n sample_wells = []\n indices = {'i5 name': {}, 'i5 plate': {}, 'i5 sequence': {},\n 'i5 well': {}, 'i7 name': {}, 'i7 plate': {},\n 'i7 sequence': {}, 'i7 well': {}, 'index combo': {},\n 'index combo seq': {}}\n\n for idx, well in enumerate(chain.from_iterable(self.plates[0].layout)):\n # Add the sample well\n sample_wells.append(well.well_id)\n # Get the sample name - we need to go back to the SampleComposition\n lib_comp = well.composition\n sample_comp = lib_comp.normalized_gdna_composition\\\n .gdna_composition.sample_composition\n sample_names.append(sample_comp.content)\n # Retrieve all the information about the indices\n i5_comp = lib_comp.i5_composition.primer_set_composition\n i5_well = i5_comp.container\n indices['i5 name'][idx] = i5_comp.external_id\n indices['i5 plate'][idx] = 
i5_well.plate.external_id\n indices['i5 sequence'][idx] = i5_comp.barcode\n indices['i5 well'][idx] = i5_well.well_id\n\n i7_comp = lib_comp.i7_composition.primer_set_composition\n i7_well = i7_comp.container\n indices['i7 name'][idx] = i7_comp.external_id\n indices['i7 plate'][idx] = i7_well.plate.external_id\n indices['i7 sequence'][idx] = i7_comp.barcode\n indices['i7 well'][idx] = i7_well.well_id\n\n indices['index combo seq'][idx] = '%s%s' % (\n indices['i5 sequence'][idx], indices['i7 sequence'][idx])\n\n sample_names = np.asarray(sample_names)\n sample_wells = np.asarray(sample_wells)\n indices = pd.DataFrame(indices)\n\n return LibraryPrepShotgunProcess._format_picklist(\n sample_names, sample_wells, indices)", "def json(self):\n blocks = [block.to_json() for block in self.chain[1:]]\n return json.dumps({'blocks': blocks})", "def pdbChain_to_mdtrajChainid_li(chain,seg_to_chain,struc):\n chain = chain.upper()\n chain_segments=[seg for seg,chainval in seg_to_chain.items() if chainval==chain]\n if chain_segments:\n structable, bonds=struc.topology.to_dataframe()\n chainid_li=[]\n for segname in chain_segments:\n seg_chainid_li=structable.loc[structable['segmentID'] == segname].chainID.unique()\n chainid_li+=list(seg_chainid_li)\n chainid_li=list(set(chainid_li))\n return chainid_li\n else:\n return False", "def marshal(self):\n pieces = self.properties.encode()\n pieces.insert(\n 0, struct.pack('>HxxQ', self.properties.INDEX, self.body_size))\n return self._marshal(pieces)", "def init(self):\n\n logger.info(mm_chain.ackn_str)\n self.acknowledgements = mm_chain.ackn_str\n self.references = mm_chain.refs['chain']\n\n return", "def __init__(self):\n self.unconfirmed_transactions = [] \n self.chain = []\n self.create_genesis_block()", "def build_item(\n self,\n part,\n timestamp=1539023700,\n userdata=0,\n position=[0, 0, 0],\n up_vec=[0, 1, 0],\n at_vec=[0, 0, 1],\n skip_power_controls=False):\n # Get the obj path.\n item = self.retrieve_part(part)\n\n # Lock Everything if it's the BASE_FLAG or U_POWERLINE.\n # BASE_FLAG can break things if user moves it around.\n # As it acts as the \"origin\" of the base.\n locked_parts = [\"BASE_FLAG\", \"U_POWERLINE\", \"U_PIPELINE\", \"U_PORTALLINE\"]\n line_parts = [\"U_POWERLINE\", \"U_PIPELINE\", \"U_PORTALLINE\"]\n if part in locked_parts:\n item.lock_location[0] = True\n item.lock_location[1] = True\n item.lock_location[2] = True\n item.lock_rotation[0] = True\n item.lock_rotation[1] = True\n item.lock_rotation[2] = True\n item.lock_scale[0] = True\n item.lock_scale[1] = True\n item.lock_scale[2] = True\n \n # Add custom attributes.\n item[\"ObjectID\"] = part\n item[\"SnapID\"] = part\n item[\"Timestamp\"] = timestamp\n item[\"belongs_to_preset\"] = False\n # Add an order flag to retain order when generating data..\n item[\"Order\"] = self.part_order\n self.part_order += 1\n # Apply Colour\n is_powerline = part in line_parts\n is_pipeline = part in [\"U_PIPELINE\"]\n material.assign_material(\n item,\n userdata,\n powerline=is_powerline,\n pipeline=is_pipeline\n )\n\n # Move\n utils.move_to(item, position=position, up=up_vec, at=at_vec)\n\n # If the object is a powerline, we should create additional controls\n # for it.\n if is_powerline and not skip_power_controls:\n power.create_power_controls(item)\n # Select the new object.\n item.select_set(True)\n return item", "def connectPart(self,\n hsNondes: list,\n part: Union[TransPart, ChoicesOfFrameParts],\n en: Union[RtlSignal, bool],\n exclusiveEn: Optional[RtlSignal]=hBit(1)):\n busVld = 
self.dataIn.valid\n tToIntf = self.dataOut._fieldsToInterfaces\n\n if isinstance(part, ChoicesOfFrameParts):\n parentIntf = tToIntf[part.origin.parent.origin]\n try:\n sel = self._tmpRegsForSelect[parentIntf]\n except KeyError:\n sel = HsBuilder(self, parentIntf._select).buff().end\n self._tmpRegsForSelect[parentIntf] = sel\n unionGroup = ExclusieveListOfHsNodes(sel)\n\n # for unions\n for choice in part:\n # connect data signals of choices and collect info about\n # streams\n intfOfChoice = tToIntf[choice.tmpl.origin]\n selIndex, isSelected, isSelectValid = self.choiceIsSelected(\n intfOfChoice)\n _exclusiveEn = isSelectValid & isSelected & exclusiveEn\n\n unionMemberPart = ListOfOutNodeInfos()\n for p in choice:\n self.connectPart(unionMemberPart, p, en, _exclusiveEn)\n unionGroup.append(selIndex, unionMemberPart)\n\n hsNondes.append(unionGroup)\n\n if part.isLastPart():\n # synchronization of reading from _select register for unions\n selNode = InNodeInfo(sel, en)\n else:\n selNode = InNodeReadOnlyInfo(sel, en)\n hsNondes.append(selNode)\n return\n\n if part.isPadding:\n return\n\n fPartSig = self.getInDataSignal(part)\n fieldInfo = part.tmpl.origin\n\n try:\n signalsOfParts = self._signalsOfParts[part.tmpl]\n except KeyError:\n signalsOfParts = []\n self._signalsOfParts[part.tmpl] = signalsOfParts\n\n if part.isLastPart():\n # connect all parts in this group to output stream\n signalsOfParts.append(fPartSig)\n intf = self.dataOut._fieldsToInterfaces[fieldInfo]\n intf.data(self.byteOrderCare(\n Concat(\n *reversed(signalsOfParts)\n ))\n )\n on = OutNodeInfo(self, intf, en, exclusiveEn)\n hsNondes.append(on)\n else:\n dataVld = busVld & en & exclusiveEn\n # part is in some word as last part, we have to store its value to register\n # until the last part arrive\n fPartReg = self._reg(\"%s_part_%d\" % (fieldInfo.name,\n len(signalsOfParts)),\n fPartSig._dtype)\n If(dataVld,\n fPartReg(fPartSig)\n )\n signalsOfParts.append(fPartReg)", "def nid2partid(self, nids, ntype=...):\n ...", "def nid2partid(self, nids, ntype=...):\n ...", "def test_hierarchy_perceived_information_propagation(self):\n from openff.toolkit._tests.create_molecules import (\n dipeptide_hierarchy_added as create_dipeptide,\n )\n\n dipeptide_hierarchy_perceived = create_dipeptide()\n\n for atom in dipeptide_hierarchy_perceived.atoms:\n atom.metadata[\"chain_id\"] = \"A\"\n assert (\"A\", \"1\", \" \", \"ACE\") != dipeptide_hierarchy_perceived.residues[\n 0\n ].identifier\n dipeptide_hierarchy_perceived.update_hierarchy_schemes()\n assert (\"A\", \"1\", \" \", \"ACE\") == dipeptide_hierarchy_perceived.residues[\n 0\n ].identifier", "def createInnerRepresentation(self):\n\n while self._metastring_rest:\n self.addMetastringPointer(self._metastring_rest.pop(0))", "def _serialize(self, state, handle):\n raise NotImplementedError", "def serialize(cleaned_data):\n result = cleaned_data.copy()\n for fieldname in Step1Data.REFERENCE_FIELDS_NAMES:\n if result[fieldname]:\n result[fieldname] = result[fieldname].pk\n result[\"data\"] = result[\"data\"].isoformat()\n # print \"serialize to:\", result\n return result", "def encode(self):\r\n # Create dict from attributes. 
Maintain added order\r\n #jd = {'txpk': collections.OrderedDict()}\r\n jd = {'txpk':{}}\r\n\r\n for key in self.keys:\r\n val = getattr(self, key)\r\n\r\n if val is not None:\r\n if key == 'data':\r\n jd['txpk'][key] = val.decode('utf-8')\r\n else:\r\n jd['txpk'][key] = val\r\n #print('key',key)\r\n #print('valtype',type(val),val) \r\n #print(jd)\r\n \r\n return dumps(jd, separators=(',', ':'))", "def serialize(self):\n\t\treturn { 'type': self.type, 'parameters' : self.parameters}", "async def b_chain() -> dict:\n authority_chain = await chain.consensus()\n return {\"chain\": authority_chain[\"chain\"]}", "def _create_chained_picking(self, cr, uid, pick_name, picking, purchase_type, move, context=None):\n res = super(stock_move, self)._create_chained_picking(cr, uid, pick_name, picking, purchase_type, move, context=context)\n if picking.purchase_id:\n self.pool.get('stock.picking').write(cr, uid, [res], {'purchase_id': picking.purchase_id.id})\n self.pool.get('stock.picking').write(cr, uid, [res], {'invoice_state': picking.invoice_state})\n return res", "def nid2partid(self, nids, ntype): # -> None:\n ..." ]
[ "0.51896733", "0.5168605", "0.51672775", "0.51672775", "0.50873256", "0.5012869", "0.50128603", "0.49828157", "0.49484867", "0.49205998", "0.48682296", "0.48377433", "0.48344752", "0.4816704", "0.48074743", "0.47946405", "0.47819698", "0.47307587", "0.47307587", "0.470948", "0.46880582", "0.46699506", "0.46636954", "0.46246442", "0.4621194", "0.46177652", "0.46041772", "0.45956752", "0.4576775", "0.45758986", "0.45601752", "0.45586032", "0.4549727", "0.45488742", "0.45300308", "0.45288286", "0.4488865", "0.4470426", "0.4468321", "0.4466073", "0.4463078", "0.44562802", "0.44518414", "0.44456387", "0.44456387", "0.4441484", "0.44412827", "0.44327995", "0.44320095", "0.44278732", "0.44207388", "0.4416993", "0.44162983", "0.44130626", "0.4412816", "0.4412816", "0.44112128", "0.44095713", "0.44080964", "0.44070905", "0.44025746", "0.44005856", "0.44004744", "0.43999064", "0.4399524", "0.43966618", "0.43935353", "0.43906373", "0.43881038", "0.43804395", "0.43796667", "0.43787307", "0.43772933", "0.4374465", "0.43730968", "0.43709666", "0.43663764", "0.4359629", "0.43542698", "0.43533173", "0.43529573", "0.43529573", "0.43516603", "0.43495756", "0.43490428", "0.4346473", "0.43393338", "0.43390393", "0.4333873", "0.43316406", "0.43314302", "0.43314302", "0.4328178", "0.4322439", "0.43209988", "0.43155813", "0.43155143", "0.43092346", "0.43080333", "0.43078464", "0.43056336" ]
0.0
-1
Marshal information from the selected chainParts to create a vbenf label. Use a Reducer for elimination of unusable jets
def _make_vbenf_label(chain_parts):
    # toy label for development: run simple and dijet independently.
    # simple makes Et cuts on two jets. Independently (sharing possible)
    # of jets chosen by simple, the dijet
    # scenario requires a dijet of mass > 900, and opening angle in phi > 2.6

    assert len(chain_parts) == 1
    scenario = chain_parts[0]['hypoScenario']
    assert scenario.startswith('vbenf')
    args = _args_from_scenario(scenario)
    if not args:
        return 'and([]simple([(50et)(70et)])combgen([(2)] dijet([(900djmass, 26djdphi)])))'

    arg_res = [
        re.compile(r'(?P<lo>\d*)(?P<key>fbet)(?P<hi>\d*)'),
        re.compile(r'(?P<lo>\d*)(?P<key>mass)(?P<hi>\d*)'),
        re.compile(r'(?P<lo>\d*)(?P<key>et)(?P<hi>\d*)'),
    ]

    defaults = {
        'et': ('101', 'inf'),
        'mass': ('800', 'inf'),
        'fbet': ('501', 'inf'),
    }

    argvals = {}
    while args:
        assert len(args) == len(arg_res)
        arg = args.pop()
        for r in arg_res:
            m = r.match(arg)
            if m is not None:
                arg_res.remove(r)
                gd = m.groupdict()
                key = gd['key']
                try:
                    lo = float(gd['lo'])
                except ValueError:
                    lo = defaults[key][0]
                argvals[key+'lo'] = lo
                try:
                    hi = float(gd['hi'])
                except ValueError:
                    hi = defaults[key][1]
                argvals[key+'hi'] = hi

    assert len(args) == len(arg_res)
    assert len(args) == 0

    return """
    and
    (
      []
      simple
      (
        [(%(etlo).0fet, 500neta)(%(etlo).0fet, peta500)]
      )
      combgen
      (
        [(10et, 0eta320)]
        dijet
        (
          [(%(masslo).0fdjmass, 26djdphi)]
        )
        simple
        (
          [(10et, 0eta320)(20et, 0eta320)]
        )
      )
    )""" % argvals
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def chainDict2jetLabel(chain_dict):\n\n # suported scenarios \n router = {\n 'simple': _make_simple_label,\n 'HT': _make_ht_label,\n 'vbenf': _make_vbenf_label,\n 'dijet': _make_dijet_label,\n 'combinationsTest': _make_combinationsTest_label,\n 'partitionsTest': _make_partitionsTest_label,\n }\n\n # chain_part - scenario association\n cp_sorter = {}\n for k in router: cp_sorter[k] = []\n\n for cp in chain_dict['chainParts']:\n if cp['signature'] != 'Jet' and cp['signature'] != 'Bjet': \n continue\n for k in cp_sorter:\n if cp['hypoScenario'].startswith(k):\n cp_sorter[k].append(cp)\n break\n\n # obtain labels by scenario.\n labels = []\n for k, chain_parts in cp_sorter.items():\n if chain_parts: labels.append(router[k](chain_parts))\n\n assert labels\n nlabels = len(labels)\n if nlabels == 1: return labels[0]\n if nlabels == 2:\n alabel = \"\"\"\\\nand([]\n %s\n %s)\"\"\" % (tuple(labels))\n return alabel\n\n # more than 2 labels is not expected\n assert False", "def _make_partitionsTest_label(chain_parts):\n\n assert len(chain_parts) == 1\n scenario = chain_parts[0]['hypoScenario']\n \n assert scenario == 'partitionsTest'\n\n \n\n return \"\"\"\n partgen(\n [(20et, 0eta320)]\n \n simple([(40et, 0eta320) (50et, 0eta320)])\n simple([(35et, 0eta240) (55et, 0eta240)])\n )\"\"\"", "def _make_simple_label(chain_parts):\n \n if not _select_simple_chainparts(chain_parts):\n msg = 'Jet Configuration error: '\\\n 'chain fails substring selection: not \"simple\" '\n\n raise NotImplementedError(msg)\n \n label = 'simple(['\n for cp in chain_parts:\n smcstr = str(cp['smc'])\n jvtstr = str(cp['jvt'])\n if smcstr == 'nosmc':\n smcstr = ''\n for i in range(int(cp['multiplicity'])):\n # condition_str = '(%set,%s,%s)' % (str(cp['threshold']),\n # str(cp['etaRange']),\n # smcstr,)\n condition_str = '(%set,%s' % (str(cp['threshold']),\n str(cp['etaRange']),)\n if smcstr: # Run 2 chains have \"INF\" in the SMC substring\n condition_str += ',%s)' % smcstr.replace('INF','')\n elif jvtstr:\n condition_str += ',%s)' % jvtstr\n else:\n condition_str += ')'\n label += condition_str\n label += '])'\n return label", "def _make_simple_partition_label(chain_dict):\n\n cps = chain_dict['chainParts']\n if not (_select_simple_chainparts(cps)):\n raise NotImplementedError(\n 'chain fails substring selection: not \"simple\": %s' % (\n chain_dict['chainName']))\n \n label = 'simplepartition(['\n for cp in cps:\n smcstr = str(cp['smc'])\n if smcstr == 'nosmc':\n smcstr = ''\n for i in range(int(cp['multiplicity'])):\n # condition_str = '(%set,%s,%s)' % (str(cp['threshold']),\n # str(cp['etaRange']),\n # smcstr,)\n condition_str = '(%set,%s' % (str(cp['threshold']),\n str(cp['etaRange']),)\n if smcstr:\n condition_str += ',%s)'\n else:\n condition_str += ')'\n label += condition_str\n label += '])'\n return label", "def _text_write_preprocess(self):\n self.check()\n\n max_name_len = np.max([len(name) for name in self.name])\n fieldtypes = [\"U\" + str(max_name_len), \"f8\", \"f8\"]\n comp_names = self._get_lon_lat_component_names()\n frame_obj = self._get_frame_obj()\n frame_desc_str = _get_frame_desc_str(frame_obj)\n\n component_fieldnames = []\n for comp_name in comp_names:\n # This will add e.g. 
ra_J2000 and dec_J2000 for FK5\n component_fieldnames.append(comp_name + \"_\" + frame_desc_str)\n fieldnames = [\"source_id\"] + component_fieldnames\n stokes_names = [\"I\", \"Q\", \"U\", \"V\"]\n fieldshapes = [()] * 3\n\n if self.stokes_error is not None:\n stokes_error_names = [(f\"{k}_error\") for k in [\"I\", \"Q\", \"U\", \"V\"]]\n\n n_stokes = 0\n stokes_keep = []\n for si, total in enumerate(np.nansum(self.stokes.to(\"Jy\"), axis=(1, 2))):\n if total > 0:\n fieldnames.append(stokes_names[si])\n fieldshapes.append((self.Nfreqs,))\n fieldtypes.append(\"f8\")\n if self.stokes_error is not None:\n fieldnames.append(stokes_error_names[si])\n fieldshapes.append((self.Nfreqs,))\n fieldtypes.append(\"f8\")\n n_stokes += 1\n stokes_keep.append(total > 0)\n\n assert n_stokes >= 1, \"No components with nonzero flux.\"\n\n if self.freq_array is not None:\n if self.spectral_type == \"subband\":\n fieldnames.append(\"subband_frequency\")\n else:\n fieldnames.append(\"frequency\")\n fieldtypes.append(\"f8\")\n fieldshapes.extend([(self.Nfreqs,)])\n elif self.reference_frequency is not None:\n fieldnames.extend([(\"reference_frequency\")])\n fieldtypes.extend([\"f8\"])\n fieldshapes.extend([()] * n_stokes + [()])\n if self.spectral_index is not None:\n fieldnames.append(\"spectral_index\")\n fieldtypes.append(\"f8\")\n fieldshapes.append(())\n\n if hasattr(self, \"_rise_lst\"):\n fieldnames.append(\"rise_lst\")\n fieldtypes.append(\"f8\")\n fieldshapes.append(())\n\n if hasattr(self, \"_set_lst\"):\n fieldnames.append(\"set_lst\")\n fieldtypes.append(\"f8\")\n fieldshapes.append(())\n\n dt = np.dtype(list(zip(fieldnames, fieldtypes, fieldshapes)))\n\n arr = np.empty(self.Ncomponents, dtype=dt)\n arr[\"source_id\"] = self.name\n\n for comp_ind, comp in enumerate(comp_names):\n arr[component_fieldnames[comp_ind]] = getattr(self.skycoord, comp).deg\n\n for ii in range(4):\n if stokes_keep[ii]:\n arr[stokes_names[ii]] = self.stokes[ii].T.to(\"Jy\").value\n if self.stokes_error is not None:\n arr[stokes_error_names[ii]] = self.stokes_error[ii].T.to(\"Jy\").value\n\n if self.freq_array is not None:\n if self.spectral_type == \"subband\":\n arr[\"subband_frequency\"] = self.freq_array.to(\"Hz\").value\n else:\n arr[\"frequency\"] = self.freq_array.to(\"Hz\").value\n elif self.reference_frequency is not None:\n arr[\"reference_frequency\"] = self.reference_frequency.to(\"Hz\").value\n if self.spectral_index is not None:\n arr[\"spectral_index\"] = self.spectral_index\n\n if hasattr(self, \"_rise_lst\"):\n arr[\"rise_lst\"] = self._rise_lst\n if hasattr(self, \"_set_lst\"):\n arr[\"set_lst\"] = self._set_lst\n\n return arr", "def _body(self, x, ensembled_batch, non_ensembled_batch, idx):\n i, current_representations = x\n del x\n feats = self._slice_batch(i, ensembled_batch, non_ensembled_batch)\n representations_update = self.evoformer(*self.batch_expand(feats))\n new_representations = {}\n for k in current_representations:\n new_representations[k] = (\n current_representations[k] + representations_update[k])\n del representations_update\n return i+1, new_representations", "def get_mapped_feature_name(self):\n\n # open a h5 file in case we need it\n f5 = h5py.File(self.train_database[0], 'r')\n mol_name = list(f5.keys())[0]\n mapped_data = f5.get(mol_name + '/mapped_features/')\n chain_tags = ['_chain1', '_chain2']\n\n # if we select all the features\n if self.select_feature == \"all\":\n\n # redefine dict\n self.select_feature = {}\n\n # loop over the feat types and add all the feat_names\n for 
feat_type, feat_names in mapped_data.items():\n self.select_feature[feat_type] = [\n name for name in feat_names]\n\n # if a selection was made\n else:\n\n # we loop over the input dict\n for feat_type, feat_names in self.select_feature.items():\n\n # if for a given type we need all the feature\n if feat_names == 'all':\n if feat_type in mapped_data:\n self.select_feature[feat_type] = list(\n mapped_data[feat_type].keys())\n else:\n self.print_possible_features()\n raise KeyError('Feature type %s not found')\n\n # if we have stored the individual\n # chainA chainB data we need to expand the feature list\n # however when we reload a pretrained model we already\n # come with _chainA, _chainB features.\n # So then we shouldn't add the tags\n else:\n # TODO to refactor this part\n if feat_type not in mapped_data:\n self.print_possible_features()\n raise KeyError('Feature type %s not found')\n\n self.select_feature[feat_type] = []\n\n # loop over all the specified feature names\n for name in feat_names:\n\n # check if there is not _chainA or _chainB in the name\n cond = [tag not in name for tag in chain_tags]\n\n # if there is no chain tag in the name\n if np.all(cond):\n\n # if we have a wild card e.g. PSSM_*\n # we check the matches and add them\n if '*' in name:\n match = name.split('*')[0]\n possible_names = list(\n mapped_data[feat_type].keys())\n match_names = [\n n for n in possible_names\n if n.startswith(match)]\n self.select_feature[feat_type] += match_names\n\n # if we don't have a wild card we append\n # <feature_name>_chainA and <feature_name>_chainB\n # to the list\n else:\n self.select_feature[feat_type] += [\n name + tag for tag in chain_tags]\n # if there is a chain tag in the name\n # (we probably relaod a pretrained model)\n # and we simply append the feaature name\n else:\n self.select_feature[feat_type].append(\n name)\n\n f5.close()", "def DecodeStage():\n\n io = Io({\n 'if_id': Input(if_bundle),\n 'inst': Input(Bits(32)),\n 'stall': Input(Bits(1)),\n 'reg_write': Input(reg_write_bundle),\n 'ras_ctrl': Output(ras_ctrl_bundle),\n 'id_ex': Output(id_ex_bundle),\n 'rs1_data': Output(Bits(C['core-width'])),\n 'rs2_data': Output(Bits(C['core-width'])),\n })\n\n inst = Wire(Bits(32))\n\n with io.if_id.valid:\n inst <<= io.inst\n with otherwise:\n inst <<= 0\n\n regfile = Instance(RegisterFile())\n\n itype = Wire(Bits(ITypes.bitwidth))\n\n regfile.r0_addr <<= Rs1(inst)\n regfile.r0_en <<= ~io.stall\n regfile.r1_addr <<= Rs2(inst)\n regfile.r1_en <<= ~io.stall\n\n regfile.w0_addr <<= io.reg_write.w_addr\n regfile.w0_en <<= io.reg_write.w_en & ~io.stall\n regfile.w0_data <<= io.reg_write.w_data\n\n #\n # inst_data is metadata about the current instruction that is passed through\n # the pipeline unrelated to control signals. It's primary use is for hazard\n # detection and data forwarding.\n #\n\n io.id_ex.ctrl.valid <<= io.if_id.valid\n io.id_ex.ctrl.inst <<= inst\n io.id_ex.ctrl.pc <<= io.if_id.pc\n\n #\n # Hook up the register read outputs.\n #\n\n io.rs1_data <<= regfile.r0_data\n io.rs2_data <<= regfile.r1_data\n\n #\n # Control is a Python function that produces the primary decode logic. It\n # matches against a set of known instructions to produce control signals for\n # later stages in the pipeline. 
The known instructions are encoded in the\n # 'instructions' variable above.\n #\n\n Control(inst, itype, io.id_ex.ctrl)\n\n #\n # TODO: Documentation\n #\n\n HandleRasCtrl(io.ras_ctrl, inst, io.if_id.pc)\n\n #\n # GenerateImmediate produces logic that consume the itype (instruction\n # type, which is R, I, S, B, U, or J) and produces the immediate value for\n # this instruction.\n #\n\n io.id_ex.imm <<= GenerateImmediate(inst, itype)\n\n NameSignals(locals())", "def _make_dijet_label(chain_parts):\n\n assert len(chain_parts) == 1\n scenario = chain_parts[0]['hypoScenario']\n \n assert scenario.startswith('dijet')\n\n arg_res = [\n re.compile(r'^(?P<lo>\\d*)(?P<key>djmass)(?P<hi>\\d*)$'),\n re.compile(r'^(?P<lo>\\d*)(?P<key>j1et)(?P<hi>\\d*)$'),\n re.compile(r'^(?P<lo>\\d*)(?P<key>j1eta)(?P<hi>\\d*)$'),\n re.compile(r'^(?P<lo>\\d*)(?P<key>j2et)(?P<hi>\\d*)$'),\n re.compile(r'^(?P<lo>\\d*)(?P<key>j2eta)(?P<hi>\\d*)$'),\n ]\n\n defaults = {\n 'j1et': ('100', 'inf'),\n 'j2et': ('100', 'inf'),\n 'j1eta': ('0', '320'),\n 'j2eta': ('0', '320'),\n 'djmass': ('1000', 'inf'),\n }\n\n\n args = _args_from_scenario(scenario)\n argvals = {}\n while args:\n assert len(args) == len(arg_res)\n arg = args.pop()\n for r in arg_res:\n m = r.match(arg)\n if m is not None:\n arg_res.remove(r)\n gd = m.groupdict()\n key = gd['key']\n\n try:\n lo = float(gd['lo'])\n except ValueError:\n lo = defaults[key][0]\n argvals[key+'lo'] = lo \n try:\n hi = float(gd['hi'])\n except ValueError:\n hi = defaults[key][1]\n argvals[key+'hi'] = hi\n\n assert len(args) == len(arg_res)\n assert len(args) == 0\n\n return \"\"\"\n combgen(\n [(2)(%(j1etlo).0fet, %(j1etalo).0feta%(j1etahi).0f)\n (%(j1etlo).0fet, %(j1etalo).0feta%(j1etahi).0f)\n ]\n \n dijet(\n [(%(djmasslo).0fdjmass)])\n simple([(%(j1etlo).0fet, %(j1etalo).0feta%(j1etahi).0f)\n (%(j2etlo).0fet, %(j2etalo).0feta%(j2etahi).0f)])\n )\"\"\" % argvals", "def _indentity_block(self, X, filters, f, stage, block):\n\t\tconv_layer_name = 'res' + str(stage) + block + '_branch'\n\t\tbn_layer_name = 'bm' + str(stage) + block + '_branch'\n\n\t\tX_shortcut = X\n\n\t\tF1, F2, F3 = filters\n\n\t\t# First component of main path\n\t\tX = Conv2D(filters = F1, kernel_size = (1, 1), strides = (1, 1), padding = 'valid',\n\t\t\tname = conv_layer_name + '2a', kernel_initializer = glorot_uniform(seed=0))(X)\n\t\tX = BatchNormalization(axis = 3, name = bn_layer_name + '2a')(X)\n\t\tX = Activation('relu')(X)\n\n\t\t# Second component of main path\n\t\tX = Conv2D(filters = F2, kernel_size = (1, 1), strides = (1, 1), padding = 'same',\n\t\t\tname = conv_layer_name + '2b', kernel_initializer = glorot_uniform(seed=0))(X)\n\t\tX = BatchNormalization(axis = 3, name = bn_layer_name + '2b')(X)\n\t\tX = Activation('relu')(X)\n\n\t\t# Third component of main path\n\t\tX = Conv2D(filters = F3, kernel_size = (1, 1), strides = (1, 1), padding = 'valid',\n\t\t\tname = conv_layer_name + '2c', kernel_initializer = glorot_uniform(seed=0))(X)\n\t\tX = BatchNormalization(axis = 3, name = bn_layer_name + '2c')(X)\n\n\t\t# Final step : adding the shortcut componet to X and applying 'relu' activation on the combination \n\t\tX = Add()([X_shortcut,X])\n\t\tX = Activation('relu')(X)\n\n\t\treturn X", "def output_fluent(fil,nodes,elems):\n print \"Nodal coordinates\"\n print nodes\n print \"Element connectivity\"\n print elems\n faces = array(Tet4.faces) # Turning faces into an array is important !\n print \"Tetraeder faces\"\n print faces\n elf = elems.take(faces,axis=1)\n # Remark: the shorter syntax elems[faces] 
takes its elements along the\n # axis 0. Then we would need to transpose() first (and probably\n # swap axes again later)\n print \"The faces of the elements:\"\n print elf\n # We need a copy to sort the nodes (sorting is done in-place)\n elfs = elf.copy()\n elfs.sort(axis=2) \n print \"The faces with sorted nodes:\"\n print elfs\n magic = elems.max()+1\n print \"Magic number = %d\" % magic\n code = encode(elfs[:,:,0],elfs[:,:,1],elfs[:,:,2],magic)\n # Remark how nice the encode function works on the whole array\n print \"Encoded faces:\"\n print code\n code = code.ravel()\n print code\n print \"Just A Check:\"\n print \"Element 5 face 2 is %s \" % elf[5,2]\n print \"Element 5 face 2 is %s \" % list(decode(code[4*5+2],magic))\n srt = code.argsort()\n print srt\n print code[srt]\n # Now shipout the faces in this order, removing the doubles\n j = -1 \n for i in srt:\n if j < 0: # no predecessor (or predecessor already shipped)\n j = i\n else:\n e1,f1 = j/4, j%4\n if code[i] == code[j]:\n e2,f2 = i/4, i%4\n j = -1\n else:\n e2 = -1\n j = i\n print \"Face %s belongs to el %s and el %s\" % ( elf[e1,f1], e2, e1 )", "def pre_pipeline(self, results):\n results[\"img_prefix\"] = self.img_prefix\n results[\"seg_prefix\"] = self.seg_prefix\n results[\"proposal_file\"] = self.proposal_file\n results[\"bbox_fields\"] = []\n results[\"mask_fields\"] = []\n results[\"seg_fields\"] = []\n results[\"site_fields\"] = []\n results[\"label_fields\"] = []", "def build_head(self):\n stages = [f'stage{i}' for i in range(1, 7)]\n for stage in stages:\n block = getattr(self.arch, stage)\n PAF, CFM = block.keys()\n PAF = build_blocks(block[PAF], 'head')\n CFM = build_blocks(block[CFM], 'head')\n setattr(self, f\"{stage}_PAF\", PAF)\n setattr(self, f\"{stage}_CFM\", CFM)", "def _make_combinationsTest_label(chain_parts):\n\n assert len(chain_parts) == 1\n scenario = chain_parts[0]['hypoScenario']\n \n assert scenario == 'combinationsTest'\n\n \n\n return \"\"\"\n combgen(\n [(2)(20et, 0eta320)]\n \n simple([(40et, 0eta320) (50et, 0eta320)])\n simple([(35et, 0eta240) (55et, 0eta240)])\n )\"\"\"", "def _make_simple_comb_label(chain_dict):\n\n cps = chain_dict['chainParts']\n if not (_select_simple_chainparts(cps)):\n raise NotImplementedError(\n 'chain fails substring selection: not \"simple\": %s' % (\n chain_dict['chainName']))\n \n simple_strs = []\n\n for cp in cps:\n print(cp)\n simple_strs.append(_make_simple_label([cp]))\n\n label = 'combgen([(%d)]' % len(cps)\n for s in simple_strs:\n label += ' %s ' % s\n label += ')'\n return label", "def generate_code(self, parts: list):\n for i in range(len(parts)):\n\n if not self._involves_this_party(parts[i][0]):\n # not our data, skip job\n continue\n\n if parts[i][1] == \"python\":\n cg = PythonCodeGen(\n self.config,\n parts[i][0],\n f\"{self.config.system_configs['CODEGEN'].workflow_name}-python-job-{i}\"\n )\n cg.generate()\n elif parts[i][1] == \"jiff\":\n cg = JiffCodeGen(\n self.config,\n parts[i][0],\n f\"{self.config.system_configs['CODEGEN'].workflow_name}-jiff-job-{i}\"\n )\n cg.generate()\n else:\n raise Exception(f\"Unrecognized backend from partition: {parts[i][1]}.\")", "def __call__(self, blocks, with_cab=False):\n\n # for k, v in blocks.items():\n # print(k, v.shape)\n\n #down fpn\n f_down = self.FPN_Down_Fusion(blocks)\n # print(\"f_down shape: {}\".format(f_down.shape))\n #up fpn\n f_up = self.FPN_Up_Fusion(blocks)\n # print(\"f_up shape: {}\".format(f_up.shape))\n #fusion\n f_common = fluid.layers.elementwise_add(x=f_down, y=f_up)\n f_common = 
fluid.layers.relu(f_common)\n # print(\"f_common: {}\".format(f_common.shape))\n\n if self.with_cab:\n # print('enhence f_common with CAB.')\n f_common = self.cross_attention(f_common)\n\n f_score, f_border = self.SAST_Header1(f_common)\n f_tvo, f_tco = self.SAST_Header2(f_common)\n\n predicts = OrderedDict()\n predicts['f_score'] = f_score\n predicts['f_border'] = f_border\n predicts['f_tvo'] = f_tvo\n predicts['f_tco'] = f_tco\n return predicts", "def __call__(self, node):\n\n # should throw an error\n if node.cfgInterface == None:\n return\n\n # //\n # // Extract LFN base from included WorkflowSpec parameters\n #//\n if self.unmerged:\n base = node.getParameter(\"UnmergedLFNBase\")[0]\n else:\n base = node.getParameter(\"MergedLFNBase\")[0]\n mergedBase = node.getParameter(\"MergedLFNBase\")[0]\n\n acqEra=None\n if node.hasParameter(\"AcquisitionEra\"):\n acqEra = node.getParameter(\"AcquisitionEra\")[0]\n\n\n # //\n # // iterate over outputmodules/data tiers\n #// Generate LFN, PFN and Catalog for each module\n\n for modName, outModule in node.cfgInterface.outputModules.items():\n\n if ( not outModule.has_key('fileName') ):\n msg = \"OutputModule %s does not contain a fileName entry\" % modName\n raise RuntimeError, msg\n\n preserveLfnGroup = str(self.lfnGroup)\n lastBit = outModule['processedDataset']\n # //\n # // Skip if the file does not stage out. (i.e.\n #// --stageout-intermediates=False)\n if lastBit is None:\n msg = \"OutputModule does not stage out. Skipping.\"\n logging.debug(msg)\n continue\n # //but this guy has the AcquisitionEra at the beginning... delimited\n # // by a dash... we don't need it twice... we try to safely\n #// remove it from the beginning, basically punting if its not\n #\\\\ disadvantage of getting this from the ds name is having to\n # \\\\ then strip off -unmerged\n if acqEra is not None:\n thingtoStrip=\"%s-\" % acqEra\n mypieces = lastBit.split(thingtoStrip, 1)\n if len(mypieces) > 1: \n lastBit = mypieces[1]\n remainingBits = lastBit.split(\"-unmerged\", 1)[0]\n \n outModule['LFNBase'] = os.path.join(base,\n outModule['primaryDataset'],\n outModule['dataTier'],\n remainingBits,\n preserveLfnGroup)\n outModule['MergedLFNBase'] = os.path.join(mergedBase,\n outModule['primaryDataset'],\n outModule['dataTier'],\n remainingBits,\n preserveLfnGroup)\n\n return", "def build_stage2_6(self):\n paf, cfm = self.stage2_6.values()\n for i in range(2, 7):\n paf_ = OrderedDict([(k.replace('i', str(i)),paf[k]) for k in paf])\n cfm_ = OrderedDict([(k.replace('i', str(i)),cfm[k]) for k in cfm])\n stage_ = OrderedDict(PAF=paf_, CFM=cfm_)\n setattr(self, f'stage{i}', stage_)", "def slice_graph_bwd( endea, reg ): \r\n\tgraph = vcg_Graph.vcgGraph({\"title\":'\"Slice for %s\"' % reg, \\\r\n\t\t\"manhattan_edges\":\"no\", \"layoutalgorithm\":\"maxdepth\"})\r\n\t#\r\n\t# Retrieve the name of the current basic block\r\n\t# \r\n\tworklist = []\r\n\tdata_bib = {}\r\n\t\r\n\tstartnode = slice_node( 0, endea, reg )\t\t# start at the end of the slice node\r\n\trootnode = graph.Add_Node( startnode.to_name() )\r\n\tdata_bib[ startnode.to_name() ] = startnode\r\n\tworklist.insert( 0, rootnode )\r\n\twhile len( worklist ) > 0:\r\n\t\tcurrnode = worklist.pop()\r\n\t\tcurrslice = data_bib[ currnode.get_name() ]\r\n\t\t[tgt_reg, split] = currslice.get_target_reg_bwd()\r\n\t\tprint tgt_reg\r\n\t\tprint split\r\n\t\tif tgt_reg == \"END\":\r\n\t\t\t# Do not process this node any further\r\n\t\t\tpass\r\n\t\telif tgt_reg == \"\" or (( len( currslice.get_lines()) > 0) and 
\\\r\n\t\t\tcurrslice.startea != currslice.get_lines()[0][0]):\r\n\t\t\t# Do process this node further, nothing really going on \r\n\t\t\tprint \"ZEZ\"\r\n\t\t\txrefs = get_crefs_to( currslice.startea )\r\n\t\t\tfor ref in xrefs:\r\n\t\t\t\tnewslice = slice_node( 0,ref, currslice.reg )\r\n\t\t\t\tif graph.Get_Node( newslice.to_name() ) == 0:\r\n\t\t\t\t\tnewnode = graph.Add_Node( newslice.to_name() )\r\n\t\t\t\t\tworklist.insert( 0, newnode )\r\n\t\t\t\t\tdata_bib[ newslice.to_name() ] = newslice\r\n\t\t\t\tgraph.Add_Link( newslice.to_name(), currnode.get_name() )\r\n\t\telse:\r\n\t\t\txrefs = get_crefs_to( currslice.startea )\r\n\t\t\tfor ref in xrefs:\r\n\t\t\t\tnewslice = slice_node( 0,ref, tgt_reg )\r\n\t\t\t\tif graph.Get_Node( newslice.to_name() ) == 0:\r\n\t\t\t\t\tnewnode = graph.Add_Node( newslice.to_name() )\r\n\t\t\t\t\tworklist.insert( 0, newnode )\r\n\t\t\t\t\tdata_bib[ newslice.to_name() ] = newslice\r\n\t\t\t\tgraph.Add_Link( newslice.to_name(), currnode.get_name())\r\n\t\t\txrefs = get_crefs_to( currslice.startea )\r\n\t\t\tif split:\r\n\t\t\t\tfor ref in xrefs:\r\n\t\t\t\t\tnewslice = slice_node( 0,ref, currslice.reg )\r\n\t\t\t\t\tif graph.Get_Node( newslice.to_name() ) == 0:\r\n\t\t\t\t\t\tnewnode = graph.Add_Node( newslice.to_name() )\r\n\t\t\t\t\t\tworklist.insert( 0, newnode )\r\n\t\t\t\t\t\tdata_bib[ newslice.to_name() ] = newslice\r\n\t\t\t\t\tgraph.Add_Link( newslice.to_name(), currnode.get_name())\r\n\treturn [ graph, data_bib ]", "def identity_block(self,X, f, filters, stage, block):\n # define name basis\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n # Retrieve filters\n f1,f2,f3 = filters\n\n # Save the input value. This needs to be added back to the main path later.\n X_shortcut = X\n\n # First component of the main path\n X = Conv2D(filters= f1, kernel_size=(1,1), strides=(1,1), padding='valid', name=conv_name_base + '2a', kernel_initializer=glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis=3, name=bn_name_base + '2a')(X)\n X = Activation('relu')(X)\n\n # Second component of the main path\n X = Conv2D(filters= f2, kernel_size=(f,f), strides=(1,1), padding='same', name=conv_name_base + '2b', kernel_initializer=glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis=3, name=bn_name_base + '2b')(X)\n X = Activation('relu')(X)\n\n # Third component of the main path\n X = Conv2D(filters= f3, kernel_size=(1,1), strides=(1,1), padding='valid', name=conv_name_base + '2c', kernel_initializer=glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis=3, name=bn_name_base + '2c')(X)\n\n # Add the shortcut value to the main path\n X = Add()([X_shortcut, X])\n X = Activation('relu')(X) \n\n return X", "def onBuildModels(self):\n if self.refSeriesNumber != '-1':\n ref = self.refSeriesNumber\n refLongName = self.seriesMap[ref]['LongName']\n labelNodes = slicer.util.getNodes('*'+refLongName+'*-label*')\n\n numNodes = slicer.mrmlScene.GetNumberOfNodesByClass( \"vtkMRMLModelHierarchyNode\" )\n outHierarchy = None\n\n for n in xrange(numNodes):\n node = slicer.mrmlScene.GetNthNodeByClass( n, \"vtkMRMLModelHierarchyNode\" )\n if node.GetName() == 'mpReview-'+refLongName:\n outHierarchy = node\n break\n\n # Remove the previous models\n if outHierarchy:\n collection = vtk.vtkCollection()\n outHierarchy.GetChildrenModelNodes(collection)\n n = collection.GetNumberOfItems()\n if n != 0:\n for i in xrange(n):\n modelNode = collection.GetItemAsObject(i)\n slicer.mrmlScene.RemoveNode(modelNode)\n\n # if models 
hierarchy does not exist, create it.\n else:\n outHierarchy = slicer.vtkMRMLModelHierarchyNode()\n outHierarchy.SetScene( slicer.mrmlScene )\n outHierarchy.SetName( 'mpReview-'+refLongName )\n slicer.mrmlScene.AddNode( outHierarchy )\n\n progress = self.makeProgressIndicator(len(labelNodes))\n step = 0\n for label in labelNodes.values():\n labelName = label.GetName().split(':')[1]\n structureName = labelName[labelName[:-6].rfind(\"-\")+1:-6]\n # Only save labels with known structure names\n if any(structureName in s for s in self.structureNames):\n parameters = {}\n parameters[\"InputVolume\"] = label.GetID()\n parameters['FilterType'] = \"Sinc\"\n parameters['GenerateAll'] = True\n\n parameters[\"JointSmoothing\"] = False\n parameters[\"SplitNormals\"] = True\n parameters[\"PointNormals\"] = True\n parameters[\"SkipUnNamed\"] = True\n\n # create models for all labels\n parameters[\"StartLabel\"] = -1\n parameters[\"EndLabel\"] = -1\n\n parameters[\"Decimate\"] = 0\n parameters[\"Smooth\"] = 0\n\n parameters[\"ModelSceneFile\"] = outHierarchy\n\n progress.labelText = '\\nMaking Model for %s' % structureName\n progress.setValue(step)\n if progress.wasCanceled:\n break\n\n try:\n modelMaker = slicer.modules.modelmaker\n self.CLINode = slicer.cli.run(modelMaker, self.CLINode,\n parameters, wait_for_completion=True)\n except AttributeError:\n qt.QMessageBox.critical(slicer.util.mainWindow(),'Editor', 'The ModelMaker module is not available<p>Perhaps it was disabled in the application settings or did not load correctly.')\n step += 1\n progress.close()\n #\n\n if outHierarchy:\n collection = vtk.vtkCollection()\n outHierarchy.GetChildrenModelNodes(collection)\n n = collection.GetNumberOfItems()\n if n != 0:\n for i in xrange(n):\n modelNode = collection.GetItemAsObject(i)\n displayNode = modelNode.GetDisplayNode()\n displayNode.SetSliceIntersectionVisibility(1)\n displayNode.SetSliceIntersectionThickness(2)\n self.modelsVisibilityButton.checked = False\n self.updateViewRenderer()", "def make_label_data(self):\n from xml.etree.ElementTree import Element, SubElement, dump, ElementTree, parse\n\n if not self.graphicsView.hasImage():\n self.showImageSelectionMessageBox()\n return\n\n app_doc_data = AppDocData.instance()\n project = app_doc_data.getCurrentProject()\n\n smalls = []\n bigs = []\n\n symbol_list = app_doc_data.getTargetSymbolList(all=True)\n for symbol in symbol_list:\n if symbol.width and symbol.height:\n if symbol.width > 300 or symbol.height > 300:\n bigs.append(symbol.getName())\n else:\n smalls.append(symbol.getName())\n\n symbols = [item for item in self.graphicsView.scene().items() if issubclass(type(item), SymbolSvgItem)]\n names = [smalls, bigs]\n\n img = app_doc_data.activeDrawing.image_origin\n\n small_size = 500\n big_size = 850\n\n save_path = project.getTrainingSymbolFilePath()\n\n index = 0\n for size in [small_size, big_size]:\n offsets = [0, int(size / 2)]\n\n width, height = img.shape[1], img.shape[0]\n width_count, height_count = width // size + 2, height // size + 2\n b_width, b_height = width_count * size, height_count * size\n b_img = np.zeros((b_height, b_width), np.uint8) + 255\n b_img[:height, :width] = img[:, :]\n\n for offset in offsets:\n for row in range(height_count):\n for col in range(width_count):\n x, y = col * size + offset, row * size + offset\n tile_rect = QRectF(x, y, size, size)\n tile_symbols = []\n for symbol in [symbol for symbol in symbols if symbol.name in names[index]]:\n if tile_rect.contains(symbol.sceneBoundingRect()):\n 
tile_symbols.append(symbol)\n symbols.remove(symbol)\n\n if tile_symbols:\n training_uid = str(uuid.uuid4())\n training_image_path = os.path.join(save_path, training_uid + '.png')\n training_xml_path = os.path.join(save_path, training_uid + '.xml')\n\n # save image\n #_img = b_img[round(tile_rect.top()):round(tile_rect.bottom()),\n # round(tile_rect.left()):round(tile_rect.right())]\n #cv2.imwrite(training_image_path, _img)\n _img = self.graphicsView.image().copy(round(tile_rect.left()), round(tile_rect.top()), round(tile_rect.width()), round(tile_rect.height()))\n _img.save(training_image_path)\n\n # save label\n xml = Element('annotation')\n SubElement(xml, 'folder').text = 'None'\n SubElement(xml, 'filename').text = os.path.basename(save_path)\n\n pathNode = Element('path')\n pathNode.text = save_path.replace('/', '\\\\')\n xml.append(pathNode)\n\n sourceNode = Element('source')\n databaseNode = Element('database')\n databaseNode.text = 'Unknown'\n sourceNode.append(databaseNode)\n xml.append(sourceNode)\n\n sizeNode = Element('size')\n widthNode = Element('width')\n widthNode.text = str(int(tile_rect.width()))\n sizeNode.append(widthNode)\n heightNode = Element('height')\n heightNode.text = str(int(tile_rect.height()))\n sizeNode.append(heightNode)\n depthNode = Element('depth')\n depthNode.text = '3'\n sizeNode.append(depthNode)\n xml.append(sizeNode)\n\n segmentedNode = Element('segmented')\n segmentedNode.text = '0'\n xml.append(segmentedNode)\n\n labelContent = []\n counts = {}\n for item in tile_symbols:\n rect = item.sceneBoundingRect()\n label, xMin, yMin, xMax, yMax = item.name, int(rect.x() - 5 - x), int(rect.y() - 5 - y), int(rect.x() + rect.width() + 5 - x), int(rect.y() + rect.height() + 5 - y)\n xMin = xMin if xMin > 0 else 0\n yMin = yMin if yMin > 0 else 0\n xMax = xMax if xMax < size else size\n yMax = yMax if yMax < size else size\n\n if label == 'None' or label == '':\n continue\n if label not in labelContent:\n labelContent.append(label)\n counts[label] = 1\n else:\n counts[label] = counts[label] + 1\n\n objectNode = Element('object')\n nameNode = Element('name')\n nameNode.text = label\n objectNode.append(nameNode)\n poseNode = Element('pose')\n poseNode.text = 'Unspecified'\n objectNode.append(poseNode)\n truncatedNode = Element('truncated')\n truncatedNode.text = '0'\n objectNode.append(truncatedNode)\n difficultNode = Element('difficult')\n difficultNode.text = '0'\n objectNode.append(difficultNode)\n\n bndboxNode = Element('bndbox')\n xminNode = Element('xmin')\n xminNode.text = str(xMin)\n bndboxNode.append(xminNode)\n yminNode = Element('ymin')\n yminNode.text = str(yMin)\n bndboxNode.append(yminNode)\n xmaxNode = Element('xmax')\n xmaxNode.text = str(xMax)\n bndboxNode.append(xmaxNode)\n ymaxNode = Element('ymax')\n ymaxNode.text = str(yMax)\n bndboxNode.append(ymaxNode)\n objectNode.append(bndboxNode)\n\n xml.append(objectNode)\n\n ElementTree(xml).write(training_xml_path)\n\n index += 1\n\n QMessageBox.about(self, self.tr(\"Notice\"), self.tr('Successfully applied. 
'))", "def gexf_graph():\n # you must replace these lines and supply your own graph\n \n \n \n my_gexf = Gexf(\"JiajiaXie\", \"My awesome graph\")\n graph=my_gexf.addGraph(\"undirected\", \"static\", \"My awesome networks\")\n \n atr1=graph.addNodeAttribute('Type',type='string')\n\n\n for set in data_specific:\n if graph.nodeExists(set['set_num']) ==0:\n tm1=graph.addNode(set['set_num'], set['name'], r='0', g='0', b='0')\n tm1.addAttribute(atr1,\"set\")\n\n\n\n counter_test=1\n for set, part in data_parts.items():\n for key, part_list in part.items():\n interme =part_list['color']\n red=interme[0]+interme[1]\n green=interme[2]+interme[3]\n blue=interme[4]+interme[5]\n\n red_de=str(int(red,16))\n green_de=str(int(green,16))\n blue_de=str(int(blue,16))\n if graph.nodeExists(part_list['id'])==0:\n tm2=graph.addNode(part_list['id'], part_list['part_name'],r=red_de, g=green_de, b = blue_de)\n tm2.addAttribute(atr1,\"part\")\n\n\n counter_test+=1\n graph.addEdge(\"_\"+str(counter_test), set, part_list['id'], part_list['quantity'])\n\n\n\n f=open('bricks_graph.gexf','wb')\n my_gexf.write(f)\n\n\n return my_gexf.graphs[0]", "def DontuseThis():\n BCM_outputs = ['phi','rho','theta',\n 'r_probabilityMaps','l_probabilityMaps',\n 'models']\n BCM_Models = pe.Node(interface=nio.DataGrabber(input_names=['structures'],\n outfields=BCM_outputs),\n name='10_BCM_Models')\n BCM_Models.inputs.base_directory = atlas_fname_wpath\n BCM_Models.inputs.template_args['phi'] = [['spatialImages','phi','nii.gz']]\n BCM_Models.inputs.template_args['rho'] = [['spatialImages','rho','nii.gz']]\n BCM_Models.inputs.template_args['theta'] = [['spatialImages','theta','nii.gz']]\n BCM_Models.inputs.template_args['r_probabilityMaps'] = [['structures']]\n BCM_Models.inputs.template_args['l_probabilityMaps'] = [['structures']]\n BCM_Models.inputs.template_args['models'] = [['structures']]\n\n BRAINSCut_structures = ['caudate','thalamus','putamen','hippocampus']\n #BRAINSCut_structures = ['caudate','thalamus']\n BCM_Models.iterables = ( 'structures', BRAINSCut_structures )\n BCM_Models.inputs.template = '%s/%s.%s'\n BCM_Models.inputs.field_template = dict(\n r_probabilityMaps='probabilityMaps/r_%s_ProbabilityMap.nii.gz',\n l_probabilityMaps='probabilityMaps/l_%s_ProbabilityMap.nii.gz',\n models='modelFiles/%sModel*',\n )\n\n \"\"\"\n The xml creation and BRAINSCut need to be their own mini-pipeline that gets\n executed once for each of the structures in BRAINSCut_structures. 
This can be\n accomplished with a map node and a new pipeline.\n \"\"\"\n \"\"\"\n Create xml file for BRAINSCut\n \"\"\"\n\n\n BFitAtlasToSubject = pe.Node(interface=BRAINSFit(),name=\"BFitAtlasToSubject\")\n BFitAtlasToSubject.inputs.costMetric=\"MMI\"\n BFitAtlasToSubject.inputs.maskProcessingMode=\"ROI\"\n BFitAtlasToSubject.inputs.numberOfSamples=100000\n BFitAtlasToSubject.inputs.numberOfIterations=[1500,1500]\n BFitAtlasToSubject.inputs.numberOfHistogramBins=50\n BFitAtlasToSubject.inputs.maximumStepLength=0.2\n BFitAtlasToSubject.inputs.minimumStepLength=[0.005,0.005]\n BFitAtlasToSubject.inputs.transformType= [\"Affine\",\"BSpline\"]\n BFitAtlasToSubject.inputs.maxBSplineDisplacement= 7\n BFitAtlasToSubject.inputs.maskInferiorCutOffFromCenter=65\n BFitAtlasToSubject.inputs.splineGridSize=[28,20,24]\n BFitAtlasToSubject.inputs.outputVolume=\"Trial_Initializer_Output.nii.gz\"\n BFitAtlasToSubject.inputs.outputTransform=\"Trial_Initializer_Output.mat\"\n cutWF.connect(SplitAvgBABC,'avgBABCT1',BFitAtlasToSubject,'fixedVolume')\n cutWF.connect(BABC,'outputLabels',BFitAtlasToSubject,'fixedBinaryVolume')\n cutWF.connect(BAtlas,'template_t1',BFitAtlasToSubject,'movingVolume')\n cutWF.connect(BAtlas,'template_brain',BFitAtlasToSubject,'movingBinaryVolume')\n cutWF.connect(BLI,'outputTransformFilename',BFitAtlasToSubject,'initialTransform')\n\n CreateBRAINSCutXML = pe.Node(Function(input_names=['rho','phi','theta',\n 'model',\n 'r_probabilityMap',\n 'l_probabilityMap',\n 'atlasT1','atlasBrain',\n 'subjT1','subjT2',\n 'subjT1GAD','subjT2GAD',\n 'subjSGGAD','subjBrain',\n 'atlasToSubj','output_dir'],\n output_names=['xml_filename','rl_structure_filename_list'],\n function = create_BRAINSCut_XML),\n overwrite = True,\n name=\"CreateBRAINSCutXML\")\n\n ## HACK Makde better directory\n CreateBRAINSCutXML.inputs.output_dir = \".\" #os.path.join(cutWF.base_dir, \"BRAINSCut_output\")\n cutWF.connect(BCM_Models,'models',CreateBRAINSCutXML,'model')\n cutWF.connect(BCM_Models,'rho',CreateBRAINSCutXML,'rho')\n cutWF.connect(BCM_Models,'phi',CreateBRAINSCutXML,'phi')\n cutWF.connect(BCM_Models,'theta',CreateBRAINSCutXML,'theta')\n cutWF.connect(BCM_Models,'r_probabilityMaps',CreateBRAINSCutXML,'r_probabilityMap')\n cutWF.connect(BCM_Models,'l_probabilityMaps',CreateBRAINSCutXML,'l_probabilityMap')\n cutWF.connect(BAtlas,'template_t1',CreateBRAINSCutXML,'atlasT1')\n cutWF.connect(BAtlas,'template_brain',CreateBRAINSCutXML,'atlasBrain')\n cutWF.connect(SplitAvgBABC,'avgBABCT1',CreateBRAINSCutXML,'subjT1')\n cutWF.connect(SplitAvgBABC,'avgBABCT2',CreateBRAINSCutXML,'subjT2')\n cutWF.connect(GADT1,'outputVolume',CreateBRAINSCutXML,'subjT1GAD')\n cutWF.connect(GADT2,'outputVolume',CreateBRAINSCutXML,'subjT2GAD')\n cutWF.connect(SGI,'outputFileName',CreateBRAINSCutXML,'subjSGGAD')\n cutWF.connect(BABC,'outputLabels',CreateBRAINSCutXML,'subjBrain')\n cutWF.connect(BFitAtlasToSubject,'outputTransform',CreateBRAINSCutXML,'atlasToSubj')\n #CreateBRAINSCutXML.inputs.atlasToSubj = \"INTERNAL_REGISTER.mat\"\n #cutWF.connect(BABC,'atlasToSubjectTransform',CreateBRAINSCutXML,'atlasToSubj')\n\n \"\"\"\n ResampleNACLabels\n \"\"\"\n ResampleAtlasNACLabels=pe.Node(interface=BRAINSResample(),name=\"ResampleAtlasNACLabels\")\n ResampleAtlasNACLabels.inputs.interpolationMode = \"NearestNeighbor\"\n ResampleAtlasNACLabels.inputs.outputVolume = \"atlasToSubjectNACLabels.nii.gz\"\n\n cutWF.connect(cutWF,'OutputSpec.atlasToSubjectTransform',ResampleAtlasNACLabels,'warpTransform')\n 
cutWF.connect(cutWF,'OutputSpec.t1_corrected',ResampleAtlasNACLabels,'referenceVolume')\n cutWF.connect(BAtlas,'template_nac_lables',ResampleAtlasNACLabels,'inputVolume')\n\n \"\"\"\n BRAINSMush\n \"\"\"\n BMUSH=pe.Node(interface=BRAINSMush(),name=\"BMUSH\")\n BMUSH.inputs.outputVolume = \"MushImage.nii.gz\"\n BMUSH.inputs.outputMask = \"MushMask.nii.gz\"\n BMUSH.inputs.lowerThresholdFactor = 1.2\n BMUSH.inputs.upperThresholdFactor = 0.55\n\n cutWF.connect(myLocalTCWF,'OutputSpec.t1_corrected',BMUSH,'inputFirstVolume')\n cutWF.connect(myLocalTCWF,'OutputSpec.t2_corrected',BMUSH,'inputSecondVolume')\n cutWF.connect(myLocalTCWF,'OutputSpec.outputLabels',BMUSH,'inputMaskVolume')\n\n \"\"\"\n BRAINSROIAuto\n \"\"\"\n BROI = pe.Node(interface=BRAINSROIAuto(), name=\"BRAINSROIAuto\")\n BROI.inputs.closingSize=12\n BROI.inputs.otsuPercentileThreshold=0.01\n BROI.inputs.thresholdCorrectionFactor=1.0\n BROI.inputs.outputROIMaskVolume = \"temproiAuto_t1_ACPC_corrected_BRAINSABC.nii.gz\"\n cutWF.connect(myLocalTCWF,'OutputSpec.t1_corrected',BROI,'inputVolume')\n\n \"\"\"\n Split the implicit outputs of BABCext\n \"\"\"\n SplitAvgBABC = pe.Node(Function(input_names=['in_files','T1_count'], output_names=['avgBABCT1','avgBABCT2'],\n function = get_first_T1_and_T2), run_without_submitting=True, name=\"99_SplitAvgBABC\")\n SplitAvgBABC.inputs.T1_count = 1 ## There is only 1 average T1 image.\n\n cutWF.connect(myLocalTCWF,'OutputSpec.outputAverageImages',SplitAvgBABC,'in_files')\n\n\n\n def printFullPath(outFileFullPath):\n print(\"=\"*80)\n print(\"=\"*80)\n print(\"=\"*80)\n print(\"=\"*80)\n print(\"{0}\".format(outFileFullPath))\n return outFileFullPath\n printOutImage = pe.Node( Function(function=printFullPath, input_names = ['outFileFullPath'], output_names = ['genoutFileFullPath']), run_without_submitting=True, name=\"99_printOutImage\")\n cutWF.connect( GADT2, 'outputVolume', printOutImage, 'outFileFullPath' )", "def identity_block(X, f, filters, stage, block):\n \n # Defines name basis.\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n \n # Retrieves Filters.\n F1, F2, F3 = filters\n \n # Saves the input value. This is needed later to add back to the main path. 
\n X_shortcut = X\n \n ##### MAIN PATH #####\n # First component of main path.\n X = Conv2D(filters = F1, kernel_size = (1, 1), strides = (1,1), padding = 'valid', name = conv_name_base + '2a', kernel_initializer = glorot_uniform())(X)\n X = BatchNormalization(axis = 3, name = bn_name_base + '2a')(X)\n X = Activation('relu')(X)\n \n \n # Second component of main path.\n X = Conv2D(filters = F2, kernel_size = (f, f), strides = (1,1), padding = 'same', name = conv_name_base + '2b', kernel_initializer = glorot_uniform())(X)\n X = BatchNormalization(axis = 3, name = bn_name_base + '2b')(X)\n X = Activation('relu')(X)\n \n # Third component of main path.\n X = Conv2D(filters = F3, kernel_size = (1, 1), strides = (1,1), padding = 'valid', name = conv_name_base + '2c', kernel_initializer = glorot_uniform())(X)\n X = BatchNormalization(axis = 3, name = bn_name_base + '2c')(X)\n\n\n # Final step: Adds shortcut value to main path, and pass it through a RELU activation.\n X = Add()([X, X_shortcut])\n X = Activation('relu')(X)\n\n return X", "def identity_block(X, f, filters, stage, block):\n \n # defining name basis\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n \n # Retrieve Filters\n F1, F2, F3 = filters\n \n # Save the input value. You'll need this later to add back to the main path. \n X_shortcut = X\n \n # First component of main path\n X = Conv2D(filters = F1, kernel_size = (1, 1), strides = (1,1), padding = 'valid', name = conv_name_base + '2a', kernel_initializer = glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis = 3, name = bn_name_base + '2a')(X)\n X = Activation('relu')(X)\n \n # Second component of main path\n X = Conv2D(filters = F2, kernel_size = (f, f), strides = (1,1), padding = 'same', name = conv_name_base + '2b', kernel_initializer = glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis = 3, name = bn_name_base + '2b')(X)\n X = Activation('relu')(X)\n\n # Third component of main path\n X = Conv2D(filters = F3, kernel_size = (1, 1), strides = (1,1), padding = 'valid', name = conv_name_base + '2c', kernel_initializer = glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis = 3, name = bn_name_base + '2c')(X)\n\n # Final step: Add shortcut value to main path, and pass it through a RELU activation\n X = Add()([X,X_shortcut])\n X = Activation('relu')(X)\n \n return X", "def make_mixture_info(parts, operation='+'):\n # type: (List[ModelInfo], str) -> ModelInfo\n # Build new parameter list\n combined_pars = []\n\n # When creating a mixture model that is a sum of product models (ie (1*2)+(3*4))\n # the parameters for models 1 & 2 will be prefixed with A & B respectively,\n # but so will the parameters for models 3 & 4. 
We need to rename models 3 & 4\n # so that they are prefixed with C & D to avoid overlap of parameter names.\n used_prefixes = []\n for part in parts:\n if part.composition and part.composition[0] == 'mixture':\n i = 0\n for submodel in part.composition[1]:\n npars = len(submodel.parameters.kernel_parameters)\n # List of params of one of the constituent models of part\n submodel_pars = part.parameters.kernel_parameters[i:i+npars]\n # Prefix of the constituent model\n prefix = submodel_pars[0].name[0]\n if prefix not in used_prefixes: # Haven't seen this prefix so far\n used_prefixes.append(prefix)\n i += npars\n continue\n # TODO: don't modify submodel --- it may be used elsewhere\n # Existing code probably doesn't keep a handle on the model\n # parts so its probably okay, but it's possible that a mix\n # on user defined mixture models models will change the\n # parameters used for the parts in the GUI. Even worse if the\n # same plugin is used twice. For example, twosphere.py\n # contains sphere+sphere and you create twosphere+twosphere.\n while prefix in used_prefixes:\n # This prefix has been already used, so change it to the\n # next letter that hasn't been used\n prefix = chr(ord(prefix) + 1)\n used_prefixes.append(prefix)\n prefix += \"_\"\n # Update the parameters of this constituent model to use the\n # new prefix\n for par in submodel_pars:\n # Strip {prefix}_ using par.name[2:], etc.\n # TODO: fails for AB_scale\n par.id = prefix + par.id[2:]\n par.name = prefix + par.name[2:]\n if par.length_control is not None:\n par.length_control = prefix + par.length_control[2:]\n i += npars\n\n for part in parts:\n # Parameter prefix per model, A_, B_, ...\n # Note that prefix must also be applied to id and length_control\n # to support vector parameters\n prefix = ''\n if not part.composition or part.composition[0] == 'product':\n # Model isn't a composition model, so its parameters don't have a\n # a prefix. Add the next available prefix\n prefix = chr(ord('A')+len(used_prefixes))\n used_prefixes.append(prefix)\n prefix += '_'\n\n if operation == '+':\n # If model is a sum model, each constituent model gets its own scale parameter\n scale_prefix = prefix\n if prefix == '' and getattr(part, \"operation\", '') == '*':\n # `part` is a composition product model. Find the prefixes of\n # its parameters to form a new prefix for the scale.\n # For example, a model with A*B*C will have ABC_scale.\n sub_prefixes = []\n for param in part.parameters.kernel_parameters:\n # Prefix of constituent model\n sub_prefix = param.id.split('_')[0]\n if sub_prefix not in sub_prefixes:\n sub_prefixes.append(sub_prefix)\n # Concatenate sub_prefixes to form prefix for the scale\n scale_prefix = ''.join(sub_prefixes) + '_'\n scale = Parameter(scale_prefix + 'scale', default=1.0,\n description=\"model intensity for \" + part.name)\n combined_pars.append(scale)\n for p in part.parameters.kernel_parameters:\n p = copy(p)\n p.name = prefix + p.name\n p.id = prefix + p.id\n if p.length_control is not None:\n p.length_control = prefix + p.length_control\n combined_pars.append(p)\n parameters = ParameterTable(combined_pars)\n # Allow for the scenario in which each component has all its PD parameters\n # active simultaneously. 
details.make_details() will throw an error if\n # too many are used from any one component.\n parameters.max_pd = sum(part.parameters.max_pd for part in parts)\n\n def random():\n \"\"\"Random set of model parameters for mixture model\"\"\"\n combined_pars = {}\n for k, part in enumerate(parts):\n prefix = chr(ord('A')+k) + '_'\n pars = part.random()\n combined_pars.update((prefix+k, v) for k, v in pars.items())\n return combined_pars\n\n model_info = ModelInfo()\n model_info.id = operation.join(part.id for part in parts)\n model_info.operation = operation\n model_info.name = '(' + operation.join(part.name for part in parts) + ')'\n model_info.filename = None\n model_info.title = 'Mixture model with ' + model_info.name\n model_info.description = model_info.title\n model_info.docs = model_info.title\n model_info.category = \"custom\"\n model_info.parameters = parameters\n model_info.random = random\n #model_info.single = any(part['single'] for part in parts)\n model_info.structure_factor = False\n #model_info.tests = []\n #model_info.source = []\n # Remember the component info blocks so we can build the model\n model_info.composition = ('mixture', parts)\n return model_info", "def tied_featurize(batch, device, chain_dict, fixed_position_dict=None, omit_AA_dict=None, tied_positions_dict=None, pssm_dict=None, bias_by_res_dict=None, ca_only=False):\n alphabet = 'ACDEFGHIKLMNPQRSTVWYX'\n B = len(batch)\n lengths = np.array([len(b['seq']) for b in batch], dtype=np.int32) #sum of chain seq lengths\n L_max = max([len(b['seq']) for b in batch])\n if ca_only:\n X = np.zeros([B, L_max, 1, 3])\n else:\n X = np.zeros([B, L_max, 4, 3])\n residue_idx = -100*np.ones([B, L_max], dtype=np.int32)\n chain_M = np.zeros([B, L_max], dtype=np.int32) #1.0 for the bits that need to be predicted\n pssm_coef_all = np.zeros([B, L_max], dtype=np.float32) #1.0 for the bits that need to be predicted\n pssm_bias_all = np.zeros([B, L_max, 21], dtype=np.float32) #1.0 for the bits that need to be predicted\n pssm_log_odds_all = 10000.0*np.ones([B, L_max, 21], dtype=np.float32) #1.0 for the bits that need to be predicted\n chain_M_pos = np.zeros([B, L_max], dtype=np.int32) #1.0 for the bits that need to be predicted\n bias_by_res_all = np.zeros([B, L_max, 21], dtype=np.float32)\n chain_encoding_all = np.zeros([B, L_max], dtype=np.int32) #1.0 for the bits that need to be predicted\n S = np.zeros([B, L_max], dtype=np.int32)\n omit_AA_mask = np.zeros([B, L_max, len(alphabet)], dtype=np.int32)\n # Build the batch\n letter_list_list = []\n visible_list_list = []\n masked_list_list = []\n masked_chain_length_list_list = []\n tied_pos_list_of_lists_list = []\n for i, b in enumerate(batch):\n if chain_dict != None:\n masked_chains, visible_chains = chain_dict[b['name']] #masked_chains a list of chain letters to predict [A, D, F]\n else:\n masked_chains = [item[-1:] for item in list(b) if item[:10]=='seq_chain_']\n visible_chains = []\n masked_chains.sort() #sort masked_chains \n visible_chains.sort() #sort visible_chains \n all_chains = masked_chains + visible_chains\n for i, b in enumerate(batch):\n mask_dict = {}\n a = 0\n x_chain_list = []\n chain_mask_list = []\n chain_seq_list = []\n chain_encoding_list = []\n c = 1\n letter_list = []\n global_idx_start_list = [0]\n visible_list = []\n masked_list = []\n masked_chain_length_list = []\n fixed_position_mask_list = []\n omit_AA_mask_list = []\n pssm_coef_list = []\n pssm_bias_list = []\n pssm_log_odds_list = []\n bias_by_res_list = []\n l0 = 0\n l1 = 0\n for step, letter in 
enumerate(all_chains):\n if letter in visible_chains:\n letter_list.append(letter)\n visible_list.append(letter)\n chain_seq = b[f'seq_chain_{letter}']\n chain_seq = ''.join([a if a!='-' else 'X' for a in chain_seq])\n chain_length = len(chain_seq)\n global_idx_start_list.append(global_idx_start_list[-1]+chain_length)\n chain_coords = b[f'coords_chain_{letter}'] #this is a dictionary\n chain_mask = np.zeros(chain_length) #0.0 for visible chains\n if ca_only:\n x_chain = np.array(chain_coords[f'CA_chain_{letter}']) #[chain_lenght,1,3] #CA_diff\n if len(x_chain.shape) == 2:\n x_chain = x_chain[:,None,:]\n else:\n x_chain = np.stack([chain_coords[c] for c in [f'N_chain_{letter}', f'CA_chain_{letter}', f'C_chain_{letter}', f'O_chain_{letter}']], 1) #[chain_lenght,4,3]\n x_chain_list.append(x_chain)\n chain_mask_list.append(chain_mask)\n chain_seq_list.append(chain_seq)\n chain_encoding_list.append(c*np.ones(np.array(chain_mask).shape[0]))\n l1 += chain_length\n residue_idx[i, l0:l1] = 100*(c-1)+np.arange(l0, l1)\n l0 += chain_length\n c+=1\n fixed_position_mask = np.ones(chain_length)\n fixed_position_mask_list.append(fixed_position_mask)\n omit_AA_mask_temp = np.zeros([chain_length, len(alphabet)], np.int32)\n omit_AA_mask_list.append(omit_AA_mask_temp)\n pssm_coef = np.zeros(chain_length)\n pssm_bias = np.zeros([chain_length, 21])\n pssm_log_odds = 10000.0*np.ones([chain_length, 21])\n pssm_coef_list.append(pssm_coef)\n pssm_bias_list.append(pssm_bias)\n pssm_log_odds_list.append(pssm_log_odds)\n bias_by_res_list.append(np.zeros([chain_length, 21]))\n if letter in masked_chains:\n masked_list.append(letter)\n letter_list.append(letter)\n chain_seq = b[f'seq_chain_{letter}']\n chain_seq = ''.join([a if a!='-' else 'X' for a in chain_seq])\n chain_length = len(chain_seq)\n global_idx_start_list.append(global_idx_start_list[-1]+chain_length)\n masked_chain_length_list.append(chain_length)\n chain_coords = b[f'coords_chain_{letter}'] #this is a dictionary\n chain_mask = np.ones(chain_length) #1.0 for masked\n if ca_only:\n x_chain = np.array(chain_coords[f'CA_chain_{letter}']) #[chain_lenght,1,3] #CA_diff\n if len(x_chain.shape) == 2:\n x_chain = x_chain[:,None,:]\n else:\n x_chain = np.stack([chain_coords[c] for c in [f'N_chain_{letter}', f'CA_chain_{letter}', f'C_chain_{letter}', f'O_chain_{letter}']], 1) #[chain_lenght,4,3] \n x_chain_list.append(x_chain)\n chain_mask_list.append(chain_mask)\n chain_seq_list.append(chain_seq)\n chain_encoding_list.append(c*np.ones(np.array(chain_mask).shape[0]))\n l1 += chain_length\n residue_idx[i, l0:l1] = 100*(c-1)+np.arange(l0, l1)\n l0 += chain_length\n c+=1\n fixed_position_mask = np.ones(chain_length)\n if fixed_position_dict!=None:\n fixed_pos_list = fixed_position_dict[b['name']][letter]\n if fixed_pos_list:\n fixed_position_mask[np.array(fixed_pos_list)-1] = 0.0\n fixed_position_mask_list.append(fixed_position_mask)\n omit_AA_mask_temp = np.zeros([chain_length, len(alphabet)], np.int32)\n if omit_AA_dict!=None:\n for item in omit_AA_dict[b['name']][letter]:\n idx_AA = np.array(item[0])-1\n AA_idx = np.array([np.argwhere(np.array(list(alphabet))== AA)[0][0] for AA in item[1]]).repeat(idx_AA.shape[0])\n idx_ = np.array([[a, b] for a in idx_AA for b in AA_idx])\n omit_AA_mask_temp[idx_[:,0], idx_[:,1]] = 1\n omit_AA_mask_list.append(omit_AA_mask_temp)\n pssm_coef = np.zeros(chain_length)\n pssm_bias = np.zeros([chain_length, 21])\n pssm_log_odds = 10000.0*np.ones([chain_length, 21])\n if pssm_dict:\n if pssm_dict[b['name']][letter]:\n pssm_coef = 
pssm_dict[b['name']][letter]['pssm_coef']\n pssm_bias = pssm_dict[b['name']][letter]['pssm_bias']\n pssm_log_odds = pssm_dict[b['name']][letter]['pssm_log_odds']\n pssm_coef_list.append(pssm_coef)\n pssm_bias_list.append(pssm_bias)\n pssm_log_odds_list.append(pssm_log_odds)\n if bias_by_res_dict:\n bias_by_res_list.append(bias_by_res_dict[b['name']][letter])\n else:\n bias_by_res_list.append(np.zeros([chain_length, 21]))\n\n \n letter_list_np = np.array(letter_list)\n tied_pos_list_of_lists = []\n tied_beta = np.ones(L_max)\n if tied_positions_dict!=None:\n tied_pos_list = tied_positions_dict[b['name']]\n if tied_pos_list:\n set_chains_tied = set(list(itertools.chain(*[list(item) for item in tied_pos_list])))\n for tied_item in tied_pos_list:\n one_list = []\n for k, v in tied_item.items():\n start_idx = global_idx_start_list[np.argwhere(letter_list_np == k)[0][0]]\n if isinstance(v[0], list):\n for v_count in range(len(v[0])):\n one_list.append(start_idx+v[0][v_count]-1)#make 0 to be the first\n tied_beta[start_idx+v[0][v_count]-1] = v[1][v_count]\n else:\n for v_ in v:\n one_list.append(start_idx+v_-1)#make 0 to be the first\n tied_pos_list_of_lists.append(one_list)\n tied_pos_list_of_lists_list.append(tied_pos_list_of_lists)\n\n\n \n x = np.concatenate(x_chain_list,0) #[L, 4, 3]\n all_sequence = \"\".join(chain_seq_list)\n m = np.concatenate(chain_mask_list,0) #[L,], 1.0 for places that need to be predicted\n chain_encoding = np.concatenate(chain_encoding_list,0)\n m_pos = np.concatenate(fixed_position_mask_list,0) #[L,], 1.0 for places that need to be predicted\n\n pssm_coef_ = np.concatenate(pssm_coef_list,0) #[L,], 1.0 for places that need to be predicted\n pssm_bias_ = np.concatenate(pssm_bias_list,0) #[L,], 1.0 for places that need to be predicted\n pssm_log_odds_ = np.concatenate(pssm_log_odds_list,0) #[L,], 1.0 for places that need to be predicted\n\n bias_by_res_ = np.concatenate(bias_by_res_list, 0) #[L,21], 0.0 for places where AA frequencies don't need to be tweaked\n\n l = len(all_sequence)\n x_pad = np.pad(x, [[0,L_max-l], [0,0], [0,0]], 'constant', constant_values=(np.nan, ))\n X[i,:,:,:] = x_pad\n\n m_pad = np.pad(m, [[0,L_max-l]], 'constant', constant_values=(0.0, ))\n m_pos_pad = np.pad(m_pos, [[0,L_max-l]], 'constant', constant_values=(0.0, ))\n omit_AA_mask_pad = np.pad(np.concatenate(omit_AA_mask_list,0), [[0,L_max-l]], 'constant', constant_values=(0.0, ))\n chain_M[i,:] = m_pad\n chain_M_pos[i,:] = m_pos_pad\n omit_AA_mask[i,] = omit_AA_mask_pad\n\n chain_encoding_pad = np.pad(chain_encoding, [[0,L_max-l]], 'constant', constant_values=(0.0, ))\n chain_encoding_all[i,:] = chain_encoding_pad\n\n pssm_coef_pad = np.pad(pssm_coef_, [[0,L_max-l]], 'constant', constant_values=(0.0, ))\n pssm_bias_pad = np.pad(pssm_bias_, [[0,L_max-l], [0,0]], 'constant', constant_values=(0.0, ))\n pssm_log_odds_pad = np.pad(pssm_log_odds_, [[0,L_max-l], [0,0]], 'constant', constant_values=(0.0, ))\n\n pssm_coef_all[i,:] = pssm_coef_pad\n pssm_bias_all[i,:] = pssm_bias_pad\n pssm_log_odds_all[i,:] = pssm_log_odds_pad\n\n bias_by_res_pad = np.pad(bias_by_res_, [[0,L_max-l], [0,0]], 'constant', constant_values=(0.0, ))\n bias_by_res_all[i,:] = bias_by_res_pad\n\n # Convert to labels\n indices = np.asarray([alphabet.index(a) for a in all_sequence], dtype=np.int32)\n S[i, :l] = indices\n letter_list_list.append(letter_list)\n visible_list_list.append(visible_list)\n masked_list_list.append(masked_list)\n masked_chain_length_list_list.append(masked_chain_length_list)\n\n\n isnan = np.isnan(X)\n 
mask = np.isfinite(np.sum(X,(2,3))).astype(np.float32)\n X[isnan] = 0.\n\n # Conversion\n pssm_coef_all = torch.from_numpy(pssm_coef_all).to(dtype=torch.float32, device=device)\n pssm_bias_all = torch.from_numpy(pssm_bias_all).to(dtype=torch.float32, device=device)\n pssm_log_odds_all = torch.from_numpy(pssm_log_odds_all).to(dtype=torch.float32, device=device)\n\n tied_beta = torch.from_numpy(tied_beta).to(dtype=torch.float32, device=device)\n\n jumps = ((residue_idx[:,1:]-residue_idx[:,:-1])==1).astype(np.float32)\n bias_by_res_all = torch.from_numpy(bias_by_res_all).to(dtype=torch.float32, device=device)\n phi_mask = np.pad(jumps, [[0,0],[1,0]])\n psi_mask = np.pad(jumps, [[0,0],[0,1]])\n omega_mask = np.pad(jumps, [[0,0],[0,1]])\n dihedral_mask = np.concatenate([phi_mask[:,:,None], psi_mask[:,:,None], omega_mask[:,:,None]], -1) #[B,L,3]\n dihedral_mask = torch.from_numpy(dihedral_mask).to(dtype=torch.float32, device=device)\n residue_idx = torch.from_numpy(residue_idx).to(dtype=torch.long,device=device)\n S = torch.from_numpy(S).to(dtype=torch.long,device=device)\n X = torch.from_numpy(X).to(dtype=torch.float32, device=device)\n mask = torch.from_numpy(mask).to(dtype=torch.float32, device=device)\n chain_M = torch.from_numpy(chain_M).to(dtype=torch.float32, device=device)\n chain_M_pos = torch.from_numpy(chain_M_pos).to(dtype=torch.float32, device=device)\n omit_AA_mask = torch.from_numpy(omit_AA_mask).to(dtype=torch.float32, device=device)\n chain_encoding_all = torch.from_numpy(chain_encoding_all).to(dtype=torch.long, device=device)\n if ca_only:\n X_out = X[:,:,0]\n else:\n X_out = X\n return X_out, S, mask, lengths, chain_M, chain_encoding_all, letter_list_list, visible_list_list, masked_list_list, masked_chain_length_list_list, chain_M_pos, omit_AA_mask, residue_idx, dihedral_mask, tied_pos_list_of_lists_list, pssm_coef_all, pssm_bias_all, pssm_log_odds_all, bias_by_res_all, tied_beta", "def compiler_output(input_ckt, hier_graph_dict, design_name:str, result_dir:pathlib.Path, pdk_dir:pathlib.Path, uniform_height=False):\n layers_json = pdk_dir / 'layers.json'\n with open(layers_json,\"rt\") as fp:\n pdk_data=json.load(fp)\n design_config = pdk_data[\"design_info\"]\n\n if not result_dir.exists():\n result_dir.mkdir()\n logger.debug(f\"Writing results in dir: {result_dir} {hier_graph_dict}\")\n input_dir = input_ckt.parents[0]\n\n verilog_tbl = { 'modules': [], 'global_signals': []}\n\n design_setup = read_setup(input_dir / (design_name + '.setup'))\n try:\n POWER_PINS = [design_setup['GND'][0],design_setup['POWER'][0]]\n except (IndexError, ValueError):\n POWER_PINS = []\n logger.info(\"Power and ground nets not found. 
Power grid will not be constructed.\")\n\n #read lef to not write those modules as macros\n lef_path = pathlib.Path(__file__).resolve().parent.parent / 'config'\n all_lef = read_lef(lef_path)\n logger.debug(f\"Available library cells: {', '.join(all_lef)}\")\n\n primitives = {}\n for name,member in hier_graph_dict.items():\n logger.debug(f\"Found module: {name} {member['graph'].nodes()}\")\n graph = member[\"graph\"]\n constraints = member[\"constraints\"]\n\n for const in constraints:\n if isinstance(const, constraint.GuardRing):\n primitives['guard_ring'] = {'primitive':'guard_ring'}\n\n logger.debug(f\"Reading nodes from graph: {name}\")\n for node, attr in graph.nodes(data=True):\n if 'net' in attr['inst_type']: continue\n #Dropping floating ports\n lef_name = attr['inst_type']\n\n if \"values\" in attr and (lef_name in all_lef):\n block_name, block_args = generate_lef(lef_name, attr, primitives, design_config, uniform_height)\n #block_name_ext = block_name.replace(lef_name,'')\n logger.debug(f\"Created new lef for: {block_name} {lef_name}\")\n #Multiple instances of same module\n if 'inst_copy' in attr:\n for nm in list(hier_graph_dict.keys()):\n if nm == lef_name + attr['inst_copy']:\n if block_name not in hier_graph_dict.keys():\n logger.debug('Trying to modify a dictionary while iterating over it!')\n hier_graph_dict[block_name] = hier_graph_dict.pop(nm)\n else:\n #For cells with extra parameters than current primitive naming convention\n all_lef.append(nm)\n graph.nodes[node][\"inst_type\"] = block_name\n all_lef.append(block_name)\n\n # Only unit caps are generated\n if block_name.lower().startswith('cap'):\n graph.nodes[node]['inst_type'] = block_args['primitive']\n block_args['primitive'] = block_name\n else:\n graph.nodes[node]['inst_type'] = block_name\n\n if block_name in primitives:\n if block_args != primitives[block_name]:\n logging.warning(f\"two different primitve {block_name} of size {primitives[block_name]} {block_args}got approximated to same unit size\")\n else:\n primitives[block_name] = block_args\n elif \"values\" in attr and 'inst_copy' in attr:\n member[\"graph\"].nodes[node][\"inst_type\"]= lef_name + attr[\"inst_copy\"]\n all_lef.append(block_name)\n\n else:\n logger.debug(f\"No physical information found for: {name}\")\n logger.debug(f\"generated data for {name} : {pprint.pformat(primitives, indent=4)}\")\n logger.debug(f\"All available cell generator with updates: {all_lef}\")\n for name,member in hier_graph_dict.items():\n graph = member[\"graph\"]\n logger.debug(f\"Found module: {name} {graph.nodes()}\")\n inoutpin = []\n floating_ports=[]\n if \"ports_match\" in member and member[\"ports_match\"]:\n for key in member[\"ports_match\"].keys():\n if key not in POWER_PINS:\n inoutpin.append(key)\n if member[\"ports\"]:\n logger.debug(f'Found module ports: {member[\"ports\"]} {member[\"name\"]}')\n floating_ports = set(inoutpin) - set(member[\"ports\"]) - set(design_setup['POWER']) -set(design_setup['GND'])\n if len(list(floating_ports))> 0:\n logger.error(f\"floating ports found: {name} {floating_ports}\")\n raise SystemExit('Please remove floating ports')\n else:\n inoutpin = member[\"ports\"]\n if name not in all_lef:\n\n ## Removing constraints to fix cascoded cmc\n if name not in design_setup['DIGITAL']:\n logger.debug(f\"call constraint generator writer for block: {name}\")\n stop_points = design_setup['POWER'] + design_setup['GND'] + design_setup['CLOCK']\n constraints = member[\"constraints\"]\n if name not in design_setup['NO_CONST']:\n constraints = 
FindConst(graph, name, inoutpin, member[\"ports_weight\"], constraints, stop_points)\n constraints = CapConst(graph, name, design_config[\"unit_size_cap\"], constraints, design_setup['MERGE_SYMM_CAPS'])\n hier_graph_dict[name] = hier_graph_dict[name].copy(\n update={'constraints': constraints}\n )\n ## Write out modified netlist & constraints as JSON\n logger.debug(f\"call verilog writer for block: {name}\")\n wv = WriteVerilog(name, inoutpin, hier_graph_dict, POWER_PINS)\n verilog_tbl['modules'].append( wv.gen_dict())\n if len(POWER_PINS)>0:\n for i, nm in enumerate(POWER_PINS):\n verilog_tbl['global_signals'].append( { 'prefix' :'global_power', 'formal' : f'supply{i}', 'actual' : nm})\n\n with (result_dir / f'{design_name}.verilog.json').open( 'wt') as fp:\n json.dump( verilog_tbl, fp=fp, indent=2)\n\n with (result_dir / f'{design_name}.v').open( 'wt') as fp:\n write_verilog( verilog_tbl, fp)\n\n logger.info(\"Completed topology identification.\")\n logger.debug(f\"OUTPUT verilog json netlist at: {result_dir}/{design_name}.verilog.json\")\n logger.debug(f\"OUTPUT verilog netlist at: {result_dir}/{design_name}.v\")\n logger.debug(f\"OUTPUT const file at: {result_dir}/{design_name}.pnr.const.json\")\n return primitives", "def convert2EbnerParam(joblib,list_prefix,directory):\n \n key=[p[0] for p in joblib]\n element=[p[1] for p in joblib]\n\n \n listSlice=element[key.index('listSlice')] #list of slice\n parameters=element[key.index('EvolutionParameters')][-1,:,:] #estimated paramters of the registration\n rejectedSlice=element[key.index('RejectedSlices')] #rejected slices\n\n \n images,mask = createVolumesFromAlist(listSlice.copy()) #list of images corresponding to differents original stacks\n \n for i_slice in range(len(listSlice)): \n slicei=listSlice[i_slice]\n slicei.set_parameters(parameters[i_slice,:]) #set parameters to the last estimated parameters\n \n mat = np.array([[-1,0,0,0],[0,-1,0,0],[0,0,1,0],[0,0,0,1]]) #matrix to convert affine matrix from nibabel to itk\n\n for n in range(len(images)): #for each stack\n \n imagen = images[n]\n \n for i_slice in range(len(images[n])): #for each slices (in each stacks)\n \n slicei=imagen[i_slice]\n s = (slicei.get_orientation(),slicei.get_index_slice())\n \n if s not in rejectedSlice:\n dimension=3\n X,Y,Z= slicei.get_slice().get_fdata().shape\n center= -slicei.get_center()\n centerMat = np.eye(4)\n centerMat[0:3,3] = center[0:3]\n invcenterMat = np.eye(4)\n invcenterMat[0:3,3] = -center[0:3] \n p = slicei.get_parameters()\n matrix = mat @ centerMat @ rigidMatrix([p[0],p[1],p[2],p[3],p[4],p[5]]) @ invcenterMat @ mat\n test = sitk.AffineTransform(dimension)\n test.SetMatrix(matrix[0:3,0:3].flatten())\n test.SetTranslation(matrix[0:3,3])\n images_index = slicei.get_index_image()\n sitk.WriteTransform(test,\"%s/%s_slice%d.tfm\" %(directory,list_prefix[images_index],slicei.get_index_slice())) #save rigid transformation, computed at the barycenter of the image, adatpted to itk\n #else:\n #print(s)", "def build(width, height, depth, classes, stages, filters, include_top, pooling,\n reg=1e-3, bnEps=2e-5, bnMom=0.0):\n inputShape = (height, width, depth)\n chanDim = -1\n\n if K.image_data_format() == \"channels_first\":\n inputShape = (depth, height, width)\n chanDim = 1\n\n inputs = Input(shape=inputShape)\n\n\n # block 1 (initial conv block)\n x = ZeroPadding2D(padding=((3, 3), (3, 3)), name='conv1_pad')(inputs)\n x = Conv2D(64, (7,7), use_bias=False, strides=(2,2),\n kernel_initializer=\"he_normal\", kernel_regularizer=l2(reg))(x)\n x = 
BatchNormalization(axis=chanDim, name=\"bn_conv1\")(x)\n x = Activation(\"relu\")(x)\n x = ZeroPadding2D(padding=((1,1), (1,1)), name=\"pool1_pad\")(x)\n x = MaxPooling2D(3, strides=2)(x)\n\n for i in range(0, len(stages)):\n stride = (1,1) if i == 0 else (2,2) # block 2 (projection block) w stride(1,1)\n\n print(\"Stage {}, Stride={}\".format(i, stride))\n x = SEResNet.residual_module(x, filters[i+1], stride,\n chanDim=chanDim, red=True, bnEps=bnEps, bnMom=bnMom)\n for j in range(0, stages[i] + 1): #stacking res block to each depth layer\n x = SEResNet.residual_module(x, filters[i+1], stride=(1,1),\n chanDim=chanDim, bnEps=bnEps,\n bnMom=bnMom)\n x = BatchNormalization(axis=chanDim, epsilon=bnEps,\n momentum=bnMom)(x)\n x = Activation(\"relu\")(x)\n\n if include_top:\n x = GlobalAveragePooling2D()(x)\n x = Dense(classes, use_bias=False, kernel_regularizer=l2(reg),\n activation='softmax')(x)\n else:\n if pooling == 'avg':\n print(\"Adding average pool\")\n x = GlobalAveragePooling2D()(x)\n elif pooling == 'max':\n x = GlobalMaxPooling2D()(x)\n\n model = Model(inputs=inputs, outputs=x, name=\"SEResNet\")\n return model", "def _processLabel(self, kitti_label):\n label = {\n 'category': kitti_label['type'].lower(),\n 'box2D': kitti_label['bbox'].copy(),\n 'box3D': {\n 'location': {\n 'x': kitti_label['location']['x'],\n 'y': kitti_label['location']['y'] - kitti_label['dimensions']['height'] / 2.0, # move to center\n 'z': kitti_label['location']['z'],\n },\n 'dimensions': kitti_label['dimensions'].copy(),\n 'rotation_y': kitti_label['rotation_y'],\n },\n 'info': {\n 'truncated': kitti_label['truncated'],\n 'occluded': kitti_label['occluded'],\n },\n }\n if 'trackId' in kitti_label:\n # set trackId if given\n label['info']['trackId'] = kitti_label['trackId']\n return label", "def generate_semisupervized_label(self, idx_known, idx_unknown):\n tmp_df = self.data_info.set_index(['patientID','body_part'])\n # associate semi-supervized settings\n if len(idx_known) > 0:\n df_known = tmp_df.loc[idx_known,:]\n df_known['semi_label'] = df_known.abnormal_XR.apply(lambda x: -1 if x==1 else 1)\n df_unknown = tmp_df.loc[idx_unknown,:]\n df_unknown['semi_label'] = 0\n return pd.concat([df_known, df_unknown], axis=0).reset_index()\n else:\n df_unknown = tmp_df.loc[idx_unknown,:]\n df_unknown['semi_label'] = 0\n return df_unknown.reset_index()", "def serialize(mode):\r\n serialize_version(mode)\r\n vcb.serialize(mode) \r\n for x in xfrms:\r\n x.serialize(mode)", "def label(self, cfg):\n rep = \"\"\n nl = \"\"\n for node in cfg.nodes:\n rep += nl + \"{}\\tgen={}\\tkill={}\\tout={}\".format(\n node, \n set(self.gen.get(node)),\n set(self.kill.get(node)),\n set(self.out.get(node)))\n nl = \"\\n\"\n return rep", "def m_dump_tf():\n\n # Info about ENS and Resolver Smart Contracts\n #print(f\"ENS address: {ens.address()}\")\n print(f\"ENS address (contract address): {ENS.address}\")\n print(f\"Resolver address (contract address): {resolver.address()}\")\n print(f\"Resolver address from ENS(root): {ens.resolver('root')}\")\n print(\"\\n\")\n\n print(f\"Owner of ROOT record: {ens.owner('root')}\")\n ROOT_address, ROOT_key = wallet.account_from_name(\"ROOT\", \"ThePassword\")\n print(f\"ROOT address from wallet: {ROOT_address}\")\n\n n_subnodes = ens.numberSubnodes(0)\n print(f\"Number of subnodes of root: {n_subnodes}\")\n for i in range(n_subnodes):\n hash = ens.subnode(index=i).hex()\n print(f\" Subnode hash {i}: {ens.subnode(i).hex()}\")\n name = resolver.name(node_hash=hash)\n print(f\" Name: {name}\")\n 
resolver_address = ENS.functions.resolver(hash).call()\n print(f\" Resolver: {resolver_address}\")\n ala_address = ens.resolver(name)\n print(f\" Resolver by name: {ala_address}\")\n owner = owner = ENS.functions.owner(hash).call()\n print(f\" Owner: {owner}\")\n ROOT_address, ROOT_key = wallet.account_from_name(\"Alastria\", \"ThePassword\")\n print(f\" Address from wallet: {ROOT_address}\")\n\n n_subnodes = ens.numberSubnodes(\"ala\")\n print(f\"Number of subnodes of {name}: {n_subnodes}\")\n for i in range(n_subnodes):\n hash = ens.subnode(node_name=\"ala\", index=i).hex()\n print(f\" Subnode hash {i}: {hash}\")\n name = resolver.name(node_hash=hash)\n print(f\" Name: {name}\")\n resolver_address = ENS.functions.resolver(hash).call()\n print(f\" Resolver: {resolver_address}\")\n ala_address = ens.resolver(name)\n print(f\" Resolver by name: {ala_address}\")\n owner = owner = ENS.functions.owner(hash).call()\n print(f\" Owner: {owner}\")", "def serialize(self):\n return pickle.dumps([block.serialize() for block in self.chain])", "def identity_block(X, f, filters, stage, block):\n\n # defining name basis\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n # Retrieve Filters\n F1, F2, F3 = filters\n\n # Save the input value.\n X_shortcut = X\n\n # First component of main path\n X = Conv2D(filters = F1, kernel_size = (1, 1), strides = (1,1), padding = 'valid', name = conv_name_base + '2a', kernel_initializer = glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis = 3, name = bn_name_base + '2a')(X)\n X = Activation('relu')(X)\n\n # Second component of main path\n X = Conv2D(filters = F2, kernel_size= (f,f),strides= (1,1), padding= 'same' , name = conv_name_base + '2b', kernel_initializer = glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis=3, name=bn_name_base + '2b')(X)\n X = Activation('relu')(X)\n\n # Third component of main path\n X = Conv2D(filters = F3, kernel_size= (1,1),strides= (1,1), padding= 'valid' , name = conv_name_base + '2c', kernel_initializer = glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis=3, name=bn_name_base + '2c')(X)\n\n # Final step: Add shortcut value to main path, and pass it through a RELU activation\n X = layers.add([X,X_shortcut])\n X = Activation('relu')(X)\n\n return X", "def build_label_transform():\n\n return NALabelEncoder()", "def _make_ht_label(chain_parts):\n\n assert len(chain_parts) == 1, '_make_ht_label, no. 
of chain parts != 1'\n scenario = chain_parts[0]['hypoScenario']\n \n assert scenario.startswith('HT'), '_make_ht_label(): scenario does not start with HT'\n\n arg_res = [\n re.compile(r'^(?P<lo>\\d*)(?P<key>ht)(?P<hi>\\d*)$'),\n re.compile(r'^(?P<lo>\\d*)(?P<key>et)(?P<hi>\\d*)$'),\n re.compile(r'^(?P<lo>\\d*)(?P<key>eta)(?P<hi>\\d*)$'),\n ]\n\n defaults = {\n 'ht': ('0', 'inf'),\n 'et': ('0', 'inf'),\n 'eta': ('0', 'inf'),\n }\n\n\n args = _args_from_scenario(scenario)\n argvals = {}\n nargs = len(args)\n assert len(args) <= len(arg_res), 'bad num of args %d, expected < %d' % (len(args),\n len(arg_res))\n\n # obtain argument values frrom scenario\n while args:\n arg = args.pop()\n for r in arg_res:\n m = r.match(arg)\n if m is not None:\n arg_res.remove(r)\n gd = m.groupdict()\n key = gd['key']\n\n try:\n lo = float(gd['lo'])\n except ValueError:\n lo = float(defaults[key][0])\n argvals[key+'lo'] = lo \n try:\n hi = float(gd['hi'])\n except ValueError:\n hi = float(defaults[key][1])\n argvals[key+'hi'] = hi\n\n print (argvals)\n assert len(argvals) == 2*nargs, 'no of args: %d, expected %d' % (len(argvals), 2*nargs)\n\n print ('sent 100')\n result = \"\"\"\n ht([(%(htlo).0fht) \n (%(etlo).0fet)\n (%(etalo).0feta%(etahi).0f)\n ])\"\"\" % argvals\n print (result)\n return result", "def makeBinaryChains():\n\t\n\t# retrieve the binding partner specifications\n\t(maxsize,types) = getTypes()\n\t\n\t# Do some basic argument checking for this model\n\tif (len(types) < 2):\n\t\tprint \"Number of defined types must equal two for binary chain calculations.\"\n\t\treturn\n\tif (maxsize == 0):\n\t\tprint \"Must specify a valid maximum number for one or more components.\"\n\t\treturn\n\n\tallChains = []\n\tnewChainsA = [[]]\n\tnewChainsB = []\n\t\n\ttypeA = types[0]\n\ttypeB = types[1]\n\t\n\t# start the chain with a single type A component\n\taddComponent(newChainsA[0],typeA,0,0)\n\n\tdepth = 0\n\tfor n in range(maxsize):\n\t\tdepth+=1\n\t\t\n\t\t# go through all the chains created last iteration and append B components\n\t\tnewChainsB = []\n\t\tfor thisChain in newChainsA:\n\n\t\t\t# get a list of new available sites in the provided chain\n\t\t\t# by setting depth -1, we will only add to components added last round\n\t\t\topenSites = makeSiteList(thisChain,typeB,depth-1)\n\t\t\t\n\t\t\t# make all the descendants from the current chain and append them to the pool\n\t\t\tif (n == 0) and (typeA['sym']): #if the starting binder is symmetric, no need to start chains at all its sites\n\t\t\t\tnewChainsB = newChainsB + fillSites(openSites,thisChain,typeB,-1)\n\t\t\telse:\n\t\t\t\tnewChainsB = newChainsB + fillSites(openSites,thisChain,typeB,depth)\n\t\t\n\t\tprint('n:'+str(n)+', '+str(len(newChainsB))+ ' chains created at depth '+str(depth))\n\t\t\n\t\tallChains = allChains + newChainsB\n\t\t\n\t\tdepth+=1\n\t\t\n\t\t# add an additional component to all the previously modified chains\n\t\tnewChainsA = []\n\t\tfor thisChain in newChainsB:\n\n\t\t\topenSites = makeSiteList(thisChain,typeA,depth-1)\n\t\t\tnewChainsA = newChainsA + fillSites(openSites,thisChain,typeA,depth)\n\t\t\t\n\t\tprint('n:'+str(n)+', '+str(len(newChainsA))+ ' chains created at depth '+str(depth))\n\t\t\n\t\tallChains = allChains + newChainsA\n\n\treturn allChains", "def __init__(self):\n # Flag this instance as compiled now\n self.is_compiled = True\n \n super(HState2CProcDef, self).__init__(name='HState2CProcDef', num_nodes=0, edges=[])\n \n \n # Set the graph attributes\n self[\"mm__\"] = ['HimesisMM']\n \n self[\"name\"] = 
\"\"\"State2CProcDef\"\"\"\n self[\"GUID__\"] = uuid.uuid3(uuid.NAMESPACE_DNS,'State2CProcDef')\n \n # match model. We only support one match model\n self.add_node()\n self.vs[0][\"mm__\"] = \"\"\"MatchModel\"\"\"\n \n # apply model node\n self.add_node()\n self.vs[1][\"mm__\"] = \"\"\"ApplyModel\"\"\"\n \n # paired with relation between match and apply models\n self.add_node()\n self.vs[2][\"mm__\"] = \"\"\"paired_with\"\"\"\n \n \n # match class State() node\n self.add_node()\n\n self.vs[3][\"mm__\"] = \"\"\"State\"\"\" \n self.vs[3][\"attr1\"] = \"\"\"+\"\"\" \n # match_contains node for class State()\n self.add_node()\n self.vs[4][\"mm__\"] = \"\"\"match_contains\"\"\"\n # match class Transition() node\n self.add_node()\n\n self.vs[5][\"mm__\"] = \"\"\"Transition\"\"\" \n self.vs[5][\"attr1\"] = \"\"\"1\"\"\" \n # match_contains node for class Transition()\n self.add_node()\n self.vs[6][\"mm__\"] = \"\"\"match_contains\"\"\"\n # match class EntryPoint() node\n self.add_node()\n\n self.vs[7][\"mm__\"] = \"\"\"EntryPoint\"\"\" \n self.vs[7][\"attr1\"] = \"\"\"1\"\"\" \n # match_contains node for class EntryPoint()\n self.add_node()\n self.vs[8][\"mm__\"] = \"\"\"match_contains\"\"\"\n # match class StateMachine() node\n self.add_node()\n\n self.vs[9][\"mm__\"] = \"\"\"StateMachine\"\"\" \n self.vs[9][\"attr1\"] = \"\"\"1\"\"\" \n # match_contains node for class StateMachine()\n self.add_node()\n self.vs[10][\"mm__\"] = \"\"\"match_contains\"\"\"\n \n \n # apply class LocalDef() node\n self.add_node()\n\n self.vs[11][\"mm__\"] = \"\"\"LocalDef\"\"\" \n self.vs[11][\"attr1\"] = \"\"\"1\"\"\"\n # apply_contains node for class LocalDef()\n self.add_node()\n self.vs[12][\"mm__\"] = \"\"\"apply_contains\"\"\"\n # apply class ProcDef() node\n self.add_node()\n\n self.vs[13][\"mm__\"] = \"\"\"ProcDef\"\"\" \n self.vs[13][\"attr1\"] = \"\"\"1\"\"\"\n # apply_contains node for class ProcDef()\n self.add_node()\n self.vs[14][\"mm__\"] = \"\"\"apply_contains\"\"\"\n # apply class Name() node\n self.add_node()\n\n self.vs[15][\"mm__\"] = \"\"\"Name\"\"\" \n self.vs[15][\"attr1\"] = \"\"\"1\"\"\"\n # apply_contains node for class Name()\n self.add_node()\n self.vs[16][\"mm__\"] = \"\"\"apply_contains\"\"\"\n # apply class Name() node\n self.add_node()\n\n self.vs[17][\"mm__\"] = \"\"\"Name\"\"\" \n self.vs[17][\"attr1\"] = \"\"\"1\"\"\"\n # apply_contains node for class Name()\n self.add_node()\n self.vs[18][\"mm__\"] = \"\"\"apply_contains\"\"\"\n # apply class Name() node\n self.add_node()\n\n self.vs[19][\"mm__\"] = \"\"\"Name\"\"\" \n self.vs[19][\"attr1\"] = \"\"\"1\"\"\"\n # apply_contains node for class Name()\n self.add_node()\n self.vs[20][\"mm__\"] = \"\"\"apply_contains\"\"\"\n # apply class Name() node\n self.add_node()\n\n self.vs[21][\"mm__\"] = \"\"\"Name\"\"\" \n self.vs[21][\"attr1\"] = \"\"\"1\"\"\"\n # apply_contains node for class Name()\n self.add_node()\n self.vs[22][\"mm__\"] = \"\"\"apply_contains\"\"\"\n # apply class ConditionSet() node\n self.add_node()\n\n self.vs[23][\"mm__\"] = \"\"\"ConditionSet\"\"\" \n self.vs[23][\"attr1\"] = \"\"\"1\"\"\"\n # apply_contains node for class ConditionSet()\n self.add_node()\n self.vs[24][\"mm__\"] = \"\"\"apply_contains\"\"\"\n # apply class Inst() node\n self.add_node()\n\n self.vs[25][\"mm__\"] = \"\"\"Inst\"\"\" \n self.vs[25][\"attr1\"] = \"\"\"1\"\"\"\n # apply_contains node for class Inst()\n self.add_node()\n self.vs[26][\"mm__\"] = \"\"\"apply_contains\"\"\"\n # apply class Name() node\n self.add_node()\n\n self.vs[27][\"mm__\"] 
= \"\"\"Name\"\"\" \n self.vs[27][\"attr1\"] = \"\"\"1\"\"\"\n # apply_contains node for class Name()\n self.add_node()\n self.vs[28][\"mm__\"] = \"\"\"apply_contains\"\"\"\n # apply class Name() node\n self.add_node()\n\n self.vs[29][\"mm__\"] = \"\"\"Name\"\"\" \n self.vs[29][\"attr1\"] = \"\"\"1\"\"\"\n # apply_contains node for class Name()\n self.add_node()\n self.vs[30][\"mm__\"] = \"\"\"apply_contains\"\"\"\n # apply class Name() node\n self.add_node()\n\n self.vs[31][\"mm__\"] = \"\"\"Name\"\"\" \n self.vs[31][\"attr1\"] = \"\"\"1\"\"\"\n # apply_contains node for class Name()\n self.add_node()\n self.vs[32][\"mm__\"] = \"\"\"apply_contains\"\"\"\n # apply class Name() node\n self.add_node()\n\n self.vs[33][\"mm__\"] = \"\"\"Name\"\"\" \n self.vs[33][\"attr1\"] = \"\"\"1\"\"\"\n # apply_contains node for class Name()\n self.add_node()\n self.vs[34][\"mm__\"] = \"\"\"apply_contains\"\"\"\n \n \n # match association State--initialTransition-->Transition node\n self.add_node()\n self.vs[35][\"attr1\"] = \"\"\"initialTransition\"\"\"\n self.vs[35][\"mm__\"] = \"\"\"directLink_S\"\"\"\n # match association Transition--dest-->EntryPoint node\n self.add_node()\n self.vs[36][\"attr1\"] = \"\"\"dest\"\"\"\n self.vs[36][\"mm__\"] = \"\"\"directLink_S\"\"\"\n # match association EntryPoint--owningStateMachine-->StateMachine node\n self.add_node()\n self.vs[37][\"attr1\"] = \"\"\"owningStateMachine\"\"\"\n self.vs[37][\"mm__\"] = \"\"\"directLink_S\"\"\"\n \n # apply association LocalDef--def-->ProcDef node\n self.add_node()\n self.vs[38][\"attr1\"] = \"\"\"def\"\"\"\n self.vs[38][\"mm__\"] = \"\"\"directLink_T\"\"\"\n # apply association ProcDef--channelNames-->Name node\n self.add_node()\n self.vs[39][\"attr1\"] = \"\"\"channelNames\"\"\"\n self.vs[39][\"mm__\"] = \"\"\"directLink_T\"\"\"\n # apply association ProcDef--channelNames-->Name node\n self.add_node()\n self.vs[40][\"attr1\"] = \"\"\"channelNames\"\"\"\n self.vs[40][\"mm__\"] = \"\"\"directLink_T\"\"\"\n # apply association ProcDef--channelNames-->Name node\n self.add_node()\n self.vs[41][\"attr1\"] = \"\"\"channelNames\"\"\"\n self.vs[41][\"mm__\"] = \"\"\"directLink_T\"\"\"\n # apply association ProcDef--channelNames-->Name node\n self.add_node()\n self.vs[42][\"attr1\"] = \"\"\"channelNames\"\"\"\n self.vs[42][\"mm__\"] = \"\"\"directLink_T\"\"\"\n # apply association ProcDef--p-->ConditionSet node\n self.add_node()\n self.vs[43][\"attr1\"] = \"\"\"p\"\"\"\n self.vs[43][\"mm__\"] = \"\"\"directLink_T\"\"\"\n # apply association ConditionSet--alternative-->Inst node\n self.add_node()\n self.vs[44][\"attr1\"] = \"\"\"alternative\"\"\"\n self.vs[44][\"mm__\"] = \"\"\"directLink_T\"\"\"\n # apply association Inst--channelNames-->Name node\n self.add_node()\n self.vs[45][\"attr1\"] = \"\"\"channelNames\"\"\"\n self.vs[45][\"mm__\"] = \"\"\"directLink_T\"\"\"\n # apply association Inst--channelNames-->Name node\n self.add_node()\n self.vs[46][\"attr1\"] = \"\"\"channelNames\"\"\"\n self.vs[46][\"mm__\"] = \"\"\"directLink_T\"\"\"\n # apply association Inst--channelNames-->Name node\n self.add_node()\n self.vs[47][\"attr1\"] = \"\"\"channelNames\"\"\"\n self.vs[47][\"mm__\"] = \"\"\"directLink_T\"\"\"\n # apply association Inst--channelNames-->Name node\n self.add_node()\n self.vs[48][\"attr1\"] = \"\"\"channelNames\"\"\"\n self.vs[48][\"mm__\"] = \"\"\"directLink_T\"\"\"\n \n # backward association State---->LocalDef node\n self.add_node()\n\n self.vs[49][\"mm__\"] = \"\"\"backward_link\"\"\"\n \n \n \n \n \n \n # Add the edges\n 
self.add_edges([\n (0,4), # matchmodel -> match_contains\n (4,3), # match_contains -> match_class State()\n (0,6), # matchmodel -> match_contains\n (6,5), # match_contains -> match_class Transition()\n (0,8), # matchmodel -> match_contains\n (8,7), # match_contains -> match_class EntryPoint()\n (0,10), # matchmodel -> match_contains\n (10,9), # match_contains -> match_class StateMachine()\n (1,12), # applymodel -> apply_contains\n (12,11), # apply_contains -> apply_class LocalDef()\n (1,14), # applymodel -> apply_contains\n (14,13), # apply_contains -> apply_class ProcDef()\n (1,16), # applymodel -> apply_contains\n (16,15), # apply_contains -> apply_class Name()\n (1,18), # applymodel -> apply_contains\n (18,17), # apply_contains -> apply_class Name()\n (1,20), # applymodel -> apply_contains\n (20,19), # apply_contains -> apply_class Name()\n (1,22), # applymodel -> apply_contains\n (22,21), # apply_contains -> apply_class Name()\n (1,24), # applymodel -> apply_contains\n (24,23), # apply_contains -> apply_class ConditionSet()\n (1,26), # applymodel -> apply_contains\n (26,25), # apply_contains -> apply_class Inst()\n (1,28), # applymodel -> apply_contains\n (28,27), # apply_contains -> apply_class Name()\n (1,30), # applymodel -> apply_contains\n (30,29), # apply_contains -> apply_class Name()\n (1,32), # applymodel -> apply_contains\n (32,31), # apply_contains -> apply_class Name()\n (1,34), # applymodel -> apply_contains\n (34,33), # apply_contains -> apply_class Name()\n (3,35), # match_class State() -> association initialTransition\n (35,5), # association initialTransition -> match_class Transition()\n (5,36), # match_class Transition() -> association dest\n (36,7), # association dest -> match_class EntryPoint()\n (7,37), # match_class EntryPoint() -> association owningStateMachine\n (37,9), # association owningStateMachine -> match_class StateMachine()\n (11,38), # apply_class LocalDef() -> association def\n (38,13), # association def -> apply_class ProcDef()\n (13,39), # apply_class ProcDef() -> association channelNames\n (39,15), # association channelNames -> apply_class Name()\n (13,40), # apply_class ProcDef() -> association channelNames\n (40,17), # association channelNames -> apply_class Name()\n (13,41), # apply_class ProcDef() -> association channelNames\n (41,19), # association channelNames -> apply_class Name()\n (13,42), # apply_class ProcDef() -> association channelNames\n (42,21), # association channelNames -> apply_class Name()\n (13,43), # apply_class ProcDef() -> association p\n (43,23), # association p -> apply_class ConditionSet()\n (23,44), # apply_class ConditionSet() -> association alternative\n (44,25), # association alternative -> apply_class Inst()\n (25,45), # apply_class Inst() -> association channelNames\n (45,27), # association channelNames -> apply_class Name()\n (25,46), # apply_class Inst() -> association channelNames\n (46,29), # association channelNames -> apply_class Name()\n (25,47), # apply_class Inst() -> association channelNames\n (47,31), # association channelNames -> apply_class Name()\n (25,48), # apply_class Inst() -> association channelNames\n (48,33), # association channelNames -> apply_class Name()\n (11,49), # apply_class LocalDef() -> backward_association\n (49,3), # backward_association -> apply_class State()\n (0,2), # matchmodel -> pairedwith\n (2,1) # pairedwith -> applyModel\t\t\t\t\n\t\t])\n\n # Add the attribute equations\n self[\"equations\"] = [((3,'isComposite'),('constant','true')), 
((11,'ApplyAttribute'),('constant','solveRef')), ((13,'name'),('constant','C')), ((15,'literal'),('constant','enp')), ((17,'literal'),('constant','exit')), ((19,'literal'),('constant','exack')), ((21,'literal'),('constant','sh')), ((23,'ApplyAttribute'),('constant','solveRef')), ((25,'name'),('concat',(('constant','S'),(9,'name')))), ((27,'literal'),('concat',(('constant','A'),('concat',((7,'name'),('constant','A')))))), ((29,'literal'),('constant','exit_in')), ((31,'literal'),('constant','exack_in')), ((33,'literal'),('constant','sh_in')), ]", "def calculate_217f_part_count(**attributes):\n _msg = ''\n\n # Dictionary containing MIL-HDBK-217FN2 parts count base hazard rates.\n # First key is the subcategory_id, second key is the specification id. If\n # the resistor subcategory is NOT specification dependent, then the second\n # key will be zero. Current subcategory IDs are:\n #\n # 1. Fixed, Composition (RC, RCR)\n # 2. Fixed, Film (RL, RLR, RN, RNC, RNN, RNR)\n # 3. Fixed, Film, Power (RD)\n # 4. Fixed, Film, Network (RZ)\n # 5. Fixed, Wirewound, Power (RB, RBR)\n # 6. Fixed, Wirewound, Power, Chassis Mounted (RE, RER)\n # 7. Thermistor\n # 8. Variable, Wirewound (RT, RTR)\n # 9. Variable, Wirewound, Precision (RR)\n # 10. Variable, Wirewound, Semiprecision (RA, RK)\n # 11. Variable, Non-Wirewound (RJ, RJR)\n # 12. Variable, Composition (RV)\n # 13. Variable,Non-Wirewound, Film and Precision (RQ, RVC)\n #\n # These keys return a list of base hazard rates. The hazard rate to use is\n # selected from the list depending on the active environment.\n _dic_lambda_b = {\n 1: [\n 0.0005, 0.0022, 0.0071, 0.0037, 0.012, 0.0052, 0.0065, 0.016,\n 0.025, 0.025, 0.00025, 0.0098, 0.035, 0.36\n ],\n 2: {\n 1: [\n 0.0012, 0.0027, 0.011, 0.0054, 0.020, 0.0063, 0.013, 0.018,\n 0.033, 0.030, 0.00025, 0.014, 0.044, 0.69\n ],\n 2: [\n 0.0012, 0.0027, 0.011, 0.0054, 0.020, 0.0063, 0.013, 0.018,\n 0.033, 0.030, 0.00025, 0.014, 0.044, 0.69\n ],\n 3: [\n 0.0014, 0.0031, 0.013, 0.0061, 0.023, 0.0072, 0.014, 0.021,\n 0.038, 0.034, 0.00028, 0.016, 0.050, 0.78\n ],\n 4: [\n 0.0014, 0.0031, 0.013, 0.0061, 0.023, 0.0072, 0.014, 0.021,\n 0.038, 0.034, 0.00028, 0.016, 0.050, 0.78\n ]\n },\n 3: [\n 0.012, 0.025, 0.13, 0.062, 0.21, 0.078, 0.10, 0.19, 0.24, 0.32,\n 0.0060, 0.18, 0.47, 8.2\n ],\n 4: [\n 0.0023, 0.0066, 0.031, 0.013, 0.055, 0.022, 0.043, 0.077, 0.15,\n 0.10, 0.0011, 0.055, 0.15, 1.7\n ],\n 5: [\n 0.0085, 0.018, 0.10, 0.045, 0.16, 0.15, 0.17, 0.30, 0.38, 0.26,\n 0.0068, 0.13, 0.37, 5.4\n ],\n 6: {\n 1: [\n 0.014, 0.031, 0.16, 0.077, 0.26, 0.073, 0.15, 0.19, 0.39, 0.42,\n 0.0042, 0.21, 0.62, 9.4\n ],\n 2: [\n 0.013, 0.028, 0.15, 0.070, 0.24, 0.065, 0.13, 0.18, 0.35, 0.38,\n 0.0038, 0.19, 0.56, 8.6\n ]\n },\n 7: [\n 0.008, 0.18, 0.096, 0.045, 0.15, 0.044, 0.088, 0.12, 0.24, 0.25,\n 0.004, 0.13, 0.37, 5.5\n ],\n 8: [\n 0.065, 0.32, 1.4, 0.71, 1.6, 0.71, 1.9, 1.0, 2.7, 2.4, 0.032, 1.3,\n 3.4, 62.0\n ],\n 9: [\n 0.025, 0.055, 0.35, 0.15, 0.58, 0.16, 0.26, 0.35, 0.58, 1.1, 0.013,\n 0.52, 1.6, 24.0\n ],\n 10: [\n 0.33, 0.73, 7.0, 2.9, 12.0, 3.5, 5.3, 7.1, 9.8, 23.0, 0.16, 11.0,\n 33.0, 510.0\n ],\n 11: [\n 0.15, 0.35, 3.1, 1.2, 5.4, 1.9, 2.8, 0.0, 0.0, 9.0, 0.075, 0.0,\n 0.0, 0.0\n ],\n 12: [\n 0.15, 0.34, 2.9, 1.2, 5.0, 1.6, 2.4, 0.0, 0.0, 7.6, 0.076, 0.0,\n 0.0, 0.0\n ],\n 13: [\n 0.043, 0.15, 0.75, 0.35, 1.3, 0.39, 0.78, 1.8, 2.8, 2.5, 0.21, 1.2,\n 3.7, 49.0\n ],\n 14: [\n 0.05, 0.11, 1.1, 0.45, 1.7, 2.8, 4.6, 4.6, 7.5, 3.3, 0.025, 1.5,\n 4.7, 67.0\n ],\n 15: [\n 0.048, 0.16, 0.76, 0.36, 1.3, 0.36, 0.72, 1.4, 2.2, 
2.3, 0.024,\n 1.2, 3.4, 52.0\n ]\n }\n\n # List containing piQ values for parts count method. The list positions\n # corrspond to the following quality levels:\n #\n # 0. Established reliability level S\n # 1. Established reliability level R\n # 2. Established reliability level P\n # 3. Established reliability level M\n # 4. Non-established reliability MIL-SPEC\n # 5. Non-established reliability lower\n #\n # The quality_id attribute is used to select the proper value of piQ.\n _lst_piQ = [0.030, 0.10, 0.30, 1.0, 3.0, 10.0]\n\n # Select the base hazard rate.\n try:\n if attributes['subcategory_id'] in [2, 6]:\n _lst_base_hr = _dic_lambda_b[attributes['subcategory_id']][\n attributes['specification_id']]\n else:\n _lst_base_hr = _dic_lambda_b[attributes['subcategory_id']]\n except KeyError:\n _lst_base_hr = [0.0]\n\n try:\n attributes['lambda_b'] = _lst_base_hr[\n attributes['environment_active_id'] - 1]\n except IndexError:\n attributes['lambda_b'] = 0.0\n\n # Select the piQ.\n try:\n attributes['piQ'] = _lst_piQ[attributes['quality_id'] - 1]\n except IndexError:\n attributes['piQ'] = 0.0\n\n # Confirm all inputs are within range. If not, set the message. The\n # hazard rate will be calculated anyway, but will be zero.\n if attributes['lambda_b'] <= 0.0:\n _msg = _msg + 'RAMSTK WARNING: Base hazard rate is 0.0 when ' \\\n 'calculating resistor, hardware ID: ' \\\n '{0:d}, subcategory ID: {1:d}, specification ID: {2:d}, ' \\\n 'active environment ID: {3:d}, and quality ID: ' \\\n '{4:d}.\\n'.format(attributes['hardware_id'],\n attributes['subcategory_id'],\n attributes['specification_id'],\n attributes['environment_active_id'],\n attributes['quality_id'])\n\n if attributes['piQ'] <= 0.0:\n _msg = _msg + 'RAMSTK WARNING: piQ is 0.0 when calculating ' \\\n 'resistor, hardware ID: {0:d}, quality ID: ' \\\n '{1:d}.'.format(attributes['hardware_id'],\n attributes['quality_id'])\n\n # Calculate the hazard rate.\n attributes['hazard_rate_active'] = (\n attributes['lambda_b'] * attributes['piQ'])\n\n return attributes, _msg", "def identity_block(self,X,stage,block):\n conv_name_base='res'+str(stage)+block+'_branch'\n bn_name_base='bn'+str(stage)+block+'_branch'\n \n #retrieve the filters\n #F1,F2,F3=filters\n \n X_shortcut=X\n \n #first component of main path\n X=Conv3D(self.filter, (3,3,1),kernel_initializer=self.init,kernel_regularizer=self.regularizer,padding='same')(X)\n X=BatchNormalization(axis=3)(X)\n X=Activation('relu')(X)\n \n #second component of main path\n X=Conv3D(self.filter, (3,3,1),kernel_initializer=self.init,kernel_regularizer=self.regularizer,padding='same')(X)\n\n \n #final step, add the shortcut\n X=Add()([X,X_shortcut])\n X=Activation('relu')(X)\n \n \n return X", "def run(self, voxels, entry='all', freq_raw=False):\n\n # Note. 
in this case we are processing all raw data into the data \n # attribute, so despite having multiple raw FIDs, we are really \n # only processing one voxel, so no for loop\n\n # local reference to input data\n self.raw = self._dataset.get_source_data('prep')\n\n # Choose voxel - for saving result for current single voxel plot\n self.voxel = voxels[0]\n\n # select the chain processing functor based on the entry point\n if entry == 'all':\n funct_fidsum_wbnaa.do_processing_all(self)\n else:\n print('oooops!')\n\n # save data and parameter results into the Block results arrays\n self._block.data[0,0,0,:] = self.time_summed_offset.copy()\n \n # Return values specific to calling Tab that contains this Block.Chain\n # Used to update its self.view (plot_panel_spectrum object).\n\n plot_results = { 'freq_current' : self.freq_current.copy(),\n 'freq_summed' : self.freq_summed.copy(),\n 'freq_summed_offset' : self.freq_summed_offset.copy() }\n \n return plot_results", "def dumpData(self,out,index):\n #--SCVR\n out.pack('4siBB2sB',\n 'SCVR', 5+len(self.text), index+48, self.type, self.func, self.oper)\n if self.text: out.write(self.text)\n #--Value\n if isinstance(self.value,int):\n out.packSub('INTV','i', self.value)\n else:\n out.packSub('FLTV','f', self.value)", "def __init__(self,config = None):\n \n self.join_path = join_path\n self.label_path = cfg['labels_path']\n self.pick_path = (cfg['result_path'] + cfg['pickle_path'])\n self.label_dir = os.path.join(CWD_PATH,self.join_path, self.label_path)\n\n #Variables inherent to the Fluent data: \n self.num_ins = 4\n\n self.scale_var = cfg['scale_var']\n # User set values are below. These can be adjusted in config.yml \n self.MSE_thresh1 = (cfg['thresh1']*self.scale_var)**2\n self.MSE_thresh2 = (cfg['thresh2']*self.scale_var)**2\n self.MSE_thresh3 = (cfg['thresh3']*self.scale_var)**2\n \n self.rew_goal = cfg['reward'] * self.scale_var\n\n self.noise = cfg['noise']\n self.minmaxbuffer = cfg['minmaxbuffer']\n\n # Get the function of input-output mapping, and max & min:\n [self.O_CH4_flow_uniformity, mins,maxes] = self.get_funcs('O_CH4_flow_uniformity')\n [self.O_CH4_mol_frac, mins,maxes] = self.get_funcs('O_CH4_mol_frac')\n [self.O_t, mins, maxes] = self.get_funcs('O_t')\n \n self.mins = mins# * self.scale_var\n self.maxes = maxes#* self.scale_var\n #Action range is a percentage of the total range\n self.action_range = cfg['action_range']*self.scale_var\n\n #Action space is the up & down range for the 4 actions \n self.action_space = Box(-self.action_range, self.action_range, shape=(self.num_ins,), dtype=np.float32)\n\n # For ref, this is a 10d state space:\n #in: 1 ch4 flow, 2 ch4 t, 3 o2 flow, 4 o2 t,\n #out: 5 flow unif, 6 mol frac, 7 temp\n #out - target: 8 flow unif, 9 mol frac, 10 temp\n \n self.observation_space = Tuple((Box(self.mins.values[0],self.maxes.values[0],shape=(1,), dtype=np.float32),\n Box(self.mins.values[1],self.maxes.values[1],shape=(1,), dtype=np.float32),\n Box(self.mins.values[2],self.maxes.values[2],shape=(1,), dtype=np.float32),\n Box(self.mins.values[3],self.maxes.values[3],shape=(1,), dtype=np.float32),\n Box(self.mins.values[4],self.maxes.values[4],shape=(1,), dtype=np.float32),\n Box(self.mins.values[5],self.maxes.values[5],shape=(1,), dtype=np.float32),\n Box(self.mins.values[6],self.maxes.values[6],shape=(1,), dtype=np.float32),\n Box(self.mins.values[4],self.maxes.values[4],shape=(1,), dtype=np.float32),\n Box(self.mins.values[5],self.maxes.values[5],shape=(1,), dtype=np.float32),\n 
Box(self.mins.values[6],self.maxes.values[6],shape=(1,), dtype=np.float32)))\n \n # TODO this isn't really a proper gym spec\n self._spec = lambda: None\n self._spec.id = \"AllVar-v0\"\n \n # For rendering:\n self.viewer = None\n self.labels = cfg['labels']\n \n #initialize variables for tracking:\n self.episode = 0\n self.reward = 0\n self.reset()", "def compile_xilinx_graph(self):\n pass", "def levelsets_to_vector_field(levelsets, stepsize):\r\n vector_field_shape = levelsets[0][0].shape\r\n y_comp_combined = np.ndarray(vector_field_shape)\r\n x_comp_combined = np.ndarray(vector_field_shape)\r\n y_comp_combined.fill(np.nan)\r\n x_comp_combined.fill(np.nan)\r\n\r\n for source, target in levelsets:\r\n labels_present = set(np.array([source.flatten(),target.flatten()]).flatten())\r\n labels_present.remove(0)#relates to background\r\n\r\n #print(labels_present)\r\n for l in labels_present:\r\n\r\n source_cluster = source == l\r\n target_cluster = target == l\r\n\r\n\r\n \"\"\"plt.imshow(source_cluster.astype(np.int32)+target_cluster.astype(np.int32))\r\n plt.show()\r\n print(\"-----------\")\"\"\"\r\n\r\n #plot_gradient_field(source_cluster.astype(np.int32), target_cluster.astype(np.int32))\r\n\r\n y_comp, x_comp = array_to_vector_field(source_cluster, target_cluster, stepsize=stepsize)\r\n y_comp_combined[~np.isnan(y_comp)] = y_comp[~np.isnan(y_comp)]\r\n x_comp_combined[~np.isnan(x_comp)] = x_comp[~np.isnan(x_comp)]\r\n return y_comp_combined, x_comp_combined", "def convert_propbank(detail=True):\n\n out_dir = \"../data/wsj_propbank/\"\n os.system(\"rm -rf %s\" % (out_dir, ))\n os.system(\"mkdir -p %s\" % (out_dir, ))\n\n pb_instances = propbank.instances()\n # Count at first\n verb2idx = {}\n verb2frames = {}\n for i in range(0, len(pb_instances)):\n inst = pb_instances[i]\n verb_lemma, frame = inst.roleset.split(\".\")\n if verb_lemma not in verb2idx:\n verb2idx[verb_lemma] = []\n verb2idx[verb_lemma].append(i)\n if verb_lemma not in verb2frames:\n verb2frames[verb_lemma] = []\n if frame not in verb2frames[verb_lemma]:\n verb2frames[verb_lemma].append(frame)\n verb_nums = len(verb2idx.keys())\n verb_counter = 0\n\n pair_label = {'-LRB-':'(', '-RRB-':')', '-LCB-':'(', '-RCB-':')'}\n for verb_lemma, idxs in verb2idx.items():\n verb_counter += 1\n if len(verb2frames[verb_lemma]) < 2:\n continue\n fh = open(\"%s/%s\" % (out_dir, verb_lemma), \"w\")\n if detail:\n print(\"processing %s(%s/%s)\"\n % (verb_lemma, verb_counter, verb_nums))\n for i in idxs:\n inst = pb_instances[i]\n fileid = inst.fileid\n sent_num = inst.sentnum\n verb_pos = inst.wordnum\n verb_lemma, frame = inst.roleset.split(\".\")\n section = [x for x in fileid if x.isdigit()][0:2]\n section = \"\".join(section)\n fileid_for_ptb = \"WSJ/%s/%s\" % (section, fileid.upper())\n\n tagged_sent = ptb.tagged_sents(fileid_for_ptb)[sent_num]\n # Change tagged_sent from [tuples] to [list]\n tagged_sent = [[x[0], x[1]]for x in tagged_sent]\n verb_bak = tagged_sent[verb_pos][0]\n verb_identifier = \"verb_identifier_xxxxx\"\n tagged_sent[verb_pos][0] = verb_identifier\n sent = []\n for (token, tag)in tagged_sent:\n if tag != '-NONE-':\n if token in pair_label:\n token = pair_label[token]\n sent.append(token)\n sent = \" \".join(sent)\n sent_toks = nltk.sent_tokenize(sent)\n candidate_sent = None\n for sent_tok in sent_toks:\n if sent_tok.find(verb_identifier) >= 0:\n candidate_sent = sent_tok\n left_sent, right_sent = candidate_sent.split(verb_identifier)\n left_sent = left_sent.strip()\n right_sent = right_sent.strip()\n out_line = 
\"%s\\t%s\\t%s\\t%s\" % (frame, left_sent, verb_bak, right_sent)\n out_line = remove_punctuations(out_line)\n print(out_line, file=fh)\n fh.close()", "def render(self): # pragma: no cover\n from graphviz import Digraph\n dot = Digraph(name=\"top\")\n for block in self.blocks:\n if isinstance(block, Branch):\n label = \"if \" + astor.to_source(block.cond)\n dot.node(str(id(block)), label.rstrip(), {\"shape\": \"invhouse\"})\n elif isinstance(block, Yield):\n label = astor.to_source(block.value)\n # label += \"\\nLive Ins : \" + str(block.live_ins)\n # label += \"\\nLive Outs : \" + str(block.live_outs)\n # label += \"\\nGen : \" + str(block.gen)\n # label += \"\\nKill : \" + str(block.kill)\n dot.node(str(id(block)), label.rstrip(), {\"shape\": \"oval\"})\n elif isinstance(block, BasicBlock):\n label = \"\\n\".join(astor.to_source(stmt).rstrip() for stmt in block.statements)\n # label += \"\\nLive Ins : \" + str(block.live_ins)\n # label += \"\\nLive Outs : \" + str(block.live_outs)\n # label += \"\\nGen : \" + str(block.gen)\n # label += \"\\nKill : \" + str(block.kill)\n dot.node(str(id(block)), label.rstrip(), {\"shape\": \"box\"})\n elif isinstance(block, HeadBlock):\n label = \"Initial\"\n dot.node(str(id(block)) + \"_start\", label.rstrip(), {\"shape\": \"doublecircle\"})\n label = \"\\n\".join(astor.to_source(stmt).rstrip() for stmt in block.initial_statements)\n # label += \"\\nLive Ins : \" + str(block.live_ins)\n # label += \"\\nLive Outs : \" + str(block.live_outs)\n # label += \"\\nGen : \" + str(block.gen)\n # label += \"\\nKill : \" + str(block.kill)\n dot.node(str(id(block)), label.rstrip(), {\"shape\": \"box\"})\n dot.edge(str(id(block)) + \"_start\", str(id(block)))\n else:\n raise NotImplementedError(type(block))\n # for source, sink, label in self.edges:\n for sink, label in block.outgoing_edges:\n dot.edge(str(id(block)), str(id(sink)), label)\n\n\n file_name = tempfile.mktemp(\"gv\")\n dot.render(file_name, view=True)\n # with open(\"cfg.dot\", \"w\") as file:\n # file.write(dot.source)\n # exit()", "def process_tree(tree):\n c = circuit()\n l = line()\n names = {}\n procedures = []\n for lst in tree.children:\n print(lst)\n if type(lst[0]) is str:\n names[lst[0]] = lst[1]\n else:\n procedures.append(lst)\n print(names)\n #print(procedures)\n\n for proc in procedures:\n\n proc_elements_names = proc[0]\n proc_name = proc[1]\n\n #print(proc_elements_names)\n #print(proc_name)\n\n if proc_name == \"set_mode\":\n mode_name = proc_elements_names[0]\n if mode_name != \"draw-mode\": \n c.set_mode(mode_name)\n elif mode_name == \"draw-mode\":\n l1 = line()\n # draw mode is different from other modes\n for element in names:\n e = CompleteElement(element)\n e.set_other_attrs(names[element])\n e.process_other_attrs()\n l1.addElement(e)\n c.connectInSeries(l1)\n c.set_mode(\"draw-mode\")\n \n \n if proc_name == \"series\":\n l1 = line()\n for element in proc_elements_names:\n l1.addElement(names[element])\n l = l1\n c.connectInSeries(l)\n #raise SyntaxError(\"Alias {0} referrenced before assignment\".format(item[0]))\n\n elif proc_name == \"parallel\":\n l1 = line()\n for element in proc_elements_names:\n l1.addElement(names[element])\n c.connectInParallel(l1)\n l1 = line()\n\n\n elif proc_name == \"add_parallel\":\n new_element = proc_elements_names[1]\n old_element = proc_elements_names[0]\n l1 = line()\n l1.addElement(names[new_element])\n c.connection.append(l1)\n\n\n elif proc_name == \"add_series\":\n new_element = proc_elements_names[1]\n old_element = 
proc_elements_names[0]\n for ln in c.connection:\n for e in ln.elements:\n if names[old_element] == e:\n ln.addElement(names[new_element])\n\n\n c.evaluate(\"output.png\")\n #print(c)", "def forward(self, Ca, mask, residue_idx, chain_labels):\n if self.augment_eps > 0:\n Ca = Ca + self.augment_eps * torch.randn_like(Ca)\n\n D_neighbors, E_idx, mask_neighbors = self._dist(Ca, mask)\n\n Ca_0 = torch.zeros(Ca.shape, device=Ca.device)\n Ca_2 = torch.zeros(Ca.shape, device=Ca.device)\n Ca_0[:,1:,:] = Ca[:,:-1,:]\n Ca_1 = Ca\n Ca_2[:,:-1,:] = Ca[:,1:,:]\n\n V, O_features = self._orientations_coarse(Ca, E_idx)\n \n RBF_all = []\n RBF_all.append(self._rbf(D_neighbors)) #Ca_1-Ca_1\n RBF_all.append(self._get_rbf(Ca_0, Ca_0, E_idx)) \n RBF_all.append(self._get_rbf(Ca_2, Ca_2, E_idx))\n\n RBF_all.append(self._get_rbf(Ca_0, Ca_1, E_idx))\n RBF_all.append(self._get_rbf(Ca_0, Ca_2, E_idx))\n\n RBF_all.append(self._get_rbf(Ca_1, Ca_0, E_idx))\n RBF_all.append(self._get_rbf(Ca_1, Ca_2, E_idx))\n\n RBF_all.append(self._get_rbf(Ca_2, Ca_0, E_idx))\n RBF_all.append(self._get_rbf(Ca_2, Ca_1, E_idx))\n\n\n RBF_all = torch.cat(tuple(RBF_all), dim=-1)\n\n\n offset = residue_idx[:,:,None]-residue_idx[:,None,:]\n offset = gather_edges(offset[:,:,:,None], E_idx)[:,:,:,0] #[B, L, K]\n\n d_chains = ((chain_labels[:, :, None] - chain_labels[:,None,:])==0).long()\n E_chains = gather_edges(d_chains[:,:,:,None], E_idx)[:,:,:,0]\n E_positional = self.embeddings(offset.long(), E_chains)\n E = torch.cat((E_positional, RBF_all, O_features), -1)\n \n\n E = self.edge_embedding(E)\n E = self.norm_edges(E)\n \n return E, E_idx", "def trans_setup():\n # slot7 slot6 slot5 slot4 slot3 slot2 slot1 <------ beam direction (slot 8 is currently B-fiber only)\n # Be Be Be Be Be Be Be lens material\n # 1.5 1.5 0.5 0.5 0.5 0.5 0.5 lens radius [mm]\n # 1 1 5 8 4 2 1 number of lenses\n lens_R=[0.5,0.5,0.5,0.5,0.5,1.5,1.5]\n lens_mat=['Be','Be','Be','Be','Be','Be','Be']\n lens_N=[1,2,4,8,5,1,1]\n trans_pos=[35.2,35.8]\n return {'lens_material':lens_mat,'lens_number':lens_N,'lens_radius':lens_R,'trans_position':trans_pos}", "def serialize(self):\n p = []\n p.append(self.particleCode)\n if self.particleCode == 'manual':\n p.append(str(self.particleMass))\n p.append(str(self.particleCharge))\n p.append(str(self.x0))\n p.append(str(self.y0))\n p.append(str(self.z0))\n if self.useKineticEnergy:\n p.append('Y')\n p.append(str(self.kineticEnergy))\n else:\n p.append('n')\n p.append(str(self.vx0))\n p.append(str(self.vy0))\n p.append(str(self.vz0))\n p.append(self.fieldCode)\n if self.fieldCode == 'Drift':\n p.append(str(self.fieldBaseStrength[0]))\n p.append(str(self.fieldBaseStrength[1]))\n p.append(str(self.fieldBaseStrength[2]))\n p.append(str(self.fieldGradient[0]))\n elif self.fieldCode == 'Smooth':\n p.append(str(self.fieldBaseStrength[0]))\n p.append(str(self.fieldGradient[0]))\n p.append(str(self.fieldGradient[1]))\n elif self.fieldCode == 'Sharp':\n p.append(str(self.fieldBaseStrength[0]))\n p.append(str(self.fieldGradient[0]))\n p.append(str(self.fieldGradient[1]))\n p.append(str(self.fieldLength))\n elif self.fieldCode == 'Sine':\n p.append(str(self.fieldBaseStrength[0]))\n p.append(str(self.fieldGradient[0]))\n p.append(str(self.fieldGradient[1]))\n p.append(str(self.fieldLength))\n p.append(str(self.fieldFreq))\n elif self.fieldCode == 'Helix':\n p.append(str(self.fieldBaseStrength[0]))\n p.append(str(self.fieldBaseStrength[1]))\n p.append(str(self.fieldGradient[0]))\n p.append(str(self.fieldGradient[1]))\n 
p.append(str(self.fieldGradient[2]))\n p.append(str(self.fieldLength))\n p.append(str(self.fieldFreq))\n elif self.fieldCode == 'Tokamak':\n p.append(str(self.fieldBaseStrength[0]))\n p.append(str(self.fieldBaseStrength[1]))\n p.append(str(self.fieldGradient[0]))\n p.append(str(self.fieldGradient[1]))\n p.append(str(self.fieldGradient[2]))\n p.append(str(self.fieldGradient[3]))\n p.append(str(self.fieldGradient[4]))\n p.append(str(self.fieldLength))\n p.append(str(self.fieldFreq))\n else:\n for val in self.fieldBaseStrength:\n p.append(str(val))\n p.append(str(self.initialTime))\n p.append(str(self.endTime))\n p.append(str(self.timeStep))\n p.append(str(self.tolerance))\n return p", "def fst_tostring(self, fst_1, idx=False):\n\n fst_string = 'Transducer\\n'\n for state in fst_1.states:\n for arc in state.arcs:\n olabel = self.wmap[arc.olabel].encode('utf-8') if not idx and arc.olabel in self.wmap else arc.olabel\n\n fst_string += '{} -> {} / {} : {} / {}\\n'.format(state.stateid, arc.nextstate, arc.ilabel, olabel,\n float(arc.weight))\n\n if state.final:\n fst_string += '%s / %s\\n' % (state.stateid, state.final)\n\n fst_string += '-------\\n'\n\n return fst_string", "def chainLabel(self,i):\n assert(i >= 0 and i < self.nAtoms())\n assert(self._c_structure is not NULL)\n cdef char label[2]\n label[0] = freesasa_structure_atom_chain(self._c_structure,i)\n label[1] = '\\0'\n return label", "def createAddOrSelectLabelMapNode(self, script=False):\r\n # productive\r\n profprint()\r\n print \"creating label map for working intensity volume\"\r\n # create, select label map\r\n volLogic = slicer.modules.volumes.logic()\r\n sliceLogic = slicer.app.layoutManager().sliceWidget(\"Red\").sliceLogic()\r\n vn = sliceLogic.GetBackgroundLayer().GetVolumeNode()\r\n self.labelMapNode = slicer.util.getNode(vn.GetName() + \"-label\")\r\n if not self.labelMapNode:\r\n self.labelMapNode = volLogic.CreateAndAddLabelVolume(slicer.mrmlScene, vn, vn.GetName() + \"-label\")\r\n # select label volume\r\n if not script: #TODO guess there is a bug here (at least while testing with parSearch): also changes the main volume!!\r\n selectionNode = slicer.app.applicationLogic().GetSelectionNode()\r\n selectionNode.SetReferenceActiveLabelVolumeID(self.labelMapNode.GetID())\r\n #slicer.app.applicationLogic().PropagateVolumeSelection() #<<<this line causes unpredictable volume switching\r\n #set half transparency\r\n scRed=slicer.app.layoutManager().sliceWidget(\"Red\").sliceController()\r\n scRed.setLabelMapOpacity(.5)\r\n scYel=slicer.app.layoutManager().sliceWidget(\"Yellow\").sliceController()\r\n scYel.setLabelMapOpacity(.5)\r\n scGrn=slicer.app.layoutManager().sliceWidget(\"Green\").sliceController()\r\n scGrn.setLabelMapOpacity(.5)\r\n #enable label map outline display mode\r\n sRed = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNodeRed\")\r\n if sRed == None :\r\n sRed = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNode1\")\r\n sRed.SetUseLabelOutline(1)\r\n sYel = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNodeYellow\")\r\n if sYel == None :\r\n sYel = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNode2\")\r\n sYel.SetUseLabelOutline(1)\r\n sGrn = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNodeGreen\")\r\n if sGrn == None :\r\n sGrn = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNode3\")\r\n sGrn.SetUseLabelOutline(1)\r\n self.editorWidget.setMasterNode(vn)\r\n self.editorWidget.setMergeNode(self.labelMapNode)", "def results(self):\n self.report('workchain succesfully completed')\n self.out('scf_parameters', 
self.ctx.workchain_scf.out.output_parameters)\n self.out('band_parameters', self.ctx.workchain_bands.out.output_parameters)\n self.out('band_structure', self.ctx.workchain_bands.out.output_band)\n\n if 'group' in self.inputs:\n output_band = self.ctx.workchain_bands.out.output_band\n group, _ = Group.get_or_create(name=self.inputs.group.value)\n group.add_nodes(output_band)\n self.report(\"storing the output_band<{}> in the group '{}'\"\n .format(output_band.pk, self.inputs.group.value))", "def pack_firmware(self, work_dir, jobclient, ro, rw, version_string=\"\"):\n dts_file_path = ro / \"zephyr\" / \"zephyr.dts\"\n\n # Copy the inputs into the work directory so that Binman can\n # find them under a hard-coded name.\n shutil.copy2(ro / \"zephyr\" / self.ro_file, work_dir / \"zephyr_ro.bin\")\n shutil.copy2(rw / \"zephyr\" / self.rw_file, work_dir / \"zephyr_rw.bin\")\n\n # Version in FRID/FWID can be at most 31 bytes long (32, minus\n # one for null character).\n if len(version_string) > 31:\n version_string = version_string[:31]\n\n proc = jobclient.popen(\n [\n \"binman\",\n \"-v\",\n \"5\",\n \"build\",\n \"-a\",\n \"version={}\".format(version_string),\n \"-d\",\n dts_file_path,\n \"-m\",\n \"-O\",\n work_dir,\n ],\n cwd=work_dir,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n encoding=\"utf-8\",\n )\n\n zmake.multiproc.log_output(self.logger, logging.DEBUG, proc.stdout)\n zmake.multiproc.log_output(self.logger, logging.ERROR, proc.stderr)\n if proc.wait(timeout=60):\n raise OSError(\"Failed to run binman\")\n\n yield work_dir / \"zephyr.bin\", \"zephyr.bin\"\n yield ro / \"zephyr\" / \"zephyr.elf\", \"zephyr.ro.elf\"\n yield rw / \"zephyr\" / \"zephyr.elf\", \"zephyr.rw.elf\"", "def _add_label_switching_node( self,\n node_tree,\n label_vec,\n last_element,\n label_ID_node=None,\n node_index=0,\n uv_map=None, \n node_offset=[0,0]):\n\n # define local variables #######################################################################################\n _step_node_width = 200 # x seperation of nodes\n _step_node_height = 200 # y seperation of nodes\n ################################################################################ end of define local variables #\n\n # create image ID handle #######################################################################################\n if label_ID_node is None:\n label_ID_node = node_tree.node_tree.nodes.new(\"ShaderNodeValue\")\n label_ID_node.location = ((node_offset[0]-400,node_offset[1]-100))\n label_ID_node.name = \"label_step_ID\"\n label_ID_node.label = \"label_step_ID\"\n label_ID_node.outputs[0].default_value = 1\n ############################################################################### end of create image ID handle #\n\n # create image nodes ###########################################################################################\n _x_offset = (node_index+1)*_step_node_width + node_offset[0]\n _y_offset = (node_index+1)*_step_node_height + node_offset[1]\n\n _semantic_node_offset = [(node_index+1)*_step_node_width*2 + node_offset[0]-1000,(node_index+1)*\\\n _step_node_height + node_offset[1]+200]\n\n _semantic_tree, self._semantic_pass_id = self.create_semantic_nodes(node_tree=self._world_node_tree,\n label_ID_vec=label_vec,\n num_label_per_channel=15, # TODO add in script\n env_mode=True,\n uv_map=uv_map,\n node_offset=_semantic_node_offset)\n\n _semantic_tree.inputs[0].default_value = 1\n\n # create new mix node ######################################################################################\n 
_current_mix_shader_node = node_tree.node_tree.nodes.new(\"ShaderNodeMixRGB\")\n _current_mix_shader_node.location = (((node_index+1)*_step_node_width*2 + node_offset[0],\n (node_index+1)*_step_node_height + node_offset[1]))\n ############################################################################### end of create new mix node #\n\n # create compare node ######################################################################################\n _current_compare_node = node_tree.node_tree.nodes.new(\"ShaderNodeMath\")\n _current_compare_node.location = (((node_index+1)*_step_node_width*2 + node_offset[0],\n node_offset[1]-_step_node_height))\n _current_compare_node.operation = 'COMPARE'\n _current_compare_node.inputs[0].default_value = node_index\n _current_compare_node.inputs[2].default_value = 0 # delta value should be zero for equal comparison\n ############################################################################### end of create compare node #\n\n\n # link nodes togther #######################################################################################\n node_tree.node_tree.links.new(_current_mix_shader_node.inputs[0], _current_compare_node.outputs[0])\n if last_element is not None:\n node_tree.node_tree.links.new(_current_mix_shader_node.inputs[1], last_element.outputs[0])\n node_tree.node_tree.links.new(_current_mix_shader_node.inputs[2], _semantic_tree.outputs[0])\n \n node_tree.node_tree.links.new(_current_compare_node.inputs[1], label_ID_node.outputs[0])\n ################################################################################ end of link nodes togther #\n #################################################################################### end of create image nodes #\n\n # return last mix shader node\n return _current_mix_shader_node, label_ID_node", "def main(self, case, profile):\n case[\"branch\"][:, BR_STATUS] = ones(case[\"branch\"].shape[0])\n mpc = ext2int(case)\n baseMVA, bus, gen, branch, gencost = mpc[\"baseMVA\"], mpc[\"bus\"], mpc[\"gen\"], mpc[\"branch\"], mpc[\"gencost\"]\n\n nb = shape(mpc['bus'])[0] ## number of buses\n nl = shape(mpc['branch'])[0] ## number of branches\n ng = shape(mpc['gen'])[0] ## number of dispatchable injections\n\n f = branch[:, F_BUS] ## list of \"from\" buses\n t = branch[:, T_BUS] ## list of \"to\" buses\n i = range(nl) ## double set of row indices\n # Connection matrix\n Cf = sparse((ones(nl), (i, f)), (nl, nb))\n Ct = sparse((ones(nl), (i, t)), (nl, nb))\n Cg = sparse((ones(ng), (gen[:, GEN_BUS], range(ng))), (nb, ng))\n Branch_R = branch[:, BR_R]\n Branch_X = branch[:, BR_X]\n Cf = Cf.T\n Ct = Ct.T\n # Obtain the boundary information\n Slmax = branch[:, RATE_A] / baseMVA\n\n Pij_l = -Slmax\n Qij_l = -Slmax\n Iij_l = zeros(nl)\n Vm_l = bus[:, VMIN] ** 2\n Pg_l = gen[:, PMIN] / baseMVA\n Qg_l = gen[:, QMIN] / baseMVA\n Alpha_l = zeros(nl)\n Beta_f_l = zeros(nl)\n Beta_t_l = zeros(nl)\n\n Pij_u = Slmax\n Qij_u = Slmax\n Iij_u = Slmax\n Vm_u = bus[:, VMAX] ** 2\n Pg_u = 2 * gen[:, PMAX] / baseMVA\n Qg_u = 2 * gen[:, QMAX] / baseMVA\n Alpha_u = ones(nl)\n Beta_f_u = ones(nl)\n Beta_t_u = ones(nl)\n bigM = max(Vm_u)\n # For the spanning tree constraints\n Root_node = find(bus[:, BUS_TYPE] == REF)\n Root_line = find(branch[:, F_BUS] == Root_node)\n\n Span_f = zeros((nb, nl))\n Span_t = zeros((nb, nl))\n for i in range(nb):\n Span_f[i, find(branch[:, F_BUS] == i)] = 1\n Span_t[i, find(branch[:, T_BUS] == i)] = 1\n\n Alpha_l[Root_line] = 1\n Alpha_u[Root_line] = 1\n Beta_f_l[Root_line] = 0\n Beta_f_l[Root_line] = 0\n\n T = 
len(profile)\n nx = int(3 * nl + nb + 2 * ng)\n lx = concatenate([Alpha_l, Beta_f_l, Beta_t_l, tile(concatenate([Pij_l, Qij_l, Iij_l, Vm_l, Pg_l, Qg_l]), T)])\n ux = concatenate([Alpha_u, Beta_f_u, Beta_t_u, tile(concatenate([Pij_u, Qij_u, Iij_u, Vm_u, Pg_u, Qg_u]), T)])\n vtypes = [\"b\"] * 2 * nl + [\"c\"] * nl + [\"c\"] * nx * T\n\n # Define the decision variables\n NX = lx.shape[0]\n\n # Alpha = Beta_f + Beta_t\n Aeq_f = zeros((nl, NX))\n beq_f = zeros(nl)\n Aeq_f[:, 0: nl] = -eye(nl)\n Aeq_f[:, nl: 2 * nl] = eye(nl)\n Aeq_f[:, 2 * nl:3 * nl] = eye(nl)\n\n # sum(alpha)=nb-1\n Aeq_alpha = zeros((1, NX))\n beq_alpha = zeros(1)\n Aeq_alpha[0, 0: nl] = ones(nl)\n beq_alpha[0] = nb - 1\n\n # Span_f*Beta_f+Span_t*Beta_t = Spanning_tree\n Aeq_span = zeros((nb, NX))\n beq_span = ones(nb)\n beq_span[Root_node] = 0\n Aeq_span[:, nl:2 * nl] = Span_f\n Aeq_span[:, 2 * nl: 3 * nl] = Span_t\n\n # Add system level constraints\n # 1) Active power balance\n Aeq_p = zeros((nb * T, NX))\n beq_p = zeros(nb * T)\n for i in range(T):\n Aeq_p[i * nb:(i + 1) * nb, 3 * nl + i * nx:3 * nl + (i + 1) * nx] = hstack([Ct - Cf, zeros((nb, nl)),\n -diag(Ct * Branch_R) * Ct,\n zeros((nb, nb)), Cg,\n zeros((nb, ng))]).toarray()\n beq_p[i * nb:(i + 1) * nb] = profile[i] * bus[:, PD] / baseMVA\n\n # 2) Reactive power balance\n Aeq_q = zeros((nb * T, NX))\n beq_q = zeros(nb * T)\n for i in range(T):\n Aeq_q[i * nb:(i + 1) * nb, 3 * nl + i * nx:3 * nl + (i + 1) * nx] = hstack([zeros((nb, nl)), Ct - Cf,\n -diag(Ct * Branch_X) * Ct,\n zeros((nb, nb)),\n zeros((nb, ng)), Cg]).toarray()\n beq_q[i * nb:(i + 1) * nb] = profile[i] * bus[:, QD] / baseMVA\n\n Aeq = vstack([Aeq_f, Aeq_alpha, Aeq_span, Aeq_p, Aeq_q]).toarray()\n beq = concatenate([beq_f, beq_alpha, beq_span, beq_p, beq_q])\n\n # Inequality constraints\n A = zeros((nl * T, NX))\n b = zeros(nl * T)\n for i in range(T):\n A[i * nl:(i + 1) * nl, 3 * nl + i * nx + 2 * nl: 3 * nl + i * nx + 3 * nl] = eye(nl)\n A[i * nl:(i + 1) * nl, 0: nl] = -diag(Iij_u)\n\n A_temp = zeros((nl * T, NX))\n b_temp = zeros(nl * T)\n for i in range(T):\n A_temp[i * nl:(i + 1) * nl, 3 * nl + i * nx: 3 * nl + i * nx + nl] = eye(nl)\n A_temp[i * nl:(i + 1) * nl, 0: nl] = -diag(Pij_u)\n A = concatenate([A, A_temp])\n b = concatenate([b, b_temp])\n #\n A_temp = zeros((nl * T, NX))\n b_temp = zeros(nl * T)\n for i in range(T):\n A_temp[i * nl:(i + 1) * nl, 3 * nl + i * nx + nl: 3 * nl + i * nx + 2 * nl] = eye(nl)\n A_temp[i * nl:(i + 1) * nl, 0:nl] = -diag(Qij_u)\n A = concatenate([A, A_temp])\n b = concatenate([b, b_temp])\n\n A_temp = zeros((nl * T, NX))\n for i in range(T):\n A_temp[i * nl:(i + 1) * nl, 3 * nl + i * nx:3 * nl + i * nx + nl] = -2 * diag(Branch_R)\n A_temp[i * nl:(i + 1) * nl, 3 * nl + i * nx + nl:3 * nl + i * nx + 2 * nl] = -2 * diag(Branch_X)\n A_temp[i * nl:(i + 1) * nl, 3 * nl + i * nx + 2 * nl:3 * nl + i * nx + 3 * nl] = diag(Branch_R ** 2) + \\\n diag(Branch_X ** 2)\n A_temp[i * nl:(i + 1) * nl, 3 * nl + i * nx + 3 * nl:3 * nl + i * nx + 3 * nl + nb] = \\\n (Cf.T - Ct.T).toarray()\n A_temp[i * nl:(i + 1) * nl, 0:nl] = eye(nl) * bigM\n b_temp = ones(nl * T) * bigM\n A = concatenate([A, A_temp])\n b = concatenate([b, b_temp])\n\n A_temp = zeros((nl * T, NX))\n for i in range(T):\n A_temp[i * nl:(i + 1) * nl, 3 * nl + i * nx:3 * nl + i * nx + nl] = 2 * diag(Branch_R)\n A_temp[i * nl:(i + 1) * nl, 3 * nl + i * nx + nl:3 * nl + i * nx + 2 * nl] = 2 * diag(Branch_X)\n A_temp[i * nl:(i + 1) * nl, 3 * nl + i * nx + 2 * nl:3 * nl + i * nx + 3 * nl] = -diag(Branch_R ** 2) - \\\n 
diag(Branch_X ** 2)\n A_temp[i * nl:(i + 1) * nl, 3 * nl + i * nx + 3 * nl:3 * nl + i * nx + 3 * nl + nb] = \\\n (-Cf.T + Ct.T).toarray()\n A_temp[i * nl:(i + 1) * nl, 0: nl] = eye(nl) * bigM\n b_temp = ones(nl * T) * bigM\n A = concatenate([A, A_temp])\n b = concatenate([b, b_temp])\n\n Qc = dict()\n for t in range(T):\n for i in range(nl):\n Qc[t * nl + i] = [[int(3 * nl + t * nx + i), int(3 * nl + t * nx + i + nl),\n int(3 * nl + t * nx + i + 2 * nl), int(3 * nl + t * nx + f[i] + 3 * nl)],\n [int(3 * nl + t * nx + i), int(3 * nl + t * nx + i + nl),\n int(3 * nl + t * nx + f[i] + 3 * nl), int(3 * nl + t * nx + i + 2 * nl)],\n [1, 1, -1 / 2, -1 / 2]]\n c = zeros(NX)\n q = zeros(NX)\n c0 = 0\n for t in range(T):\n for i in range(ng):\n c[3 * nl + t * nx + i + 3 * nl + nb] = gencost[i, 5] * baseMVA\n q[3 * nl + t * nx + i + 3 * nl + nb] = gencost[i, 4] * baseMVA * baseMVA\n c0 += gencost[i, 6]\n\n sol = miqcp(c, q, Aeq=Aeq, beq=beq, A=A, b=b, xmin=lx, xmax=ux, vtypes=vtypes, Qc=Qc)\n xx = sol[0]\n Alpha = xx[0:nl]\n Beta_f = xx[nl:2 * nl]\n Beta_t = xx[2 * nl:3 * nl]\n Pij = zeros((nl, T))\n Qij = zeros((nl, T))\n Iij = zeros((nl, T))\n Vi = zeros((nb, T))\n Pg = zeros((ng, T))\n Qg = zeros((ng, T))\n for i in range(T):\n Pij[:, i] = xx[3 * nl + i * nx:3 * nl + i * nx + nl]\n Qij[:, i] = xx[3 * nl + i * nx + nl:3 * nl + i * nx + 2 * nl]\n Iij[:, i] = xx[3 * nl + i * nx + 2 * nl:3 * nl + i * nx + 3 * nl]\n Vi[:, i] = xx[3 * nl + i * nx + 3 * nl:3 * nl + i * nx + 3 * nl + nb]\n Pg[:, i] = xx[3 * nl + i * nx + 3 * nl + nb:3 * nl + i * nx + 3 * nl + nb + ng]\n Qg[:, i] = xx[3 * nl + i * nx + 3 * nl + nb + ng:3 * nl + i * nx + 3 * nl + nb + 2 * ng]\n\n primal_residual = zeros((nl, T))\n for t in range(T):\n for i in range(nl):\n primal_residual[i, t] = Pij[i, t] * Pij[i, t] + Qij[i, t] * Qij[i, t] - Iij[i, t] * Vi[int(f[i]), t]\n\n sol = {\"Pij\": Pij,\n \"Qij\": Qij,\n \"Iij\": Iij,\n \"Vi\": Vi,\n \"Pg\": Pg,\n \"Qg\": Qg,\n \"Alpha\": Alpha,\n \"Beta_f\": Beta_f,\n \"Beta_t\": Beta_t,\n \"residual\": primal_residual,\n \"obj\": sol[1] + c0}\n\n return sol", "def write_bc_vtk(self):\n print \"Creating boundary condition arrays\"\n obst_array = np.zeros(self.nnodes)\n obst_array[list(self.obst_list)] = 100.\n\n #print type(self.inlet_list)\n inlet_array = np.zeros(self.nnodes)\n inlet_array[list(self.inlet_list)] = 200.\n\n outlet_array = np.zeros(self.nnodes)\n outlet_array[list(self.outlet_list)] = 300.\n\n solid_array = np.zeros(self.nnodes)\n solid_array[list(self.solid_list)] = 500.\n \n dims = [int(self.Nx), int(self.Ny), int(self.Nz)]\n origin = [0., 0., 0.]\n dx = self.x[1] - self.x[0]\n spacing = [dx, dx, dx] #uniform lattice\n \n print \"Writing boundary conditions to VTK files\"\n writeVTK(inlet_array,'inlet','inlet.vtk',dims,origin,spacing)\n writeVTK(outlet_array,'outlet','outlet.vtk',dims,origin,spacing)\n writeVTK(obst_array,'obst','obst.vtk',dims,origin,spacing)\n writeVTK(solid_array,'solid','solid.vtk',dims,origin,spacing)", "def write_bc_vtk(self):\n print \"Creating boundary condition arrays\"\n obst_array = np.zeros(self.nnodes)\n obst_array[list(self.obst_list)] = 100.\n\n #print type(self.inlet_list)\n inlet_array = np.zeros(self.nnodes)\n inlet_array[list(self.inlet_list)] = 200.\n\n outlet_array = np.zeros(self.nnodes)\n outlet_array[list(self.outlet_list)] = 300.\n\n solid_array = np.zeros(self.nnodes)\n solid_array[list(self.solid_list)] = 500.\n \n dims = [int(self.Nx), int(self.Ny), int(self.Nz)]\n origin = [0., 0., 0.]\n dx = self.x[1] - self.x[0]\n spacing = [dx, 
dx, dx] #uniform lattice\n \n print \"Writing boundary conditions to VTK files\"\n writeVTK(inlet_array,'inlet','inlet.vtk',dims,origin,spacing)\n writeVTK(outlet_array,'outlet','outlet.vtk',dims,origin,spacing)\n writeVTK(obst_array,'obst','obst.vtk',dims,origin,spacing)\n writeVTK(solid_array,'solid','solid.vtk',dims,origin,spacing)", "def feature_extract(self, CT_pairs):\n instances = []\n for pair in CT_pairs:\n config = pair[0]\n label = pair[1]\n data = []\n featureset = {}\n \n # for nltk NaiveBayes feature selection stuff when doing MaxEnt decoding parser commit this\n# featureset[\"topOfBuffer\"] = self.token_dict[config.beta.top()]\n# featureset[\"topOfStack\"] = self.token_dict[config.sigma.top()]\n# featureset[\"bufferStackPair\"] = (self.token_dict[config.sigma.top()], self.token_dict[config.beta.top()])\n# featureset[\"topOfBuffer\"] = self.POS_dict[config.beta.top()]\n# featureset[\"topOfStack\"] = self.POS_dict[config.sigma.top()]\n# featureset[\"bufferStackPair\"] = tuple((self.POS_dict[config.sigma.top()], self.POS_dict[config.beta.top()]))\n \n # add the (StackTopPOS,BufferTopPOS,bufferchildren_POS) feature\n #value_set = tuple([self.POS_dict[config.sigma.top()], self.POS_dict[config.beta.top()]] + [self.POS_dict[child] for child in self.getBufferChildren(config.beta.top())])\n #featureset[\"bufferStackbufferChildrenPair\"] = value_set\n \n # for MaxEnt decoding stuff\n # token variants\n data.append((\"topOfBuffer\",self.token_dict[config.beta.top()]))\n data.append((\"topOfStack\",self.token_dict[config.sigma.top()]))\n data.append((\"bufferStackPair\",self.token_dict[config.sigma.top()],self.token_dict[config.beta.top()]))\n #POS variants\n data.append((\"topOfBuffer\",self.POS_dict[config.beta.top()]))\n data.append((\"topOfStack\",self.POS_dict[config.sigma.top()]))\n data.append((\"bufferStackPair\",self.POS_dict[config.sigma.top()],self.POS_dict[config.beta.top()]))\n ins = Instance(label=label, data=data)\n #ins = Instance(label=label, data=featureset)\n instances.append(ins)\n \n return instances", "def update_heads(info,\n heads):\n\n info[\"model_params\"][\"boltzmann_dict\"][\"num_heads\"] = heads\n # Concatenate the fingerprints produced by the different heads\n info[\"model_params\"][\"boltzmann_dict\"][\"head_pool\"] = \"concatenate\"\n\n readoutdict = info[\"model_params\"][\"readoutdict\"]\n feat_dim = info[\"model_params\"][\"mol_basis\"]\n\n for key, lst in readoutdict.items():\n for i, dic in enumerate(lst):\n if \"param\" in dic and \"in_features\" in dic.get(\"param\", {}):\n # make sure that the input dimension to the readout is equal to\n # `heads * feat_dim`, where `feat_dim` is the feature dimension\n # produced by each head\n readoutdict[key][i][\"param\"][\"in_features\"] = feat_dim * heads\n break\n info[\"model_params\"][\"readoutdict\"] = readoutdict", "def detection_head_graph(self, feature_map, filters):\n x = KL.Conv2D(filters, (1, 1), strides=(1, 1),\n name=\"detection_head_\" + \"stage_1\", use_bias=True, padding=\"same\")(feature_map)\n x = KL.Activation('relu', name='detection_head_stage_1_activation')(x)\n x = KL.Conv2D(filters, (1, 1), strides=(1, 1),\n name=\"detection_head_\" + \"stage_2\", use_bias=True, padding=\"same\")(x)\n x = KL.Activation('relu', name='detection_head_stage_2_activation')(x)\n x = KL.Conv2D(3, (1, 1), strides=(1, 1),\n name=\"detection_head_\" + \"final_stage\", use_bias=True, padding=\"same\")(x)\n return x", "def gexf_graph():\n # you must replace these lines and supply your own graph\n gexf = 
Gexf(\"author\", \"title\")\n mygraph = gexf.addGraph(\"undirected\", \"static\", \"A web network\")\n atr_type = mygraph.addNodeAttribute('Type', type='string')\n atr_id = mygraph.addNodeAttribute('id', type='string')\n atr_label = mygraph.addNodeAttribute('label', type='string')\n atr_color_r = mygraph.addNodeAttribute('color_r', type='string', defaultValue='0')\n atr_color_g = mygraph.addNodeAttribute('color_g', type='string', defaultValue='0')\n atr_color_b = mygraph.addNodeAttribute('color_b', type='string', defaultValue='0')\n k = 0\n for i in range(min_parts()):\n tmp = mygraph.addNode(set_num[i], name[i], r=\"0\", g=\"0\", b=\"0\")\n tmp.addAttribute(atr_type, \"set\")\n tmp.addAttribute(atr_id, set_num[i])\n tmp.addAttribute(atr_label, name[i])\n for j in range(len(Parts[i][\"Parts\"])):\n if mygraph.nodeExists(Parts[i][\"Parts\"][j][\"number\"]+\"_\"+Parts[i][\"Parts\"][j][\"color\"][\"rgb\"])==0:\n temp = mygraph.addNode((Parts[i][\"Parts\"][j][\"number\"]+\"_\"+Parts[i][\"Parts\"][j][\"color\"][\"rgb\"]), Parts[i][\"Parts\"][j][\"name\"], r=str(int(Parts[i][\"Parts\"][j][\"color\"][\"rgb\"][0:2], 16)), g=str(int(Parts[i][\"Parts\"][j][\"color\"][\"rgb\"][2:4], 16)), b=str(int(Parts[i][\"Parts\"][j][\"color\"][\"rgb\"][4:6], 16)))\n temp.addAttribute(atr_type, \"part\")\n temp.addAttribute(atr_id, (Parts[i][\"Parts\"][j][\"number\"]+\"_\"+Parts[i][\"Parts\"][j][\"color\"][\"rgb\"]))\n temp.addAttribute(atr_label, Parts[i][\"Parts\"][j][\"name\"])\n temp.addAttribute(atr_color_r, Parts[i][\"Parts\"][j][\"color\"][\"rgb\"][0:2])\n temp.addAttribute(atr_color_g, Parts[i][\"Parts\"][j][\"color\"][\"rgb\"][2:4])\n temp.addAttribute(atr_color_b, Parts[i][\"Parts\"][j][\"color\"][\"rgb\"][4:6])\n mygraph.addEdge(str(k), set_num[i], (Parts[i][\"Parts\"][j][\"number\"]+\"_\"+Parts[i][\"Parts\"][j][\"color\"][\"rgb\"]), weight=Parts[i][\"Parts\"][j][\"quantity\"])\n k = k+1\n output_file = open(\"bricks_graph.gexf\", \"wb\")\n gexf.write(output_file)\n return -1", "def _viterbi_decode(self, feats):\n backpointers = []\n\n init_vvars = torch.full((self.batch_size, 1, self.tagset_size), -10000.).to(self.device)\n init_vvars[:, 0, self.tag2idx[START_TAG]] = 0.\n\n forward_var = init_vvars\n for i in range(feats.shape[1]):\n feat = feats[:,i,:]\n\n next_tag_var = forward_var + self.transitions + feat.view(self.batch_size, self.tagset_size, 1)\n best_tag_var, best_tag_id = torch.max(next_tag_var, dim=-1)\n backpointers.append(best_tag_id)\n forward_var = best_tag_var.view(self.batch_size, 1, self.tagset_size)\n\n # Transition to STOP_TAG\n terminal_var = forward_var + self.transitions[self.tag2idx[STOP_TAG]]\n path_score, best_tag_id = torch.max(terminal_var, dim=-1)\n\n # Follow the back pointers to decode the best path.\n best_path = [best_tag_id]\n for bptrs_t in reversed(backpointers):\n # gather the value in bptrs_t according to best_tag_id\n best_tag_id = bptrs_t.gather(dim=-1,index=best_tag_id)\n best_path.append(best_tag_id)\n \n # Pop off the start tag (we dont want to return that to the caller)\n best_path.pop()\n best_path.reverse()\n best_path = torch.cat(best_path, dim=1)\n # best_path = [[self.idx2tag[j] for j in i] for i in best_path.tolist()]\n \n return path_score, best_path", "def encodeToCartBoxesLabels(self, gt_instances):\n raw_boxes_xywh = np.zeros((self.config_data[\"max_boxes_per_frame\"], 5))\n ### initialize gronud truth labels as np.zeros ###\n gt_labels = np.zeros(list(self.cart_shape[1:3]) + \\\n [len(self.anchor_boxes_cart)] + \\\n 
[len(self.config_data[\"all_classes\"]) + 5]) \n\n ### start transferring box to ground turth label format ###\n for i in range(len(gt_instances[\"classes\"])):\n if i > self.config_data[\"max_boxes_per_frame\"]:\n continue\n class_name = gt_instances[\"classes\"][i]\n box_xywh = gt_instances[\"cart_boxes\"][i]\n class_id = self.config_data[\"all_classes\"].index(class_name)\n if i <= self.config_data[\"max_boxes_per_frame\"]:\n raw_boxes_xywh[i, :4] = box_xywh\n raw_boxes_xywh[i, 4] = class_id\n class_onehot = helper.smoothOnehot(class_id, \\\n len(self.config_data[\"all_classes\"]))\n exist_positive = False\n grid_strid = self.cart_grid_strides\n anchors = self.anchor_boxes_cart\n box_xywh_scaled = box_xywh[np.newaxis, :].astype(np.float32)\n box_xywh_scaled[:, :2] /= grid_strid\n anchors_xywh = np.zeros([len(anchors), 4])\n anchors_xywh[:, :2] = np.floor(box_xywh_scaled[:, :2]) + 0.5\n anchors_xywh[:, 2:] = anchors.astype(np.float32)\n\n iou_scaled = helper.iou2d(box_xywh_scaled, anchors_xywh)\n ### NOTE: 0.3 is from YOLOv4, maybe this should be different here ###\n ### it means, as long as iou is over 0.3 with an anchor, the anchor\n ### should be taken into consideration as a ground truth label\n iou_mask = iou_scaled > 0.3\n\n if np.any(iou_mask):\n xind, yind = np.floor(np.squeeze(box_xywh_scaled)[:2]).astype(np.int32)\n ### TODO: consider changing the box to raw yolohead output format ###\n gt_labels[xind, yind, iou_mask, 0:4] = box_xywh\n gt_labels[xind, yind, iou_mask, 4:5] = 1.\n gt_labels[xind, yind, iou_mask, 5:] = class_onehot\n exist_positive = True\n\n if not exist_positive:\n ### NOTE: this is the normal one ###\n ### it means take the anchor box with maximum iou to the raw\n ### box as the ground truth label\n iou_mask = iou_scaled == iou_scaled.max()\n\n if np.any(iou_mask):\n xind, yind = np.floor(np.squeeze(box_xywh_scaled)[:2]).astype(np.int32)\n ### TODO: consider changing the box to raw yolohead output format ###\n gt_labels[xind, yind, iou_mask, 0:4] = box_xywh\n gt_labels[xind, yind, iou_mask, 4:5] = 1.\n gt_labels[xind, yind, iou_mask, 5:] = class_onehot\n\n has_label = False\n if gt_labels.max() != 0:\n has_label = True\n gt_labels = np.where(gt_labels == 0, 1e-16, gt_labels)\n return gt_labels, has_label, raw_boxes_xywh", "def _fusion(self, expert_outputs):\n raise NotImplementedError", "def reagent_label_data(bcl_step):\n\n define_step = None\n indexes = {}\n flowcell_total_reads = 0\n\n for inp, outp in bcl_step.input_output_maps:\n pre_bcl_steps = MASTER_STEPS_UDFS['reagent_labels']['steps']['pre_bcl']\n if inp['parent-process'].type.name not in pre_bcl_steps:\n continue\n\n if outp['output-generation-type'] != 'PerReagentLabel':\n continue\n\n lane = inp['uri']\n art = outp['uri']\n\n index_reads = art.udf.get(\n MASTER_STEPS_UDFS['reagent_labels']['udfs']['reads'])\n\n if index_reads is None or art.qc_flag == 'FAILED':\n continue\n\n flowcell_total_reads += index_reads\n\n sample = art.samples[0] # Will always be only one sample in the list\n application_tag = sample.udf.get('Sequencing Analysis')\n\n if application_tag[0:2] in MASTER_STEPS_UDFS['reagent_labels'][\n 'exclue_tags']:\n continue\n\n if not define_step:\n define_step_outputs, flowcell_target_reads, define_step = get_define_step_data(\n lane)\n\n if not sample.id in define_step_outputs:\n LOG.info('This sample whas put as a pool into the define step: ' +\n sample.id + ' ' + application_tag)\n continue\n\n index = art.reagent_labels[0]\n\n container, lane_nr = lane.location\n if index not in 
indexes:\n indexes[index] = {\n '_id': '_'.join([index, container.name]),\n 'url': index.replace(' ', ''),\n 'index_total_reads': index_reads,\n 'index_target_reads': define_step_outputs[sample.id],\n 'flowcell_target_reads': flowcell_target_reads,\n 'index': index,\n 'sample': sample.id,\n 'lanes': {\n lane_nr: dict(art.udf.items())\n },\n 'flowcell_id': container.name,\n 'flowcell_type': container.type.name,\n 'define_step_udfs': dict(define_step.udf.items()),\n 'define_step_id': define_step.id,\n 'bcl_step_id': bcl_step.id\n }\n else:\n indexes[index]['lanes'][lane_nr] = dict(art.udf.items())\n indexes[index]['index_total_reads'] += index_reads\n\n for index, data in indexes.items():\n data['flowcell_total_reads'] = flowcell_total_reads\n indexes[index] = filter_none(data)\n\n return indexes", "def create_pipeline_flow(\n self, cmp_deriv_subject_directory, nipype_deriv_subject_directory\n ):\n acquisition_model = self.stages[\"Diffusion\"].config.diffusion_imaging_model\n recon_tool = self.stages[\"Diffusion\"].config.recon_processing_tool\n\n recon_model = \"DTI\"\n\n if acquisition_model == \"DSI\":\n recon_model = \"SHORE\"\n else:\n if recon_tool == \"Dipy\" and self.stages[\"Diffusion\"].config.dipy_recon_config.local_model:\n recon_model = \"CSD\"\n elif recon_tool == \"MRtrix\" and self.stages[\"Diffusion\"].config.mrtrix_recon_config.local_model:\n recon_model = \"CSD\"\n\n tracking_model = self.stages[\"Diffusion\"].config.diffusion_model\n\n if tracking_model == \"Deterministic\":\n tracking_model = \"DET\"\n elif tracking_model == \"Probabilistic\":\n tracking_model = \"PROB\"\n\n if self.parcellation_scheme == \"Lausanne2018\":\n bids_atlas_label = \"L2018\"\n elif self.parcellation_scheme == \"NativeFreesurfer\":\n bids_atlas_label = \"Desikan\"\n elif self.parcellation_scheme == \"Custom\":\n bids_atlas_label = self.custom_atlas_name\n if self.custom_atlas_res is not None and self.custom_atlas_res != \"\":\n bids_atlas_label += f'_res-{self.custom_atlas_res}'\n\n # Clear previous outputs\n self.clear_stages_outputs()\n\n # Create diffusion workflow with input and output Identityinterface nodes\n diffusion_flow = pe.Workflow(\n name=\"diffusion_pipeline\",\n base_dir=os.path.abspath(nipype_deriv_subject_directory),\n )\n\n diffusion_inputnode = pe.Node(\n interface=util.IdentityInterface(\n fields=[\n \"diffusion\",\n \"bvecs\",\n \"bvals\",\n \"T1\",\n \"aseg\",\n \"aparc_aseg\",\n \"brain\",\n \"T2\",\n \"brain_mask\",\n \"wm_mask_file\",\n \"roi_volumes\",\n \"roi_graphMLs\",\n \"subjects_dir\",\n \"subject_id\",\n \"parcellation_scheme\",\n ]\n ),\n name=\"inputnode\",\n )\n diffusion_inputnode.inputs.parcellation_scheme = self.parcellation_scheme\n diffusion_inputnode.inputs.atlas_info = self.atlas_info\n\n diffusion_outputnode = pe.Node(\n interface=util.IdentityInterface(fields=[\"connectivity_matrices\"]),\n name=\"outputnode\",\n )\n\n diffusion_flow.add_nodes([diffusion_inputnode, diffusion_outputnode])\n\n # Data import\n datasource = self.create_datagrabber_node(\n base_directory=cmp_deriv_subject_directory,\n bids_atlas_label=bids_atlas_label\n )\n\n # Data sinker for output\n sinker = self.create_datasinker_node(\n base_directory=cmp_deriv_subject_directory,\n bids_atlas_label=bids_atlas_label,\n recon_model=recon_model,\n tracking_model=tracking_model\n )\n\n # fmt:off\n diffusion_flow.connect(\n [\n (datasource, diffusion_inputnode, [(\"diffusion\", \"diffusion\"),\n (\"bvecs\", \"bvecs\"),\n (\"bvals\", \"bvals\"),\n (\"T1\", \"T1\"),\n (\"aseg\", 
\"aseg\"),\n (\"aparc_aseg\", \"aparc_aseg\"),\n (\"brain\", \"brain\"),\n (\"brain_mask\", \"brain_mask\"),\n (\"wm_mask_file\", \"wm_mask_file\")]),\n ]\n )\n # fmt:on\n\n merge_roi_volumes = pe.Node(interface=Merge(5), name=\"merge_roi_volumes\")\n merge_roi_graphmls = pe.Node(interface=Merge(5), name=\"merge_roi_graphmls\")\n\n def remove_non_existing_scales(roi_volumes):\n \"\"\"Returns a list which do not contained any empty element.\n\n Parameters\n ----------\n roi_volumes : list\n A list of output parcellations that might contain empty element\n in the case of the monoscale Desikan scheme for instance\n\n Returns\n -------\n out_roi_volumes : list\n The list with no empty element\n \"\"\"\n out_roi_volumes = []\n for vol in roi_volumes:\n if vol is not None:\n out_roi_volumes.append(vol)\n return out_roi_volumes\n\n # fmt:off\n diffusion_flow.connect(\n [\n (datasource, merge_roi_volumes, [(\"roi_volume_s1\", \"in1\"),\n (\"roi_volume_s2\", \"in2\"),\n (\"roi_volume_s3\", \"in3\"),\n (\"roi_volume_s4\", \"in4\"),\n (\"roi_volume_s5\", \"in5\")]),\n (datasource, merge_roi_graphmls, [(\"roi_graphml_s1\", \"in1\"),\n (\"roi_graphml_s2\", \"in2\"),\n (\"roi_graphml_s3\", \"in3\"),\n (\"roi_graphml_s4\", \"in4\"),\n (\"roi_graphml_s5\", \"in5\")]),\n (merge_roi_volumes, diffusion_inputnode, [((\"out\", remove_non_existing_scales), \"roi_volumes\")],),\n (merge_roi_graphmls, diffusion_inputnode, [((\"out\", remove_non_existing_scales), \"roi_graphMLs\")],),\n ]\n )\n # fmt:on\n\n if self.stages[\"Preprocessing\"].enabled:\n preproc_flow = self.create_stage_flow(\"Preprocessing\")\n # fmt:off\n diffusion_flow.connect(\n [\n (diffusion_inputnode, preproc_flow, [(\"diffusion\", \"inputnode.diffusion\"),\n (\"brain\", \"inputnode.brain\"),\n (\"aseg\", \"inputnode.aseg\"),\n (\"aparc_aseg\", \"inputnode.aparc_aseg\"),\n (\"brain_mask\", \"inputnode.brain_mask\"),\n (\"wm_mask_file\", \"inputnode.wm_mask_file\"),\n (\"roi_volumes\", \"inputnode.roi_volumes\"),\n (\"bvecs\", \"inputnode.bvecs\"),\n (\"bvals\", \"inputnode.bvals\"),\n (\"T1\", \"inputnode.T1\")]),\n ]\n )\n # fmt:on\n\n if self.stages[\"Registration\"].enabled:\n reg_flow = self.create_stage_flow(\"Registration\")\n # fmt:off\n diffusion_flow.connect(\n [\n # (diffusion_inputnode,reg_flow,[('T2','inputnode.T2')]),\n (preproc_flow, reg_flow, [(\"outputnode.T1\", \"inputnode.T1\"),\n (\"outputnode.act_5TT\", \"inputnode.act_5TT\"),\n (\"outputnode.gmwmi\", \"inputnode.gmwmi\"),\n (\"outputnode.bvecs_rot\", \"inputnode.bvecs\"),\n (\"outputnode.bvals\", \"inputnode.bvals\"),\n (\"outputnode.wm_mask_file\", \"inputnode.wm_mask\"),\n (\"outputnode.partial_volume_files\", \"inputnode.partial_volume_files\",),\n (\"outputnode.roi_volumes\", \"inputnode.roi_volumes\"),\n (\"outputnode.brain\", \"inputnode.brain\"),\n (\"outputnode.brain_mask\", \"inputnode.brain_mask\"),\n (\"outputnode.brain_mask_full\", \"inputnode.brain_mask_full\"),\n (\"outputnode.diffusion_preproc\", \"inputnode.target\"),\n (\"outputnode.dwi_brain_mask\", \"inputnode.target_mask\")]),\n (preproc_flow, sinker, [(\"outputnode.bvecs_rot\", \"dwi.@bvecs_rot\"),\n (\"outputnode.diffusion_preproc\", \"dwi.@diffusion_preproc\"),\n (\"outputnode.dwi_brain_mask\", \"dwi.@diffusion_brainmask\")]),\n ]\n )\n # fmt:on\n if self.stages[\"Registration\"].config.registration_mode == \"BBregister (FS)\":\n # fmt:off\n diffusion_flow.connect(\n [\n (diffusion_inputnode, reg_flow, [(\"subjects_dir\", \"inputnode.subjects_dir\"), (\"subject_id\", \"inputnode.subject_id\")]),\n 
]\n )\n # fmt:on\n\n if self.stages[\"Diffusion\"].enabled:\n diff_flow = self.create_stage_flow(\"Diffusion\")\n # fmt:off\n diffusion_flow.connect(\n [\n (preproc_flow, diff_flow, [(\"outputnode.diffusion_preproc\", \"inputnode.diffusion\")]),\n (reg_flow, diff_flow, [(\"outputnode.wm_mask_registered_crop\", \"inputnode.wm_mask_registered\",),\n (\"outputnode.brain_mask_registered_crop\", \"inputnode.brain_mask_registered\",),\n (\"outputnode.partial_volumes_registered_crop\", \"inputnode.partial_volumes\",),\n (\"outputnode.roi_volumes_registered_crop\", \"inputnode.roi_volumes\",),\n (\"outputnode.act_5tt_registered_crop\", \"inputnode.act_5tt_registered\",),\n (\"outputnode.gmwmi_registered_crop\", \"inputnode.gmwmi_registered\",),\n (\"outputnode.grad\", \"inputnode.grad\"),\n (\"outputnode.bvals\", \"inputnode.bvals\"),\n (\"outputnode.bvecs\", \"inputnode.bvecs\")]),\n (reg_flow, sinker, [(\"outputnode.target_epicorrected\", \"dwi.@bdiffusion_reg_crop\",),\n (\"outputnode.grad\", \"dwi.@diffusion_grad\"),\n (\"outputnode.affine_transform\", \"xfm.@affine_transform\"),\n (\"outputnode.warp_field\", \"xfm.@warp_field\"),\n (\"outputnode.T1_registered_crop\", \"anat.@T1_reg_crop\"),\n (\"outputnode.act_5tt_registered_crop\", \"anat.@act_5tt_reg_crop\",),\n (\"outputnode.gmwmi_registered_crop\", \"anat.@gmwmi_reg_crop\"),\n (\"outputnode.brain_registered_crop\", \"anat.@brain_reg_crop\"),\n (\"outputnode.brain_mask_registered_crop\", \"anat.@brain_mask_reg_crop\",),\n (\"outputnode.wm_mask_registered_crop\", \"anat.@wm_mask_reg_crop\",),\n (\"outputnode.roi_volumes_registered_crop\", \"anat.@roivs_reg_crop\",),\n (\"outputnode.partial_volumes_registered_crop\", \"anat.@pves_reg_crop\",)],),\n ]\n )\n # fmt:on\n\n if self.stages[\"Connectome\"].enabled:\n self.stages[\"Connectome\"].config.probtrackx = False\n self.stages[\"Connectome\"].config.subject = self.global_conf.subject\n con_flow = self.create_stage_flow(\"Connectome\")\n # fmt:off\n diffusion_flow.connect(\n [\n (diffusion_inputnode, con_flow, [(\"parcellation_scheme\", \"inputnode.parcellation_scheme\"),\n (\"atlas_info\", \"inputnode.atlas_info\"),\n (\"roi_graphMLs\", \"inputnode.roi_graphMLs\")]),\n (diff_flow, con_flow, [(\"outputnode.track_file\", \"inputnode.track_file\"),\n (\"outputnode.FA\", \"inputnode.FA\"),\n (\"outputnode.ADC\", \"inputnode.ADC\"),\n (\"outputnode.AD\", \"inputnode.AD\"),\n (\"outputnode.RD\", \"inputnode.RD\"),\n (\"outputnode.roi_volumes\", \"inputnode.roi_volumes_registered\",),\n (\"outputnode.skewness\", \"inputnode.skewness\"),\n (\"outputnode.kurtosis\", \"inputnode.kurtosis\"),\n (\"outputnode.P0\", \"inputnode.P0\"),\n (\"outputnode.mapmri_maps\", \"inputnode.mapmri_maps\"),\n (\"outputnode.shore_maps\", \"inputnode.shore_maps\")]),\n (con_flow, diffusion_outputnode, [(\"outputnode.connectivity_matrices\", \"connectivity_matrices\")]),\n (diff_flow, sinker, [(\"outputnode.fod_file\", \"dwi.@fod_file\"),\n (\"outputnode.FA\", \"dwi.@FA\"),\n (\"outputnode.ADC\", \"dwi.@ADC\"),\n (\"outputnode.AD\", \"dwi.@AD\"),\n (\"outputnode.RD\", \"dwi.@RD\"),\n (\"outputnode.skewness\", \"dwi.@skewness\"),\n (\"outputnode.kurtosis\", \"dwi.@kurtosis\"),\n (\"outputnode.P0\", \"dwi.@P0\"),\n (\"outputnode.mapmri_maps\", \"dwi.@mapmri_maps\"),\n (\"outputnode.shore_maps\", \"dwi.@shore_maps\")]),\n (con_flow, sinker, [(\"outputnode.streamline_final_file\", \"dwi.@streamline_final_file\"),\n (\"outputnode.connectivity_matrices\", \"dwi.@connectivity_matrices\")]),\n ]\n )\n # fmt:on\n\n return 
diffusion_flow", "def pack_firmware(self, work_dir, jobclient, version_string=\"\"):\n raise NotImplementedError(\"Abstract method not implemented\")", "def __call__(self, node):\n\n # should throw an error\n if node.cfgInterface == None:\n return\n\n # //\n # // Extract LFN base from included WorkflowSpec parameters\n #//\n base = node.getParameter(\"UnmergedLFNBase\")[0]\n\n # //\n # // iterate over outputmodules/data tiers\n #// Generate LFN, PFN and Catalog for each module\n for modName, outModule in node.cfgInterface.outputModules.items():\n if ( not outModule.has_key('fileName') ):\n msg = \"OutputModule %s does not contain a fileName entry\" % modName\n raise RuntimeError, msg\n outModule['logicalFileName'] = os.path.join(base, outModule['dataTier'], str(self.lfnGroup))\n outModule['logicalFileName'] += '/'\n outModule['logicalFileName'] += outModule['fileName']\n\n return", "def encodeToLabels(self, gt_instances):\n raw_boxes_xyzwhd = np.zeros((self.config_data[\"max_boxes_per_frame\"], 7))\n ### initialize gronud truth labels as np.zeors ###\n gt_labels = np.zeros(list(self.headoutput_shape[1:4]) + \\\n [len(self.anchor_boxes)] + \\\n [len(self.config_data[\"all_classes\"]) + 7])\n\n ### start transferring box to ground turth label format ###\n for i in range(len(gt_instances[\"classes\"])):\n if i > self.config_data[\"max_boxes_per_frame\"]:\n continue\n class_name = gt_instances[\"classes\"][i]\n box_xyzwhd = gt_instances[\"boxes\"][i]\n class_id = self.config_data[\"all_classes\"].index(class_name)\n if i < self.config_data[\"max_boxes_per_frame\"]:\n raw_boxes_xyzwhd[i, :6] = box_xyzwhd\n raw_boxes_xyzwhd[i, 6] = class_id\n class_onehot = helper.smoothOnehot(class_id, len(self.config_data[\"all_classes\"]))\n \n exist_positive = False\n\n grid_strid = self.grid_strides\n anchor_stage = self.anchor_boxes\n box_xyzwhd_scaled = box_xyzwhd[np.newaxis, :].astype(np.float32)\n box_xyzwhd_scaled[:, :3] /= grid_strid\n anchorstage_xyzwhd = np.zeros([len(anchor_stage), 6])\n anchorstage_xyzwhd[:, :3] = np.floor(box_xyzwhd_scaled[:, :3]) + 0.5\n anchorstage_xyzwhd[:, 3:] = anchor_stage.astype(np.float32)\n\n iou_scaled = helper.iou3d(box_xyzwhd_scaled, anchorstage_xyzwhd, \\\n self.input_size)\n ### NOTE: 0.3 is from YOLOv4, maybe this should be different here ###\n ### it means, as long as iou is over 0.3 with an anchor, the anchor\n ### should be taken into consideration as a ground truth label\n iou_mask = iou_scaled > 0.3\n\n if np.any(iou_mask):\n xind, yind, zind = np.floor(np.squeeze(box_xyzwhd_scaled)[:3]).\\\n astype(np.int32)\n ### TODO: consider changing the box to raw yolohead output format ###\n gt_labels[xind, yind, zind, iou_mask, 0:6] = box_xyzwhd\n gt_labels[xind, yind, zind, iou_mask, 6:7] = 1.\n gt_labels[xind, yind, zind, iou_mask, 7:] = class_onehot\n exist_positive = True\n\n if not exist_positive:\n ### NOTE: this is the normal one ###\n ### it means take the anchor box with maximum iou to the raw\n ### box as the ground truth label\n anchor_ind = np.argmax(iou_scaled)\n xind, yind, zind = np.floor(np.squeeze(box_xyzwhd_scaled)[:3]).\\\n astype(np.int32)\n gt_labels[xind, yind, zind, anchor_ind, 0:6] = box_xyzwhd\n gt_labels[xind, yind, zind, anchor_ind, 6:7] = 1.\n gt_labels[xind, yind, zind, anchor_ind, 7:] = class_onehot\n\n has_label = False\n for label_stage in gt_labels:\n if label_stage.max() != 0:\n has_label = True\n gt_labels = [np.where(gt_i == 0, 1e-16, gt_i) for gt_i in gt_labels]\n return gt_labels, has_label, raw_boxes_xyzwhd", "def 
worker(selection_idx, results_table):\n randgen = np.random.RandomState()\n \n # Data-specific positive set partition (the real-world dataset consists of multiple motif classes, always exactly 3 instances of each class stored consequently).\n # The partition assures that the training and test sets do not share instances of the same motif class\n positive_n_train = round(0.8 * len(positive_set_) / 3) * 3\n block_start_idx = randgen.randint(positive_n_train / 3 + 1) * 3 \n block_end_idx = block_start_idx + len(positive_set_) - positive_n_train\n positive_set_part_train, positive_set_part_test = (np.concatenate((positive_set_[: block_start_idx], positive_set_[block_end_idx: ])), positive_set_[block_start_idx: block_end_idx])\n \n # Negative set partition with random selection of elements to match the size of the positive set\n negative_set = negative_set_[randgen.choice(len(negative_set_), size = positive_set_.shape[0], replace = False)]\n negative_n = len(negative_set)\n negative_n_train = round(negative_n * 0.8)\n negative_set_part_train, negative_set_part_test = (negative_set[: negative_n_train], negative_set[negative_n_train: ])\n \n data_part_train = np.float64(np.concatenate((positive_set_part_train, negative_set_part_train)))\n labels_part_train = np.concatenate((np.ones(len(positive_set_part_train), dtype = 'i1'), np.zeros(len(negative_set_part_train), dtype = 'i1')))\n data_part_test = np.float64(np.concatenate((positive_set_part_test, negative_set_part_test)))\n labels_part_test = np.concatenate((np.ones(len(positive_set_part_test), dtype = 'i1'), np.zeros(len(negative_set_part_test), dtype = 'i1')))\n \n # Specifying the pipeline and the CV structure\n pruner = feature_selection.VarianceThreshold()\n scaler = preprocessing.StandardScaler()\n feature_selector = feature_selection.SelectKBest(feature_selection.f_classif)\n classifier = svm.SVC(kernel = 'rbf', gamma = 0.01, class_weight = 'balanced')\n pipeline0 = pipeline.Pipeline([\n ('pruning', pruner),\n ('scaling', scaler),\n ('selection', feature_selector),\n ('classification', classifier)\n ])\n cv_structure = model_selection.StratifiedShuffleSplit(n_splits = 10, test_size = 0.2)\n scoring = 'recall_macro' #same as balanced accuracy\n grid = model_selection.GridSearchCV(pipeline0, scoring = scoring, param_grid = param_grid, cv = cv_structure, n_jobs = 1)\n \n # Training the pipeline, saving the data\n grid.fit(data_part_train, labels_part_train)\n results_table[selection_idx][0] = np.log10(grid.best_params_['classification__C'])\n results_table[selection_idx][1] = grid.best_params_['selection__k']\n results_table[selection_idx][2] = grid.best_score_\n \n # Testing the pipeline, saving the data\n results_table[selection_idx][3] = grid.score(data_part_test, labels_part_test)", "def _vVBEL_XXXX(self,vKNOT=None,OBJTYPE=None):\r\n\r\n logStr = \"{0:s}.{1:s}: \".format(self.__class__.__name__, sys._getframe().f_code.co_name)\r\n logger.debug(\"{0:s}{1:s}\".format(logStr,'Start.')) \r\n \r\n try: \r\n \r\n #'KVR','PZON_NAME', 'FSTF_NAME', 'STOF_NAME', 'GMIX_NAME','UTMP_NAME'\r\n\r\n\r\n\r\n vXXXX=None\r\n\r\n vXXXX=pd.merge(self.dataFrames[OBJTYPE],vKNOT,left_on='fkKI',right_on='pk',suffixes=('','_i')) \r\n vXXXX=vXXXX[['fkKK','BESCHREIBUNG','IDREFERENZ','pk','tk','NAME','CONT','CONT_VKNO','pk_i','ZKOR']]\r\n vXXXX.rename(columns={'NAME':'NAME_i','CONT':'CONT_i','CONT_VKNO':'CONT_VKNO_i','ZKOR':'Z_i'},inplace=True)\r\n\r\n vXXXX=pd.merge(vXXXX,vKNOT,left_on='fkKK',right_on='pk',suffixes=('','_k'))\r\n 
vXXXX=vXXXX[['BESCHREIBUNG','IDREFERENZ','pk','tk','NAME_i','CONT_i','CONT_VKNO_i','Z_i','pk_i','NAME','CONT','CONT_VKNO','pk_k','ZKOR']]\r\n vXXXX.rename(columns={'NAME':'NAME_k','CONT':'CONT_k','CONT_VKNO':'CONT_VKNO_k','ZKOR':'Z_k'},inplace=True)\r\n \r\n vXXXX=vXXXX.assign(OBJTYPE=lambda x: OBJTYPE)\r\n vXXXX=vXXXX[['OBJTYPE','BESCHREIBUNG','IDREFERENZ','pk','tk','NAME_i','CONT_i','CONT_VKNO_i','Z_i','pk_i','NAME_k','CONT_k','CONT_VKNO_k','Z_k','pk_k']]\r\n\r\n except Exception as e:\r\n logStrFinal=\"{:s}Exception: Line: {:d}: {!s:s}: {:s}\".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))\r\n logger.debug(logStrFinal) \r\n vXXXX=None\r\n \r\n finally:\r\n logger.debug(\"{0:s}{1:s}\".format(logStr,'_Done.'))\r\n return vXXXX", "def process_circuits(self, processor_fn, updated_aliases=None):\n P = processor_fn # shorthand\n cpy = LsGermsSerialStructure(self.Ls, list(map(P, self.germs)),\n self.nMinorRows, self.nMinorCols,\n updated_aliases, self.sequenceRules)\n cpy.allstrs = list(map(P, self.allstrs))\n cpy.allstrs_set = set(cpy.allstrs)\n cpy.unindexed = list(map(P, self.unindexed))\n cpy._plaquettes = {k: v.process_circuits(P, updated_aliases) for k, v in self._plaquettes.items()}\n cpy._firsts = [(L, P(germ)) for (L, germ) in self._firsts]\n cpy._baseStrToLGerm = {P(base): (L, P(germ)) for base, (L, germ) in self._baseStrToLGerm.items()}\n return cpy", "def build(self):\n super(VaporStateBlockData, self).build()\n\n # Object reference for molecular weight if needed by CV1D\n # Molecular weights\n self.mw_comp = Reference(self.params.mw_comp)\n\n self.flow_mol = Var(initialize=1.0,\n domain=NonNegativeReals,\n units=pyunits.mol / pyunits.s,\n doc='Total molar flowrate')\n\n self.mole_frac_comp = Var(self.component_list,\n domain=NonNegativeReals,\n bounds=(0, 1),\n units=None,\n initialize=1 / len(self.component_list),\n doc='Component mole fractions [-]')\n\n self.pressure = Var(initialize=101325,\n domain=NonNegativeReals,\n units=pyunits.Pa,\n doc='Pressure [Pa]')\n\n self.temperature = Var(initialize=298.15,\n domain=NonNegativeReals,\n units=pyunits.K,\n doc='Temperature [K]')\n\n # Sum mole fractions if not inlet block\n if self.config.defined_state is False:\n def sum_component_eqn(b):\n return b.flow_mol == sum(b.flow_mol_comp[j]\n for j in b._params.component_list)\n self.sum_component_eqn = Constraint(rule=sum_component_eqn)", "def exec_attention(self,curr_step): \n\n assert(self.curr_step_idx > 0 and self.dlist is not None), \"Step Error: Must call init before combine\" \n \n detectType = curr_step[\"detectionNetwork\"]\n paramsFile = curr_step[\"paramsFile\"]\n funclist = curr_step[\"funclist\"]\n\n #verify raw data & dlist\n # self.B_VER(self.sess_path, self.dlist)\n raw_datadir = self.sess_path\n dest_datadir = self.sess_path \n\n model_dict = {\"detectType\": detectType, \"paramsFile\" : paramsFile} \n\n for i, folder in enumerate(self.dlist):\n flist = funclist[i]\n self.data_utils.DETECT(raw_datadir, folder, dest_datadir, model_dict, flist=[], preview=False)\n self.default_vis(curr_step)", "def encode(self):\n payload = []\n\n # Generate Payload\n if self.IsEnsembleData:\n payload += self.EnsembleData.encode()\n if self.IsAncillaryData:\n payload += self.AncillaryData.encode()\n if self.IsAmplitude:\n payload += self.Amplitude.encode()\n if self.IsCorrelation:\n payload += self.Correlation.encode()\n if self.IsBeamVelocity:\n payload += self.BeamVelocity.encode()\n if self.IsInstrumentVelocity:\n payload += self.InstrumentVelocity.encode()\n if 
self.IsEarthVelocity:\n payload += self.EarthVelocity.encode()\n if self.IsGoodBeam:\n payload += self.GoodBeam.encode()\n if self.IsGoodEarth:\n payload += self.GoodEarth.encode()\n if self.IsBottomTrack:\n payload += self.BottomTrack.encode()\n if self.IsRangeTracking:\n payload += self.RangeTracking.encode()\n if self.IsSystemSetup:\n payload += self.SystemSetup.encode()\n if self.IsNmeaData:\n payload += self.NmeaData.encode()\n\n # Generate the header\n # Get the ensemble number\n ens_num = 0\n if self.IsEnsembleData:\n ens_num = self.EnsembleData.EnsembleNumber\n\n # Get the payload size\n payload_size = len(payload)\n\n header = Ensemble.generate_ens_header(ens_num, payload_size)\n\n # Generate the Checksum CITT\n # Parameters found at https: // pycrc.org / models.html\n #crc = pycrc.algorithms.Crc(width=16, poly=0x1021,\n # reflect_in=False, xor_in=0x1d0f,\n # reflect_out=False, xor_out=0x0000)\n #checksum = crc.bit_by_bit_fast(binascii.a2b_hex(bytes(payload)))\n #checksum = Ensemble.int32_to_bytes(CRCCCITT().calculate(input_data=bytes(payload)))\n checksum = crc16.crc16xmodem(payload)\n\n\n result = []\n result += header\n result += payload\n result += checksum\n\n return bytearray(result)", "def transform(self, inputs: list, stage: str) -> datapack.DataPack:", "def predict_structure(prefix, model_runner_1: alphafold.model.model.RunModel,\n model_runner_3: alphafold.model.model.RunModel,\n feature_dict, Ls: list[int], model_params: haiku.Params, use_model, do_relax=False,\n random_seed=0):\n\n # Minkyung's code\n # add big enough number to residue index to indicate chain breaks\n idx_res = feature_dict['residue_index']\n L_prev = 0\n # Ls: number of residues in each chain\n for L_i in Ls[:-1]:\n idx_res[L_prev + L_i:] += 200\n L_prev += L_i\n chains = list(\"\".join([ascii_uppercase[n] * L for n, L in enumerate(Ls)]))\n feature_dict['residue_index'] = idx_res\n\n # Run the models.\n plddts, paes = [], []\n unrelaxed_pdb_lines = []\n relaxed_pdb_lines = []\n\n print(f\"Use_model {use_model}\")\n\n for model_name, params in model_params.items():\n if model_name in use_model:\n print(f\"running {model_name}\")\n # swap params to avoid recompiling\n # note: models 1,2 have diff number of params compared to models 3,4,5\n if any(str(m) in model_name for m in [1, 2]): model_runner = model_runner_1\n if any(str(m) in model_name for m in [3, 4, 5]): model_runner = model_runner_3\n model_runner.params = params\n\n processed_feature_dict: affeatures.FeatureDict = model_runner.process_features(feature_dict,\n random_seed=random_seed)\n # prediction_result is a dictionary of NumPy feature arrays\n prediction_result: dict = model_runner.predict(processed_feature_dict)\n unrelaxed_protein: protein.Protein = protein.from_prediction(processed_feature_dict, prediction_result)\n unrelaxed_pdb_lines.append(protein.to_pdb(unrelaxed_protein))\n plddts.append(prediction_result['plddt'])\n paes.append(prediction_result['predicted_aligned_error'])\n\n if do_relax:\n # Relax the prediction.\n amber_relaxer: relax.AmberRelaxation = relax.AmberRelaxation(max_iterations=0, tolerance=2.39,\n stiffness=10.0, exclude_residues=[],\n max_outer_iterations=20)\n relaxed_pdb_str, _, _ = amber_relaxer.process(prot=unrelaxed_protein)\n relaxed_pdb_lines.append(relaxed_pdb_str)\n\n # rerank models based on predicted lddt\n lddt_rank = np.mean(plddts, -1).argsort()[::-1]\n out = {}\n print(\"reranking models based on avg. 
predicted lDDT\")\n for n, r in enumerate(lddt_rank):\n print(f\"model_{n + 1} {np.mean(plddts[r])}\")\n\n unrelaxed_pdb_path = f'{prefix}_unrelaxed_model_{n + 1}.pdb'\n with open(unrelaxed_pdb_path, 'w') as f:\n f.write(unrelaxed_pdb_lines[r])\n set_bfactor(unrelaxed_pdb_path, plddts[r], idx_res, chains)\n\n if do_relax:\n relaxed_pdb_path = f'{prefix}_relaxed_model_{n + 1}.pdb'\n with open(relaxed_pdb_path, 'w') as f: f.write(relaxed_pdb_lines[r])\n set_bfactor(relaxed_pdb_path, plddts[r], idx_res, chains)\n\n out[f\"model_{n + 1}\"] = {\"plddt\": plddts[r], \"pae\": paes[r]}\n return out", "def __init__(self, objects=()):\n\n vtk.vtkPropAssembly.__init__(self)\n\n self.name = \"\"\n self.created = \"\"\n self.trail = None\n self.trail_points = []\n self.trail_segment_size = 0\n self.trail_offset = None\n self.shadows = []\n self.info = {}\n self.rendered_at = set()\n self.transform = None\n self.scalarbar = None\n\n for a in vedo.utils.flatten(objects):\n if a:\n self.AddPart(a)\n\n self.PickableOff()", "def dovisitcomb(allv) :\n allvisits = allv[0]\n load = allv[1]\n field = allv[2][0]\n apogee_id = allv[2][1]\n clobber = allv[2][2]\n pixelmask=bitmask.PixelBitMask()\n\n # already done?\n outdir=os.path.dirname(load.filename('Field',field=field))\n outdir=outdir.replace('/stars/','/rv/')\n if os.path.exists(outdir+'/'+apogee_id+'.pkl') and not clobber:\n print(apogee_id,' already done visitcomb')\n fp=open(outdir+'/'+apogee_id+'.pkl','rb')\n try: \n out=pickle.load(fp)\n fp.close()\n return out\n except: \n print('error loading: ', apogee_id+'.pkl')\n pass\n\n # do the combination\n apstar=visitcomb(allvisits,load=load,plot=False)\n\n # dump\n pickle.dump(apstar,open(outdir+'/'+apogee_id+'.pkl','wb'))\n\n return apstar", "def _form_computation_graph(self, idx):\n _list, _set = list, set\n if type(idx) is int:\n node_layers = [np.array([idx], dtype=np.int64)]\n elif type(idx) is list:\n node_layers = [np.array(idx, dtype=np.int64)]\n\n for _ in range(self.n_layers):\n prev = node_layers[-1]\n arr = [node for node in prev]\n arr.extend([e[0] for node in arr for e in self.nbrs_t[node]])\n arr = np.array(_list(_set(arr)), dtype=np.int64)\n node_layers.append(arr)\n node_layers.reverse()\n\n mappings = [{j: i for (i, j) in enumerate(arr)} for arr in node_layers]\n\n return node_layers, mappings", "def genereate_echo_picklist(self):\n sample_names = []\n sample_wells = []\n indices = {'i5 name': {}, 'i5 plate': {}, 'i5 sequence': {},\n 'i5 well': {}, 'i7 name': {}, 'i7 plate': {},\n 'i7 sequence': {}, 'i7 well': {}, 'index combo': {},\n 'index combo seq': {}}\n\n for idx, well in enumerate(chain.from_iterable(self.plates[0].layout)):\n # Add the sample well\n sample_wells.append(well.well_id)\n # Get the sample name - we need to go back to the SampleComposition\n lib_comp = well.composition\n sample_comp = lib_comp.normalized_gdna_composition\\\n .gdna_composition.sample_composition\n sample_names.append(sample_comp.content)\n # Retrieve all the information about the indices\n i5_comp = lib_comp.i5_composition.primer_set_composition\n i5_well = i5_comp.container\n indices['i5 name'][idx] = i5_comp.external_id\n indices['i5 plate'][idx] = i5_well.plate.external_id\n indices['i5 sequence'][idx] = i5_comp.barcode\n indices['i5 well'][idx] = i5_well.well_id\n\n i7_comp = lib_comp.i7_composition.primer_set_composition\n i7_well = i7_comp.container\n indices['i7 name'][idx] = i7_comp.external_id\n indices['i7 plate'][idx] = i7_well.plate.external_id\n indices['i7 sequence'][idx] = 
i7_comp.barcode\n indices['i7 well'][idx] = i7_well.well_id\n\n indices['index combo seq'][idx] = '%s%s' % (\n indices['i5 sequence'][idx], indices['i7 sequence'][idx])\n\n sample_names = np.asarray(sample_names)\n sample_wells = np.asarray(sample_wells)\n indices = pd.DataFrame(indices)\n\n return LibraryPrepShotgunProcess._format_picklist(\n sample_names, sample_wells, indices)", "def __rechaindict__(c):\n from TriggerMenu.menu.DictFromChainName import DictFromChainName\n dfcn = DictFromChainName()\n\n pl1 = []\n for pch in c['chainParts']:\n pl1.append(pch['L1item'])\n\n newname = c['chainName'].replace('dv_','').replace('TestChain','j')\n nchlist = [ newname ,c['chainCounter'],c['L1item'],pl1,c['stream'],\n c['groups'],c['EBstep'] ]\n \n return dfcn.getChainDict(nchlist)", "def hierarchical( x, output_prefix, labels_to_register=[2,3,4,5], is_test=False, verbose=True ):\n if verbose:\n print(\"Read\")\n tfn = get_data('T_template0', target_extension='.nii.gz' )\n tfnw = get_data('T_template0_WMP', target_extension='.nii.gz' )\n tlrfn = get_data('T_template0_LR', target_extension='.nii.gz' )\n bfn = antspynet.get_antsxnet_data( \"croppedMni152\" )\n\n ##### read images and do simple bxt ops\n templatea = ants.image_read( tfn )\n if verbose:\n print(\"bxt\")\n templatea = ( templatea * antspynet.brain_extraction( templatea, 't1' ) ).iMath( \"Normalize\" )\n templateawmprior = ants.image_read( tfnw )\n templatealr = ants.image_read( tlrfn )\n templateb = ants.image_read( bfn )\n templateb = ( templateb * antspynet.brain_extraction( templateb, 't1' ) ).iMath( \"Normalize\" )\n imgbxt = brain_extraction( x )\n img = x * imgbxt\n\n if verbose:\n print(\"rbp\")\n\n # this is an unbiased method for identifying predictors that can be used to\n # rank / sort data into clusters, some of which may be associated\n # with outlierness or low-quality data\n templatesmall = ants.resample_image( templateb, (91,109,91), use_voxels=True )\n rbp = random_basis_projection( img, templatesmall )\n\n if verbose:\n print(\"intensity\")\n\n ##### intensity modifications\n img = ants.iMath( img, \"Normalize\" ) * 255.0\n img = ants.denoise_image( img, imgbxt, noise_model='Gaussian')\n img = ants.n4_bias_field_correction( img ).iMath(\"Normalize\")\n\n # optional - quick look at result\n bxt_png = output_prefix + \"_brain_extraction_dnz_n4_view.png\"\n ants.plot(img,axis=2,ncol=8,nslices=24, crop=True, black_bg=False,\n filename = bxt_png )\n\n if verbose:\n print(\"hemi\")\n\n # assuming data is reasonable quality, we should proceed with the rest ...\n mylr = label_hemispheres( img, templatea, templatealr )\n\n if verbose:\n print(\"parcellation\")\n\n ##### hierarchical labeling\n myparc = deep_brain_parcellation( img, templateb,\n do_cortical_propagation = not is_test, verbose=False )\n\n ##### accumulate data into data frames\n hemi = map_segmentation_to_dataframe( \"hemisphere\", myparc['hemisphere_labels'] )\n tissue = map_segmentation_to_dataframe( \"tissues\", myparc['tissue_segmentation'] )\n dktl = map_segmentation_to_dataframe( \"lobes\", myparc['dkt_lobes'] )\n dktp = map_segmentation_to_dataframe( \"dkt\", myparc['dkt_parcellation'] )\n dktc = None\n if not is_test:\n dktc = map_segmentation_to_dataframe( \"dkt\", myparc['dkt_cortex'] )\n\n tissue_seg_png = output_prefix + \"_seg.png\"\n ants.plot( img, myparc['tissue_segmentation'], axis=2, nslices=21, ncol=7,\n alpha=0.6, filename=tissue_seg_png,\n crop=True, black_bg=False )\n\n if verbose:\n print(\"WMH\")\n\n ##### below here are more exploratory 
nice to have outputs\n myhypo = t1_hypointensity(\n img,\n myparc['tissue_segmentation'], # segmentation\n myparc['tissue_probabilities'][3], # wm posteriors\n templatea,\n templateawmprior )\n\n if verbose:\n print(\"registration\")\n\n ##### traditional deformable registration as a high-resolution complement to above\n wm_tractsL = None\n wm_tractsR = None\n wmtdfL = None\n wmtdfR = None\n reg = None\n if labels_to_register is not None:\n reg = hemi_reg(\n input_image = img,\n input_image_tissue_segmentation = myparc['tissue_segmentation'],\n input_image_hemisphere_segmentation = mylr,\n input_template=templatea,\n input_template_hemisphere_labels=templatealr,\n output_prefix = output_prefix + \"_SYN\",\n labels_to_register = labels_to_register,\n is_test=is_test )\n if verbose:\n print(\"wm tracts\")\n ##### how to use the hemi-reg output to generate any roi value from a template roi\n wm_tracts = ants.image_read( get_data( \"wm_major_tracts\", target_extension='.nii.gz' ) )\n wm_tractsL = ants.apply_transforms( img, wm_tracts, reg['synL']['invtransforms'],\n interpolator='genericLabel' ) * ants.threshold_image( mylr, 1, 1 )\n wm_tractsR = ants.apply_transforms( img, wm_tracts, reg['synR']['invtransforms'],\n interpolator='genericLabel' ) * ants.threshold_image( mylr, 2, 2 )\n wmtdfL = map_segmentation_to_dataframe( \"wm_major_tracts\", wm_tractsL )\n wmtdfR = map_segmentation_to_dataframe( \"wm_major_tracts\", wm_tractsR )\n\n if verbose:\n print(\"hippocampus\")\n\n ##### specialized labeling for hippocampus\n ntries = 10\n if is_test:\n ntries = 1\n hippLR = deep_hippo( img, templateb, ntries )\n\n mydataframes = {\n \"hemispheres\":hemi,\n \"tissues\":tissue,\n \"dktlobes\":dktl,\n \"dktregions\":dktp,\n \"dktcortex\":dktc,\n \"wmtracts_left\":wmtdfL,\n \"wmtracts_right\":wmtdfR,\n \"wmh\":myhypo['wmh_summary']\n }\n\n outputs = {\n \"brain_n4_dnz\": img,\n \"brain_n4_dnz_png\": bxt_png,\n \"brain_extraction\": imgbxt,\n \"tissue_seg_png\": tissue_seg_png,\n \"rbp\": rbp,\n \"left_right\": mylr,\n \"dkt_parc\": myparc,\n \"registration\":reg,\n \"hippLR\":hippLR,\n \"white_matter_hypointensity\":myhypo,\n \"wm_tractsL\":wm_tractsL,\n \"wm_tractsR\":wm_tractsR,\n \"dataframes\": mydataframes\n }\n\n return outputs", "def label(gt_dataset, volume_dim, voxel_dim, labeling_params):\n labeled_volumes = dict()\n labeled_cells = dict()\n #Use global density and reduce the size of gt_dataset here\n global_density = labeling_params[\"global_density\"]\n gt_dataset = {k: v for k,v in gt_dataset.items() if random_sample() < global_density}\n #Label in the order specified in the configuration\n layers = sorted(labeling_params.keys())\n #Remove global_density\n layers.remove(\"global_density\")\n for layer in layers:\n print \"Labeling {}\".format(layer)\n fluorophore = labeling_params[layer]['fluorophore']\n volume, cells = brainbow(gt_dataset, volume_dim, voxel_dim, **labeling_params[layer])\n if fluorophore in labeled_volumes:\n labeled_volumes[fluorophore] += volume\n labeled_cells[fluorophore] |= cells\n else:\n labeled_volumes[fluorophore] = volume\n labeled_cells[fluorophore] = cells\n return labeled_volumes, labeled_cells", "def mapping_image_to_label (self, labels_df, polygons, fpath_tiff): \n \n unread_tiff = rasterio.open(fpath_tiff)\n\n #Projecting the coordinates to that CRS \n proj = Proj(init='epsg:32618')\n data = []\n labels = []\n failed = []\n \n src = rasterio.open(fpath_tiff, 'r')\n outfolder = '/train/batch'\n \n print (\"Hold on tight! 
Mapping each image to its respective label...\")\n \n \n for num, row in labels_df.iterrows():\n try:\n \n \n roof_material_num = 0\n polygon0 = polygons [num]\n polygon0['coordinates'] = self.transforming_coordinates(polygon0['coordinates'], proj)\n masked_image, out_transform = rasterio.mask.mask(src,[polygon0], filled = True, crop=True, nodata = 0)\n img_image = reshape_as_image (masked_image)\n \n #Defining the name of the image file as \"buildingID+roofMaterial+png\" and its path \n img_path = os.path.join (outfolder, str (row['id'])+'-'+ str (row['roof_material'])+'.png')\n \n #swapping the color channels from RGB2BGR\n img_image = cv2.cvtColor (img_image, cv2.COLOR_RGB2BGR) #img_image is a numpy array\n \n #resizing the image dimensions to 128x128 to match ImageNet dimensions\n img_image = cv2.resize(img_image, (128, 128))\n \n #writing the image in the file\n #cv2.imwrite (img_path, img_image)\n # update the data and labels lists, respectively\n data.append(img_image) #data is a list\n labels.append(row['roof_material'])\n \n except Exception as e:\n print (e)\n failed.append (num)\n \n \n #print number of images we failed to crop and write \n print (\"Bad News First: Failed to write\", len(failed), \"Images.\")\n print (\"Good News: Successfully mapped\", len (data), \"Images.\")\n data = np.array(data)\n labels = np.array(labels)\n #batch = data.sample(frac=0.5, replace=False, random_state=1)\n #print(\"Size and shape of validY: {}\\n\".format(batch.shape))\n return data, labels", "def case():\r\n #ppc = {\"version\": '2'}\r\n ppc = {}\r\n ##----- Power Flow Data -----##\r\n ## system MVA base\r\n ppc[\"baseMVA\"] = 100.0\r\n\r\n ## bus data\r\n # bus_i type Pd Qd Gs Bs area Vm Va baseKV zone Vmax Vmin\r\n ppc[\"bus\"] = array([\r\n [1, 3, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [2, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [3, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [4, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [5, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [6, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [7, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [8, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [9, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [10, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [11, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [12, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [13, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [14, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [15, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [16, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0]\r\n ])\r\n\r\n ## generator data\r\n # bus, Pg, Qg, Qmax, Qmin, Vg, mBase, status, Pmax, Pmin, Pc1, Pc2,\r\n # Qc1min, Qc1max, Qc2min, Qc2max, ramp_agc, ramp_10, ramp_30, ramp_q, apf\r\n ppc[\"gen\"] = array([\r\n [1,\t0,\t0,\t10,\t-10,\t1.0224,\t100,\t1,\t10,\t-10,\t0,\t0,\t0,\t0,\t0,\t0,\t0,\t0,\t0,\t0, 0, 0,0, 0, 0],\r\n [3 ,0, 0, 50e-3, -50e-3, 1, 100, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0],\r\n [5 , 0, 0, 10e-3, -10e-3, 1, 100, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0],\r\n [10 , 0, 0, 10e-3, -10e-3, 1, 100, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0],\r\n [13 ,0, 0, 10e-3, -10e-3, 1, 100, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0],\r\n [15 , 0, 0, 50e-3, -50e-3, 1, 100, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0]\r\n 
])\r\n load_b = array([2, 4, 9, 12, 14])\r\n ppc[\"bus\"][load_b, 2] = multiply(array([-2.1125, -0.2231, -0.1664, -0.0719, -1.4633]).T, 0.03)\r\n ppc[\"bus\"][load_b, 3] = multiply(array([1.6492, 0.4054, 0.8599, 0.8845, 0.6778]).T, 0.03)\r\n ## branch data\r\n # fbus, tbus, r, x, b, rateA, rateB, rateC, ratio, angle, status, angmin, angmax\r\n ppc[\"branch\"] = array([\r\n [1, 2, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [1, 8, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [1, 15, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [2, 3, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [2, 6, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [2, 7, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [3, 4, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [4, 5, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [8, 9, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [8, 12, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [8, 13, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [9, 10, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [9, 14, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [10, 11, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [15, 16, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0]\r\n ])\r\n R1 = 0.43\r\n L1 = 0.4e-3\r\n RS1 = 0.32\r\n LS1 = 0.39e-3\r\n Zbase = (0.4*0.4/100)\r\n branch_phase =array([\r\n [1, 1, 2, 188, R1, L1],\r\n [2, 1 ,8, 346, R1, L1],\r\n [3 ,1 ,15,501, R1 ,L1],\r\n [4, 2, 3, 130, RS1,LS1],\r\n [5, 2, 6, 145, RS1,LS1],\r\n [6, 2 ,7, 157, RS1,LS1],\r\n [7, 3, 4, 185, RS1,LS1],\r\n [8, 4, 5, 1000,RS1,LS1],\r\n [9, 8 ,9, 416, RS1,LS1],\r\n [10,8 ,12,130, RS1,LS1],\r\n [11,8 ,13,121, RS1,LS1],\r\n [12,9 ,10,130, RS1,LS1],\r\n [13,9 ,14,127, RS1,LS1],\r\n [14,10,11,251, RS1,LS1],\r\n [15,15,16,345, RS1,LS1]\r\n ])\r\n ppc[\"branch\"][:, [2,3]] = multiply(array([branch_phase[:, 4]*branch_phase[:, 3], branch_phase[:, 4]*branch_phase[:, 4]*100*pi]).T,0.001/Zbase)\r\n\r\n ##----- OPF Data -----##\r\n ## area data\r\n # area refbus\r\n\r\n\r\n ## generator cost data\r\n # 1 startup shutdown n x1 y1 ... xn yn\r\n # 2 startup shutdown n c(n-1) ... 
c0\r\n\r\n\r\n return ppc", "def _label_encoding(self):\n for feat in self.cat_feats:\n if self.train:\n lbl = preprocessing.LabelEncoder()\n lbl.fit(self.dataframe[feat].values)\n self.dataframe_d_copy.loc[:,feat] = lbl.transform(self.dataframe[feat].values)\n self.label_encoders[feat] = lbl\n else:\n lbl = self.encoders[feat]\n self.dataframe_d_copy.loc[:,feat] = lbl.transform(self.dataframe[feat].values)\n \n if self.train:\n encoder_path = f\"{self.output_path}/_label_encoder.pkl\"\n self.cat_feats_cfg['encoder_path'] = encoder_path\n joblib.dump(self.label_encoders, encoder_path)\n \n return self.dataframe_d_copy", "def convert2EbnerParamOriginalParam(listSlice,list_prefix,directory,paramAx,paramCor,paramSag):\n paramAx=np.load(paramAx)\n paramCor=np.load(paramCor)\n paramSag=np.load(paramSag)\n param=[]\n param.append(paramAx)\n param.append(paramCor)\n param.append(paramSag)\n \n images,mask = createVolumesFromAlist(listSlice.copy()) #list of images corresponding to differents original stacks\n \n \n mat = np.array([[-1,0,0,0],[0,-1,0,0],[0,0,1,0],[0,0,0,1]]) #matrix to convert affine matrix from nibabel to itk\n\n for n in range(len(images)): #for each stack\n \n imagen = images[n]\n \n for i_slice in range(len(images[n])): #for each slices (in each stacks)\n \n slicei=imagen[i_slice]\n dimension=3\n X,Y,Z= slicei.get_slice().get_fdata().shape\n transfo = param[n][slicei.get_index_slice(),:,:]\n #print()\n matrix = mat @ transfo @ mat\n #print(matrix)\n test = sitk.AffineTransform(dimension)\n test.SetMatrix(matrix[0:3,0:3].flatten())\n test.SetTranslation(matrix[0:3,3])\n images_index = slicei.get_index_image()\n\n sitk.WriteTransform(test,\"%s/%s_slice%d.tfm\" %(directory,list_prefix[images_index],slicei.get_index_slice())) #save rigid transformation, computed at the barycenter of the image, adatpted to itk", "def nodeSeparate(self,compInfo, ifSub, subname, subcktName,numNodesSub):\n node = []\n nodeTemp = []\n nodeDic = {}\n pinInit = 'Modelica.Electrical.Analog.Interfaces.Pin '\n pinProtectedInit = 'Modelica.Electrical.Analog.Interfaces.Pin '\n protectedNode = []\n print \"CompInfo coming to nodeSeparate function: compInfo\",compInfo\n \n #Removing '[' and ']' from compInfo for Digital node\n for i in range(0,len(compInfo),1):\n compInfo[i] = compInfo[i].replace(\"[\",\"\").replace(\"]\",\"\")\n \n \n for eachline in compInfo:\n words = eachline.split()\n if eachline[0] in ['m', 'e', 'g', 't','M','E','G','T']:\n nodeTemp.append(words[1])\n nodeTemp.append(words[2])\n nodeTemp.append(words[3])\n nodeTemp.append(words[4])\n elif eachline[0] in ['q', 'j','J','Q']:\n nodeTemp.append(words[1])\n nodeTemp.append(words[2])\n nodeTemp.append(words[3])\n elif eachline[0]=='x' or eachline[0]=='X':\n templine = eachline.split()\n for i in range(0,len(templine),1):\n if templine[i] in subcktName:\n point = i \n nodeTemp.extend(words[1:point])\n else:\n nodeTemp.append(words[1])\n nodeTemp.append(words[2])\n for i in nodeTemp:\n if i not in node:\n node.append(i)\n \n for i in range(0, len(node),1):\n nodeDic[node[i]] = 'n' + node[i]\n if ifSub == '0':\n if i != len(node)-1:\n pinInit = pinInit + nodeDic[node[i]] + ', '\n else:\n pinInit = pinInit + nodeDic[node[i]]\n else:\n nonprotectedNode = self.getSubInterface(subname, numNodesSub) \n if node[i] in nonprotectedNode:\n continue\n else:\n protectedNode.append(node[i])\n if ifSub == '1':\n if len(nonprotectedNode) > 0:\n for i in range(0, len(nonprotectedNode),1):\n if i != len(nonprotectedNode)-1:\n pinProtectedInit = pinProtectedInit + 
nodeDic[nonprotectedNode[i]] + ','\n else:\n pinProtectedInit = pinProtectedInit + nodeDic[nonprotectedNode[i]]\n if len(protectedNode) > 0:\n for i in range(0, len(protectedNode),1):\n if i != len(protectedNode)-1: \n pinInit = pinInit + nodeDic[protectedNode[i]] + ','\n else:\n pinInit = pinInit + nodeDic[protectedNode[i]]\n pinInit = pinInit + ';'\n pinProtectedInit = pinProtectedInit + ';'\n print \"Node---->\",node\n print \"nodeDic----->\",nodeDic\n print \"PinInit----->\",pinInit\n print \"pinProtectedinit--->\",pinProtectedInit\n return node, nodeDic, pinInit, pinProtectedInit", "def __repr__(self):\n data = sorted(\n (state, pos, tape_cache, outputs)\n for pos, states in self.iteritems()\n for state, (tape_cache, outputs) in states.iteritems())\n branch = \"branch\" if len(data) == 1 else \"branches\"\n result = \"process (%s %s)\" % (len(data), branch)\n for s, sdata in itertools.groupby(data, lambda x: x[0]):\n result += \"\\n+ at state %s\" % (s,)\n for state, pos, tape_cache, outputs in sdata:\n result += \"\\n+-- %s, %s\" % (tape_cache, outputs)\n return result", "def _vVBEL(self,vKNOT=None,edges=vVBEL_edges,edgesD=vVBEL_edgesD,mColNames=['OBJTYPE','pk'],mIdxNames=['OBJTYPE','OBJID']):\r\n\r\n logStr = \"{0:s}.{1:s}: \".format(self.__class__.__name__, sys._getframe().f_code.co_name)\r\n logger.debug(\"{0:s}{1:s}\".format(logStr,'Start.')) \r\n \r\n try: \r\n # construct \r\n vVBEL=None\r\n vVBEL_UnionList=[]\r\n\r\n for VBEL in edges:\r\n if VBEL in self.dataFrames:\r\n vXXXX=self._vVBEL_XXXX(vKNOT=vKNOT,OBJTYPE=VBEL)\r\n if vXXXX is None:\r\n pass\r\n else:\r\n vVBEL_UnionList.append(vXXXX)\r\n vVBEL=pd.concat(vVBEL_UnionList)\r\n\r\n # MIndices\r\n vVBEL=Xm.constructNewMultiindexFromCols(df=vVBEL,mColNames=mColNames,mIdxNames=mIdxNames)\r\n\r\n # Gruppenzugeh. ergaenzen\r\n vVBEL['LAYR']=[list() for dummy in vVBEL['tk']]\r\n dfLayr=self.dataFrames['vLAYR']\r\n if not dfLayr.empty:\r\n dfLayr=dfLayr.rename(columns={'OBJTYPE':'TYPE'}) \r\n dfLayr=pd.merge(\r\n vVBEL\r\n ,dfLayr\r\n ,how='inner' # nur die VBEL die eine Gruppenzugehoerigkeit haben\r\n ,left_index=True \r\n ,right_on=['TYPE','OBJID'] \r\n ,suffixes=('', '_y'))[['NAME','TYPE','OBJID','nrObjInGroup','nrObjtypeInGroup']]\r\n dfLayr=dfLayr[dfLayr.nrObjInGroup <= 1] # pro VBEL und Gruppe nur 1 Zeile\r\n\r\n for index, row in vVBEL.merge(dfLayr.sort_values(by=['NAME','OBJID']),how='left',left_index=True ,right_on=['TYPE','OBJID'],suffixes=('', '_y')).iterrows(): \r\n if pd.isnull(row.NAME):\r\n continue\r\n row.LAYR.append(row.NAME)\r\n\r\n # L ergaenzen\r\n Rohr=self.dataFrames['ROHR']\r\n VbelL=vVBEL.join(Rohr.set_index('pk').rename_axis('OBJID', axis='index'),rsuffix='_y')[['L']] \r\n vVBEL['L']=VbelL['L'].fillna(0) \r\n\r\n # D ergaenzen\r\n # Spalte erzeugen ... \r\n vRohr=self.dataFrames['vROHR']\r\n VbelD=vVBEL.join(vRohr.set_index('pk').rename_axis('OBJID', axis='index'),rsuffix='_y')[['DI']]\r\n vVBEL['D']=VbelD['DI'] # ... 
mit ROHR\r\n\r\n # ueber alle ausser ROHR\r\n for eIdx,edge in enumerate(edges):\r\n if edge == 'ROHR':\r\n continue\r\n edgeDCol=edgesD[eIdx]\r\n if edgeDCol=='':\r\n continue \r\n if edge not in self.dataFrames:\r\n continue\r\n Edge=self.dataFrames[edge] \r\n if edgeDCol not in Edge.columns.tolist():\r\n continue\r\n edgeD=vVBEL.join(Edge.set_index('pk').rename_axis('OBJID', axis='index'),rsuffix='_y',how='inner')[[edgeDCol]]\r\n vVBEL.loc[[edge],'D']=edgeD.loc[[edge],:].values\r\n\r\n\r\n # fehlende Spaltenwerte zuweisen\r\n #Vent=self.dataFrames['VENT']\r\n #VentD=vVBEL.join(Vent.set_index('pk'),rsuffix='_y',how='inner')[['DN']]\r\n #vVBEL.loc[['VENT'],'D']=VentD.loc[['VENT'],:].values\r\n\r\n # Finish\r\n vVBEL.sort_index(level=0,inplace=True)\r\n\r\n except Exception as e:\r\n logStrFinal=\"{:s}Exception: Line: {:d}: {!s:s}: {:s}\".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))\r\n logger.debug(logStrFinal) \r\n finally:\r\n logger.debug(\"{0:s}{1:s}\".format(logStr,'_Done.'))\r\n return vVBEL", "def apply_grub_cmdline(self):\n\n for i in self._nodes.items():\n node = i[1]\n\n # Get the isolated CPUs\n other_workers = node[\"cpu\"][\"other_workers\"]\n vpp_workers = node[\"cpu\"][\"vpp_workers\"]\n if \"vpp_main_core\" in node[\"cpu\"]:\n vpp_main_core = node[\"cpu\"][\"vpp_main_core\"]\n else:\n vpp_main_core = 0\n all_workers = []\n if other_workers is not None:\n all_workers = [other_workers]\n if vpp_main_core != 0:\n all_workers += [(vpp_main_core, vpp_main_core)]\n all_workers += vpp_workers\n isolated_cpus = \"\"\n for idx, worker in enumerate(all_workers):\n if worker is None:\n continue\n if idx > 0:\n isolated_cpus += \",\"\n if worker[0] == worker[1]:\n isolated_cpus += \"{}\".format(worker[0])\n else:\n isolated_cpus += \"{}-{}\".format(worker[0], worker[1])\n\n vppgrb = VppGrubUtil(node)\n current_cmdline = vppgrb.get_current_cmdline()\n if \"grub\" not in node:\n node[\"grub\"] = {}\n node[\"grub\"][\"current_cmdline\"] = current_cmdline\n node[\"grub\"][\"default_cmdline\"] = vppgrb.apply_cmdline(node, isolated_cpus)\n\n self.updateconfig()" ]
[ "0.53284836", "0.52070326", "0.50032526", "0.4924041", "0.48997957", "0.48227805", "0.4819547", "0.4782699", "0.47695082", "0.4746857", "0.4743185", "0.47308764", "0.4726845", "0.46611047", "0.46597615", "0.463954", "0.46242067", "0.46234703", "0.45942166", "0.45858887", "0.4571889", "0.45699766", "0.45352846", "0.45307684", "0.45059124", "0.44992357", "0.4491267", "0.44876143", "0.44860741", "0.4449634", "0.4436323", "0.4420826", "0.4415709", "0.44151866", "0.44132257", "0.44119504", "0.44111606", "0.4409967", "0.4408976", "0.44019982", "0.44017988", "0.44003794", "0.43899447", "0.43819338", "0.43688822", "0.4354956", "0.43437353", "0.43380022", "0.43350187", "0.43290702", "0.4328562", "0.43279904", "0.43196777", "0.43035686", "0.43007937", "0.43005908", "0.42981583", "0.42862144", "0.42832178", "0.42798436", "0.42769793", "0.42730308", "0.42673436", "0.42671657", "0.42671657", "0.42641827", "0.42627034", "0.42608324", "0.42561105", "0.42515945", "0.42507425", "0.42477784", "0.42443958", "0.424284", "0.4238558", "0.42339242", "0.42317352", "0.42286026", "0.4222909", "0.42208576", "0.4217499", "0.4213684", "0.42129332", "0.42101938", "0.4209228", "0.42056566", "0.42052203", "0.42037198", "0.41966754", "0.4193763", "0.41932732", "0.41926092", "0.41910368", "0.419061", "0.41901353", "0.4189429", "0.41860756", "0.4182849", "0.41811472", "0.41706634" ]
0.61593163
0
dijet label. supports dijet cuts, and cuts on participating jets
def _make_dijet_label(chain_parts): assert len(chain_parts) == 1 scenario = chain_parts[0]['hypoScenario'] assert scenario.startswith('dijet') arg_res = [ re.compile(r'^(?P<lo>\d*)(?P<key>djmass)(?P<hi>\d*)$'), re.compile(r'^(?P<lo>\d*)(?P<key>j1et)(?P<hi>\d*)$'), re.compile(r'^(?P<lo>\d*)(?P<key>j1eta)(?P<hi>\d*)$'), re.compile(r'^(?P<lo>\d*)(?P<key>j2et)(?P<hi>\d*)$'), re.compile(r'^(?P<lo>\d*)(?P<key>j2eta)(?P<hi>\d*)$'), ] defaults = { 'j1et': ('100', 'inf'), 'j2et': ('100', 'inf'), 'j1eta': ('0', '320'), 'j2eta': ('0', '320'), 'djmass': ('1000', 'inf'), } args = _args_from_scenario(scenario) argvals = {} while args: assert len(args) == len(arg_res) arg = args.pop() for r in arg_res: m = r.match(arg) if m is not None: arg_res.remove(r) gd = m.groupdict() key = gd['key'] try: lo = float(gd['lo']) except ValueError: lo = defaults[key][0] argvals[key+'lo'] = lo try: hi = float(gd['hi']) except ValueError: hi = defaults[key][1] argvals[key+'hi'] = hi assert len(args) == len(arg_res) assert len(args) == 0 return """ combgen( [(2)(%(j1etlo).0fet, %(j1etalo).0feta%(j1etahi).0f) (%(j1etlo).0fet, %(j1etalo).0feta%(j1etahi).0f) ] dijet( [(%(djmasslo).0fdjmass)]) simple([(%(j1etlo).0fet, %(j1etalo).0feta%(j1etahi).0f) (%(j2etlo).0fet, %(j2etalo).0feta%(j2etahi).0f)]) )""" % argvals
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def extract_info(config, cut, label):\n cfg = filter(lambda c: c['name'] == cut, config['physics']['cuts'])[0]\n text = \"\"\n if 'max' not in cfg:\n text += \"#geq \"\n text += str(cfg['min'])\n if 'max' in cfg and cfg['max'] != cfg['min']:\n text += '-' + str(cfg['max']) + ' ' + label + 's'\n elif cfg['min'] != 1:\n text += ' ' + label + 's'\n else:\n text += ' ' + label\n return text", "def makeDPartial( name\n , config\n , DecayDescriptor\n , inputSel\n ) :\n\n _Kcuts1 = \"~ISMUON & (PT > %(DaugPtLoose)s* MeV) & (MIPCHI2DV(PRIMARY) > %(DaugIPChi2Loose)s)\" % locals()['config']\n _KcutsPIDK = \" & (PIDK > %(HighPIDK)s)\" % locals()['config']\n _Kcuts2 = \" & (ISLONG) & (P > %(DaugPLoose)s* MeV) & (TRCHI2DOF < %(DaugTrkChi2Loose)s)\" % locals()['config']\n _Kcuts = _Kcuts1 + _KcutsPIDK + _Kcuts2\n _Picuts1 = \"~ISMUON & (PT > %(DaugPtMin)s* MeV) & (MIPCHI2DV(PRIMARY) > %(DaugIPChi2)s)\" % locals()['config']\n _PicutsPIDK = \" & (PIDK < %(LowPIDK)s)\" % locals()['config']\n _Picuts2 = \" & (ISLONG) & (P > %(DaugP)s* MeV) & (TRCHI2DOF < %(DaugTrkChi2)s)\" % locals()['config']\n _Picuts = _Picuts1 + _PicutsPIDK + _Picuts2\n _dauCuts = { 'K+': _Kcuts, 'pi+': _Picuts }\n #_Kcuts1 = \"~ISMUON & (PT > 500* MeV) & (MIPCHI2DV(PRIMARY) > 4)\"\n #_KcutsPIDK = \" & (PIDK > 5)\"\n #_Kcuts2 = \" & (ISLONG) & (P > 5000* MeV) & (TRCHI2DOF < 5)\"\n #_Kcuts = _Kcuts1 + _KcutsPIDK + _Kcuts2\n #_Picuts1 = \"~ISMUON & (PT > 500* MeV) & (MIPCHI2DV(PRIMARY) > 4)\"\n #_PicutsPIDK = \" & (PIDK < 0)\"\n #_Picuts2 = \" & (ISLONG) & (P > 5000* MeV) & (TRCHI2DOF < 5)\"\n #_Picuts = _Picuts1 + _PicutsPIDK + _Picuts2\n #_dauCuts = { 'K+': _Kcuts, 'pi+': _Picuts }\n\n _combCuts = \"(APT > %(D0PtLoose)s* MeV)\" \\\n \"& (AP > %(D0P)s* MeV)\" % locals()['config']\n\n _motherCuts = \"(VFASPF(VCHI2PDOF) < %(D0VtxChi2Ndof)s)\" \\\n \"& (BPVVDCHI2 > %(D0FDChi2)s)\" % locals()['config']\n\n\n _Dminus = CombineParticles( DecayDescriptor = DecayDescriptor\n , DaughtersCuts = _dauCuts\n , CombinationCut = _combCuts\n , MotherCut = _motherCuts\n )\n\n return Selection( name+'Sel',\n Algorithm = _Dminus,\n RequiredSelections = inputSel\n )", "def x_group_label(\n x_gr: int, cut: int = 20, name_dict: Dict[AnyStr, AnyStr] = names_dict\n) -> AnyStr:\n name = name_dict[str(x_gr)]\n if len(name) > cut:\n return f\"{name[:cut-3]}...\"\n else:\n return name", "def assignLabels(self):\n clusters = np.arange(0, len(self.V))[self.V < self.V1] #indexes self.V, volumes_sorted, and oldOrder\n self.clusterV = self.volumes_sorted[clusters]\n clusters = self.oldOrder[clusters] #indexes volumes\n self.clusters = self.nonBI[clusters] #indexes self.vor and self.data\n self.easyLabel = np.zeros(len(self.data))\n self.easyLabel[self.clusters] = 1\n print('Out of ' + str(len(self.data)) + ' particles, ' + str(len(self.clusters)) + ' (' + str(round(len(self.clusters)*100/len(self.data), 3)) +' %) are labelled as cluster particles.')", "def __init__(self, data_cfg, pipeline_cfg, root_path, sel_index=0):\n\n super(DetRetailOneDataset, self).__init__(\n data_cfg, pipeline_cfg, root_path, sel_index\n )\n\n self.cat2label = {cat: i for i, cat in enumerate(self.class_names)}\n self.ORI_CLASSES = (\n \"asamu\",\n \"baishikele\",\n \"baokuangli\",\n \"aoliao\",\n \"bingqilinniunai\",\n \"chapai\",\n \"fenda\",\n \"guolicheng\",\n \"haoliyou\",\n \"heweidao\",\n \"hongniu\",\n \"hongniu2\",\n \"hongshaoniurou\",\n \"kafei\",\n \"kaomo_gali\",\n \"kaomo_jiaoyan\",\n \"kaomo_shaokao\",\n \"kaomo_xiangcon\",\n \"kele\",\n \"laotansuancai\",\n \"liaomian\",\n 
\"lingdukele\",\n \"maidong\",\n \"mangguoxiaolao\",\n \"moliqingcha\",\n \"niunai\",\n \"qinningshui\",\n \"quchenshixiangcao\",\n \"rousongbing\",\n \"suanlafen\",\n \"tangdaren\",\n \"wangzainiunai\",\n \"weic\",\n \"weitanai\",\n \"weitaningmeng\",\n \"wulongcha\",\n \"xuebi\",\n \"xuebi2\",\n \"yingyangkuaixian\",\n \"yuanqishui\",\n \"xuebi-b\",\n \"kebike\",\n \"tangdaren3\",\n \"chacui\",\n \"heweidao2\",\n \"youyanggudong\",\n \"baishikele-2\",\n \"heweidao3\",\n \"yibao\",\n \"kele-b\",\n \"AD\",\n \"jianjiao\",\n \"yezhi\",\n \"libaojian\",\n \"nongfushanquan\",\n \"weitanaiditang\",\n \"ufo\",\n \"zihaiguo\",\n \"nfc\",\n \"yitengyuan\",\n \"xianglaniurou\",\n \"gudasao\",\n \"buding\",\n \"ufo2\",\n \"damaicha\",\n \"chapai2\",\n \"tangdaren2\",\n \"suanlaniurou\",\n \"bingtangxueli\",\n \"weitaningmeng-bottle\",\n \"liziyuan\",\n \"yousuanru\",\n \"rancha-1\",\n \"rancha-2\",\n \"wanglaoji\",\n \"weitanai2\",\n \"qingdaowangzi-1\",\n \"qingdaowangzi-2\",\n \"binghongcha\",\n \"aerbeisi\",\n \"lujikafei\",\n \"kele-b-2\",\n \"anmuxi\",\n \"xianguolao\",\n \"haitai\",\n \"youlemei\",\n \"weiweidounai\",\n \"jindian\",\n \"3jia2\",\n \"meiniye\",\n \"rusuanjunqishui\",\n \"taipingshuda\",\n \"yida\",\n \"haochidian\",\n \"wuhounaicha\",\n \"baicha\",\n \"lingdukele-b\",\n \"jianlibao\",\n \"lujiaoxiang\",\n \"3+2-2\",\n \"luxiangniurou\",\n \"dongpeng\",\n \"dongpeng-b\",\n \"xianxiayuban\",\n \"niudufen\",\n \"zaocanmofang\",\n \"wanglaoji-c\",\n \"mengniu\",\n \"mengniuzaocan\",\n \"guolicheng2\",\n \"daofandian1\",\n \"daofandian2\",\n \"daofandian3\",\n \"daofandian4\",\n \"yingyingquqi\",\n \"lefuqiu\",\n )", "def _make_vbenf_label(chain_parts):\n\n # toy label for development: run simple and dijet independently.\n # simple makes Et cuts on two jets. 
Independently (sharing possible)\n # of jets choosean by simple, the dijet\n # scenario requires a dijet of mass > 900, and opening angle in phi > 2.6\n\n assert len(chain_parts) == 1\n scenario = chain_parts[0]['hypoScenario']\n assert scenario.startswith('vbenf')\n args = _args_from_scenario(scenario)\n if not args:\n return 'and([]simple([(50et)(70et)])combgen([(2)] dijet([(900djmass, 26djdphi)])))' \n arg_res = [\n re.compile(r'(?P<lo>\\d*)(?P<key>fbet)(?P<hi>\\d*)'),\n re.compile(r'(?P<lo>\\d*)(?P<key>mass)(?P<hi>\\d*)'),\n re.compile(r'(?P<lo>\\d*)(?P<key>et)(?P<hi>\\d*)'),\n ]\n\n defaults = {\n 'et': ('101', 'inf'),\n 'mass': ('800', 'inf'),\n 'fbet': ('501', 'inf'),\n }\n\n argvals = {}\n while args:\n assert len(args) == len(arg_res)\n arg = args.pop()\n for r in arg_res:\n m = r.match(arg)\n if m is not None:\n arg_res.remove(r)\n gd = m.groupdict()\n key = gd['key']\n try:\n lo = float(gd['lo'])\n except ValueError:\n lo = defaults[key][0]\n argvals[key+'lo'] = lo \n try:\n hi = float(gd['hi'])\n except ValueError:\n hi = defaults[key][1]\n argvals[key+'hi'] = hi\n\n assert len(args) == len(arg_res)\n assert len(args) == 0\n\n return \"\"\"\n and\n (\n []\n simple\n (\n [(%(etlo).0fet, 500neta)(%(etlo).0fet, peta500)]\n )\n combgen\n (\n [(10et, 0eta320)]\n dijet\n (\n [(%(masslo).0fdjmass, 26djdphi)]\n ) \n simple\n (\n [(10et, 0eta320)(20et, 0eta320)]\n )\n )\n )\"\"\" % argvals", "def create_labelled_dataset(self):\n\n print(\"-------------------------------------------------------------------\")\n print(\" How to Use the Pole Hull Label Tool\")\n print(\"-------------------------------------------------------------------\")\n print(\"- If a hull is NOT associated to a pole: press the 1 button\")\n print(\"- If a hull IS associated to a pole: press the 2 button\")\n print(\"\\n- If any other key is pressed, the program EXITS\")\n print(\"-------------------------------------------------------------------\")\n\n detector = gate_detector.GateDetector(im_resize=3.0/4)\n\n imgs = []\n labels = []\n directory = os.path.dirname(os.getcwd())\n \n # Get absolute path of all images in the images folder\n for dirpath,_,filenames in os.walk(os.path.join(directory, 'images', 'gate')):\n for f in filenames:\n imgs.append(os.path.abspath(os.path.join(dirpath, f)))\n\n # Get the hulls from the segmented image and run the display and label program for each image\n for img in imgs:\n src = cv.imread(img, 1)\n pre = detector.preprocess(src)\n seg = detector.segment(pre)\n mor = detector.morphological(seg)\n hulls = detector.create_convex_hulls(seg)\n labels += self.display_and_label_hulls(hulls, pre)\n return labels", "def setContourLabels(mode='none', ndigits=1):\n odict = {'none':'NONE', 'float':'FLOAT', 'string':'CONLAB'}\n dislin.labdig(ndigits, 'CONTUR')\n dislin.labels(odict[mode], 'CONTUR')", "def _add_labels(self):\n coords = self['pore.coords']\n self['pore.front'] = coords[:,0]<(0.1*self._Lx)\n self['pore.back'] = coords[:,0]>(0.9*self._Lx)\n self['pore.left'] = coords[:,1]<(0.1*self._Ly)\n self['pore.right'] = coords[:,1]>(0.9*self._Ly)\n self['pore.bottom'] = coords[:,2]<(0.1*self._Lz)\n self['pore.top'] = coords[:,2]>(0.9*self._Lz)\n bnds = self.pores(labels=['front','back','left','right','bottom','top'])\n self['pore.boundary'] = False\n self['pore.boundary'] = bnds", "def plot_data_assemble(self,kwargs_seg, add_mask ,img_name='data.pdf',cutout_text='lensed image',font_size=28):\n mask = self.data_mask\n image = self.raw_image\n picked_data = self.data\n selem = 
np.ones((add_mask, add_mask))\n img_mask = ndimage.binary_dilation(mask.astype(np.bool), selem)\n fig, (ax1, ax2, ax3,ax4) = plt.subplots(1, 4, figsize=(19, 10))\n ax1.imshow(image, origin='lower', cmap=\"gist_heat\")\n ax1.set_title('Cutout Image',fontsize =font_size)\n ax1.text(image.shape[0] * 0.2, image.shape[0] * 0.05, cutout_text,size=20, color='white',weight=\"bold\")\n ax1.axis('off')\n segments_deblend_list, xcenter, ycenter, c_index=kwargs_seg\n ax2.imshow(segments_deblend_list, origin='lower')\n for i in range(len(xcenter)):\n ax2.text(xcenter[i] * 1.1, ycenter[i], 'Seg' + repr(i), size=20,color='w',weight=\"bold\")\n ax2.text(image.shape[0] * 0.2, image.shape[0] * 0.9, 'Seg' + repr(c_index) + ' ' + 'in center',\n size=20, color='white',weight=\"bold\")\n ax2.set_title('Segmentations',fontsize =font_size)\n ax2.axis('off')\n ax3.imshow(img_mask+mask, origin='lower',cmap=\"gist_heat\")\n ax3.set_title('Selected pixels',fontsize =font_size)\n ax3.text(image.shape[0] * 0.1, image.shape[0] * 0.05, 'pixels (S/N >' + repr(self.snr) + ')',size=20, color='white',weight=\"bold\")\n ax3.text(image.shape[0] * 0.1, image.shape[0] * 0.9, 'additional pixels', size=20, color='r',weight=\"bold\")\n ax3.axis('off')\n ax4.imshow(picked_data, origin='lower',cmap=\"gist_heat\")\n ax4.set_title('Processed Image',fontsize =font_size)\n ax4.axis('off')\n plt.show()\n fig.savefig(img_name)\n return 0", "def BaseLabel(self, *args):\n return _XCAFDoc.XCAFDoc_DimTolTool_BaseLabel(self, *args)", "def main():\n # Directory where the DICOM files are being stored (in this\n input_path = './Inputs/valve'\n\n # Original image from the filepath\n img_original = read_image(input_path)\n\n # Image with smoothing applied to reduce noise\n img_smooth = sitk.CurvatureFlow(image1=img_original, timeStep=0.125, numberOfIterations=10)\n\n # Create labels on our smoothed image for cardiac tissue and tissue with blood\n labels_tissue = sitk.BinaryThreshold(image1=img_smooth, lowerThreshold=325, upperThreshold=470, insideValue=1)\n labels_blood = sitk.BinaryThreshold(image1=img_smooth, lowerThreshold=450, upperThreshold=800, insideValue=1, outsideValue=0)\n\n # IMPORTANT STEP: essentially, this is the key to our algorithm. By finding the \"blood\" without cardiac tissue,\n # and then using binary hole filling with a fairly large radius, we are able to label a lot of the mitral valve\n # area without labeling too much of the other cardiac tissue. 
Thus, THIS is what lets us single out the mitral\n # valve tissue from the rest - all we need is the overlap of the two labels\n labels_tissue_no_holes = sitk.VotingBinaryHoleFilling(image1=labels_tissue, radius=[2] * 3, majorityThreshold=1, backgroundValue=0, foregroundValue=1)\n labels_blood_no_holes = sitk.VotingBinaryHoleFilling(image1=labels_blood, radius=[4] * 3, majorityThreshold=1, backgroundValue=0, foregroundValue=1)\n labels_valve = retrieve_overlap(labels_blood_no_holes, labels_tissue_no_holes)\n labels_valve_no_holes = sitk.VotingBinaryHoleFilling(image1=labels_valve, radius=[2] * 3, majorityThreshold=1, backgroundValue=0, foregroundValue=1)\n labels_valve_no_holes = sitk.VotingBinaryHoleFilling(image1=labels_valve_no_holes, radius=[1] * 3, majorityThreshold=0, backgroundValue=1, foregroundValue=0)\n\n # Fix intensity scaling on our original smoothed image for pretty diagram purposes\n img_smooth = sitk.Cast(sitk.RescaleIntensity(img_smooth), labels_tissue_no_holes.GetPixelID())\n\n # Use a density-based clustering algorithm to attempt to remove as much noise as possible\n labels_valve_filtered = dbscan_filter(labels_valve_no_holes, eps=2, use_z=False)\n labels_valve_filtered = dbscan_filter(labels_valve_filtered, eps=4)\n\n # Find likely start and end points of our image by setting a mininum number of labeled pixels\n start, end = filter_by_label_count(labels_valve_filtered, 10)\n img_smooth = img_smooth[:, :, start:end]\n labels_valve_filtered = labels_valve_filtered[:, :, start:end]\n\n # Remove all values distant from the center of our starting location by taking advantage of kmeans\n df = get_df_from_img(labels_valve_filtered[:, :, 0], dimensions=2)\n x_mid = df['x'].mean()\n y_mid = df['y'].mean()\n df = get_df_from_img(labels_valve_filtered)\n distance_df = df.drop('z', axis=1)\n distance_df['x_dist'] = abs(distance_df['x'] - x_mid)\n distance_df['y_dist'] = abs(distance_df['y'] - y_mid)\n fit = cluster.KMeans(n_clusters=2).fit(distance_df.drop(['x', 'y'], axis=1))\n labels = fit.labels_\n df['label'] = pd.Series(labels)\n counts = df['label'].value_counts().to_dict()\n largest_cluster = max(counts.iterkeys(), key=(lambda key: counts[key]))\n update_img_from_df(df, labels_valve_filtered, keep=largest_cluster)\n\n # Find likely start and end points of our image by setting a mininum number of labeled pixels\n start, end = filter_by_label_count(labels_valve_filtered, 10)\n img_smooth = img_smooth[:, :, start:end]\n labels_valve_filtered = labels_valve_filtered[:, :, start:end]\n\n # Use a segmentation-based clustering algorithm to attempt to find each valve\n label_segments, x_max = kmeans_segment(labels_valve_filtered, use_z=False)\n\n left, right = (label_segments[0], label_segments[1])\n if x_max[0] > x_max[1]:\n left, right = right, left\n\n # Finally, we can simply take the furthest point from the likely start/end points in order to get our annulus\n # this can be done by every z value\n left_points = {'x': [], 'y': [], 'z': []}\n right_points = {'x': [], 'y': [], 'z': []}\n zlen = len(sitk.GetArrayFromImage(left))\n for z in xrange(zlen):\n left_df = get_df_from_img(left[:, :, z], dimensions=2)\n if len(left_df['y']) > 0:\n index = left_df['y'].idxmin()\n row = left_df.iloc[index]\n left_points['x'].append(int(row['x']))\n left_points['y'].append(int(row['y']))\n left_points['z'].append(z)\n\n right_df = get_df_from_img(right[:, :, z], dimensions=2)\n if len(right_df['x']) > 0:\n index = right_df['x'].idxmax()\n row = right_df.iloc[index]\n 
right_points['x'].append(int(row['x']))\n right_points['y'].append(int(row['y']))\n right_points['z'].append(z)\n\n # These both represent the coordinates of our annulus ring. A simple spline can be used for interpolation between\n # points\n final_left = pd.DataFrame.from_dict(left_points)\n final_right = pd.DataFrame.from_dict(right_points)\n print('Coordinates for one side of the ring')\n print(final_left)\n print('\\n\\nCoordinates for the other side of the ring')\n print(final_right)\n\n final_image = make_empty_img_from_img(left)\n x = left_points['x'] + right_points['x']\n y = left_points['y'] + right_points['y']\n z = left_points['z'] + right_points['z']\n for x, y, z in zip(x, y, z):\n final_image.SetPixel(x, y, z, 1)\n\n show_all(img_smooth, final_image)", "def label(self, cfg):\n rep = \"\"\n nl = \"\"\n for node in cfg.nodes:\n rep += nl + \"{}\\tgen={}\\tkill={}\\tout={}\".format(\n node, \n set(self.gen.get(node)),\n set(self.kill.get(node)),\n set(self.out.get(node)))\n nl = \"\\n\"\n return rep", "def test_get_dim_label_with_label(self):\n\n dim = self.oecd_datasets['oecd']['dimension']['id'][0]\n dims_df = pyjstat.get_dim_label(self.oecd_datasets['oecd'], dim)\n self.assertTrue(dims_df.iloc[0]['id'] == 'UNR')\n self.assertTrue(dims_df.iloc[-1]['label'] == 'Unemployment rate')", "def LabelDisks(self):\n pass", "def visualize_detection(self, img, dets, seg, classes=[], thresh=0.6):\n from dataset.cs_labels import labels\n lut = np.zeros((256,3))\n for l in labels:\n if l.trainId<255 and l.trainId>=0:\n lut[l.trainId,:]=list(l.color)\n palette = lut\n # det2seg = {0:6,1:7,2:11,3:12,4:13,5:14,6:15,7:16,8:17,9:18,}\n det2seg = {0:11,1:12,2:13,3:14,4:15,5:16,6:17,7:18,}\n \n import cv2\n import random\n tic = time.time()\n color_white = (255, 255, 255)\n im = cv2.cvtColor(img, cv2.COLOR_RGB2BGR) # change to bgr\n yscale, xscale, ch = im.shape\n color = (0,0,128)\n fontFace = cv2.FONT_HERSHEY_PLAIN\n fontScale = .8*(yscale/float(320))\n thickness = 2 if yscale>320 else 1\n idx = np.argsort(dets[:,6],axis=0)[::-1] ## draw nearest first !!\n dets = dets[idx,:]\n for det in dets:\n cls_id = int(det[0])\n bbox = [det[2]*xscale,det[3]*yscale,det[4]*xscale,det[5]*yscale]\n score = det[1]\n distance = det[-1]\n if score > thresh:\n bbox = map(int, bbox)\n color = palette[det2seg[int(det[0])],(2,1,0)]\n cv2.rectangle(im, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color=color, thickness=thickness)\n text = '%s %.0fm' % (short_class_name[classes[cls_id]], distance*255., )\n textSize, baseLine = cv2.getTextSize(text, fontFace, fontScale, thickness=1)\n cv2.rectangle(im, (bbox[0], bbox[1]-textSize[1]), (bbox[0]+textSize[0], bbox[1]), color=(128,0,0), thickness=-1)\n cv2.putText(im, text, (bbox[0], bbox[1]),\n color=color_white, fontFace=fontFace, fontScale=fontScale, thickness=1)\n disp = im.copy()\n if False: #disp.shape[1]>1000:\n hh, ww, ch = disp.shape\n resized = cv2.resize(disp, (int(round(ww*.92)),int(round(hh*.92))))\n else:\n resized = disp\n cv2.imshow(\"result\", resized)\n # cv2.imwrite(\"data/cityscapes/Results/stuttgart_%06d.png\" % (self.imgidx,), resized)\n # self.imgidx += 1", "def kohonen():\n# plb.close('all')\n \n dim = 28*28\n data_range = 255.0\n \n # load in data and labels \n data = np.array(np.loadtxt('data.txt'))\n labels = np.loadtxt('labels.txt')\n\n # select 4 digits \n name = \"Stettler\"\n targetdigits = name2digits(name) # assign the four digits that should be used\n print(targetdigits) # output the digits that were selected\n\n # this selects all data vectors 
that corresponds to one of the four digits\n data = data[np.logical_or.reduce([labels==x for x in targetdigits]),:]\n \n dy, dx = data.shape\n \n #set the size of the Kohonen map. In this case it will be 6 X 6\n size_k = 6\n \n #set the width of the neighborhood via the width of the gaussian that\n #describes it\n sigma = 2.0\n \n #initialise the centers randomly\n centers = np.random.rand(size_k**2, dim) * data_range\n \n #build a neighborhood matrix\n neighbor = np.arange(size_k**2).reshape((size_k, size_k))\n\n #set the learning rate\n eta = 0.9 # HERE YOU HAVE TO SET YOUR OWN LEARNING RATE\n \n #set the maximal iteration count\n tmax = 5000 # this might or might not work; use your own convergence criterion\n \n #set the random order in which the datapoints should be presented\n i_random = np.arange(tmax) % dy\n np.random.shuffle(i_random)\n \n for t, i in enumerate(i_random):\n som_step(centers, data[i,:],neighbor,eta,sigma)\n\n # for visualization, you can use this:\n for i in range(size_k**2):\n plb.subplot(size_k,size_k,i)\n \n plb.imshow(np.reshape(centers[i,:], [28, 28]),interpolation='bilinear')\n plb.axis('off')\n \n # leave the window open at the end of the loop\n plb.show()\n plb.draw()", "def convert_kicad_coor(edif_pt):\n scale = 10\n return [edif_pt[0] * scale, +edif_pt[1] * scale]", "def create_dimension_labels(gll, parameters: list):\n dimstr = '[ ' + ' | '.join(parameters) + ' ]'\n gll['MODEL/data'].dims[0].label = 'element'\n gll['MODEL/data'].dims[1].label = dimstr\n gll['MODEL/data'].dims[2].label = 'point'", "def add_hdv(self, ROI_id, type_hdv='cum', checkbox_mode=False):\n\n appartenance_contourage = self.dicom_navigation.slice.get_appartenance_contourage(ROI_id)\n \n contourage = Contourage_from_matrice(appartenance_contourage, ROI_id) # On crée un objet 'Contourage_from_matrice' à partir du de la matrice booléenne\n\n dose_matrix = self.dicom_navigation.slice.get_dose_matrix()\n\n # Cas ou on ajoute pour la premiere fois un contourage\n if dose_matrix is None:\n return\n \n doses = Doses_from_matrice(dose_matrix) # On crée un objet 'Doses_from_matrice' à partir de la matrice de doses mise à jour\n\n var = tk.StringVar() # À VENIR... VARIABLE D'ÉTAT QUI INDIQUE SI ON EST EN MODE 'VOLUME RELATF' OU 'VOLUME ABSOLU'. CODÉ EN DUR POUR LE MOMENT\n var.set('r')\n\n self.ddc = Doses_dans_contourage(doses, contourage) # Triage des doses qui sont dans le contourage.\n\n if self.ddc.dose_max == 0: # Si la dose max est 0, on sait qu'on est à l'extérieur de la zone réduite. 
*** \n return\n\n if not ROI_id in self.dict_graph: \n self.dict_graph[ROI_id] = {} \n self.dict_plot[ROI_id] = {} \n self.dict_doses_max[ROI_id] = {} \n if self.dicom_navigation.var_etat_abs_rel.get() == 'a':\n self.dict_volumes_max[ROI_id] = {} \n\n self.dict_doses_max[ROI_id][type_hdv] = self.ddc.dose_max\n\n ###\n\n if self.dicom_navigation.var_etat_abs_rel.get() == 'r': # si on est en mode 'volume relatif', le range des axes sera définit différemment\n facteur = 100.0/self.ddc.nb_voxels # comme l'instance 'axe_volume' créée par les classes hdv_cumulatif et hdv_differentiel contient des données en NOMBRE DE VOXELS\n # (et non en pourcentage ou en volume réel), il faut multiplier ces données par le facteur de conversion approprié (il dépend\n # de si l'on est en mode 'relatf' ou 'absolu').\n\n if self.dicom_navigation.var_etat_abs_rel.get() == 'a': # si on est en mode 'volume absolu'.\n facteur = self.ddc.v_voxel\n self.dict_volumes_max[ROI_id][type_hdv] = self.ddc.v_voxel * self.ddc.nb_voxels \n self.y_lim = get_max_2D_dic(self.dict_volumes_max)\n\n ###\n\n if type_hdv == 'cum':\n hdv = HDV_cumulatif(self.ddc, 100)\n\n if type_hdv == 'diff':\n hdv = HDV_differentiel(self.ddc, 50)\n\n\n self.dict_graph[ROI_id][type_hdv] = hdv\n self.dict_plot[ROI_id][type_hdv], = self.fig.plot(hdv.axe_doses, facteur * hdv.axe_volume)\n\n ###\n\n self.x_lim = get_max_2D_dic(self.dict_doses_max) \n\n self.fig.set_xlim([0, 1.02*self.x_lim]) # dimension de l'axe des x\n self.fig.set_ylim([0, 1.02*self.y_lim]) # dimension de l'axe des y\n\n # Contraintes\n if self.got_contraintes and type_hdv == 'cum': # 'got_contraintes' SERA INITALISÉE À 'TRUE' LORSQUE L'ON AURA RÉCUPÉRÉ LE FICHIER DE CONTRAINTES\n self.dicom_navigation.get_dicom_contraintes().verifier_contraintes_sur_une_ROI(ROI_id)\n\n # Modifier\n if checkbox_mode:\n self.refresh_HDV()", "def instance_label(task, pred, k=15, n_iters=1, dist_thresh=5, watershed=False):\n mask = pred\n\n # noise removal\n if k > 1 and n_iters > 0:\n kernel = np.ones((k, k), np.uint8)\n mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel,\n iterations=n_iters)\n\n if watershed:\n from clab.live import filters\n mask = filters.watershed_filter(mask, dist_thresh=dist_thresh)\n\n mask = mask.astype(np.uint8)\n n_ccs, cc_labels = cv2.connectedComponents(mask, connectivity=4)\n return cc_labels", "def old_ideal_label(I):\n a, c, d = ideal_HNF(I)\n return \"%s.%s.%s\" % (a * d, c, d)", "def setContourLabelString(text=''):\n dislin.conlab(text)", "def label(cmd):\r\n cmd = cmd.replace('make][.DP', 'make1][.NP')\r\n cmd = cmd.replace('make][.SC', 'make2][.SC')\r\n cmd = re.sub('(draw.*)one','\\\\1one1',cmd)\r\n cmd = re.sub('(make1.*)one','\\\\1one1',cmd)\r\n cmd = re.sub('(make2.*)one','\\\\1one2',cmd)\r\n cmd = re.sub('(move.*)one','\\\\1one2',cmd)\r\n cmd = re.sub('(hide.*)one','\\\\1one2',cmd)\r\n cmd = '[result ' + cmd + ']' #dummy function for plop\r\n return cmd", "def label(gt_dataset, volume_dim, voxel_dim, labeling_params):\n labeled_volumes = dict()\n labeled_cells = dict()\n #Use global density and reduce the size of gt_dataset here\n global_density = labeling_params[\"global_density\"]\n gt_dataset = {k: v for k,v in gt_dataset.items() if random_sample() < global_density}\n #Label in the order specified in the configuration\n layers = sorted(labeling_params.keys())\n #Remove global_density\n layers.remove(\"global_density\")\n for layer in layers:\n print \"Labeling {}\".format(layer)\n fluorophore = labeling_params[layer]['fluorophore']\n volume, cells = 
brainbow(gt_dataset, volume_dim, voxel_dim, **labeling_params[layer])\n if fluorophore in labeled_volumes:\n labeled_volumes[fluorophore] += volume\n labeled_cells[fluorophore] |= cells\n else:\n labeled_volumes[fluorophore] = volume\n labeled_cells[fluorophore] = cells\n return labeled_volumes, labeled_cells", "def make_data_label(self):\n data_label = \"\"\n if self.detector is not None:\n data_label += \"%s \"%self.detector\n if self.selection is not None:\n data_label += \"%s Event Selection\"%self.selection\n if data_label == \"\":\n data_label = \"IceCube\"\n return data_label", "def write_label(self, contig_name, width, height, font, title_width, upper_left, vertical_label,\n strand, canvas, horizontal_centering=False, center_vertical=False, chop_text=True,\n label_color=(50, 50, 50, 255)):\n upper_left = list(upper_left) # to make it mutable\n shortened = contig_name[-title_width:] # max length 18. Last characters are most unique\n txt = Image.new('RGBA', (width, height))#, color=(0,0,0,50))\n txt_canvas = ImageDraw.Draw(txt)\n text_width = txt_canvas.textsize(shortened, font)[0]\n if not chop_text and text_width > width:\n txt = Image.new('RGBA', (text_width, height)) # TODO performance around txt_canvas\n txt_canvas = ImageDraw.Draw(txt)\n if center_vertical or vertical_label: # Large labels are centered in the column to look nice,\n # rotation indicates strand in big text\n vertically_centered = (height // 2) - multi_line_height(font, shortened, txt)//2\n else: # Place label at the beginning of gene based on strand\n vertically_centered = height - multi_line_height(font, shortened, txt) # bottom\n if strand == \"+\":\n vertically_centered = 0 # top of the box\n txt_canvas.multiline_text((0, max(0, vertically_centered)), shortened, font=font,\n fill=label_color)\n if vertical_label:\n rotation_direction = 90 if strand == '-' else -90\n txt = txt.rotate(rotation_direction, expand=True)\n upper_left[1] += -4 if strand == '-' else 4\n if horizontal_centering:\n margin = width - text_width\n upper_left[0] += margin // 2\n canvas.paste(txt, (upper_left[0], upper_left[1]), txt)", "def convert_medical_decathlon_labels(mask, cohort, keep_all_label=False):\n label = 12*[0]\n if keep_all_label:\n label += [0,0]\n \n if cohort == 'liver':\n mask[mask == 2] = 6\n mask[mask == 1] = 6\n label[6] = 1\n \n elif cohort == 'pancreas':\n mask[mask == 2] = 11\n mask[mask == 1] = 11\n label[11] = 1\n\n elif cohort == 'spleen':\n mask[mask != 1] = 0\n label[1] = 1 \n\n elif cohort == 'hepatic':\n mask[mask == 2] = 0\n mask[mask == 1] = 0\n \n return mask, label", "def get_labels(self):\n if self.option == \"term\":\n return ['platform characteristics', 'atmospheric winds', 'radio wave','weather events', 'geomagnetism', 'atmospheric electricity','microwave', 'atmospheric temperature', 'atmospheric water vapor','atmospheric pressure', 'aerosols', 'atmospheric radiation','atmospheric chemistry', 'precipitation', 'sensor characteristics','radar', 'infrared wavelengths', 'visible wavelengths','weather/climate advisories', 'clouds', 'lidar', 'ocean optics','ultraviolet wavelengths', 'cryospheric indicators','land use/land cover', 'topography', 'surface thermal properties','spectral/engineering', 'soils', 'snow/ice', 'geothermal dynamics','natural hazards', 'surface water', 'vegetation','land surface/agriculture indicators','gravity/gravitational field', 'marine advisories', 'altitude','water quality/water chemistry', 'ocean temperature','ocean winds', 'atmospheric/ocean indicators', 'coastal 
processes','erosion/sedimentation', 'marine sediments', 'ocean chemistry','salinity/density', 'ocean color', 'aquatic ecosystems','vegetation2', 'landscape', 'cloud properties','surface radiative properties', 'geodetics','agricultural plant science', 'forest science','ecological dynamics', 'environmental impacts', 'sustainability','boundaries', 'ecosystems', 'air quality', 'population','infrastructure', 'environmental governance/management','public health', 'economic resources', 'socioeconomics','environmental vulnerability index (evi)', 'human settlements','agricultural chemicals', 'animal science','habitat conversion/fragmentation', 'animals/vertebrates','earth gases/liquids', 'rocks/minerals/crystals','social behavior', 'ground water', 'frozen ground','terrestrial hydrosphere indicators', 'ocean heat budget','biospheric indicators', 'animal commodities', 'fungi', 'plants','carbon flux', 'geomorphic landforms/processes','paleoclimate indicators', 'ocean circulation', 'sea ice','geochemistry', 'visualization/image processing','subsetting/supersetting', 'transformation/conversion','ocean pressure', 'glaciers/ice sheets', 'protists','solar activity', 'sun-earth interactions','sea surface topography', 'solar energetic particle properties','solar energetic particle flux','ionosphere/magnetosphere dynamics']\n elif self.option == \"mostdepth\":\n return ['flight data logs', 'turbulence', 'radio wave flux', 'lightning', 'magnetic field', 'atmospheric conductivity', 'electric field', 'data synchronization time', 'brightness temperature', 'vertical profiles', 'water vapor profiles', 'air temperature', 'upper level winds', 'atmospheric pressure measurements', 'upper air temperature', 'humidity', 'dew point temperature', 'aerosol particle properties', 'emissivity', 'trace gases/trace species', 'liquid precipitation', 'cloud liquid water/ice', 'microwave radiance', 'sensor counts', 'total pressure', 'airspeed/ground speed', 'total temperature', 'static pressure', 'wind speed', 'wind direction', 'radar reflectivity', 'doppler velocity', 'infrared imagery', 'visible imagery', 'water vapor', 'vertical wind velocity/speed', 'aerosol backscatter', 'weather forecast', 'tropical cyclones', 'visible radiance', 'infrared radiance', 'total precipitable water', 'boundary layer temperature', 'atmospheric temperature indices', 'cloud height', 'flight level winds', 'cloud droplet distribution', 'cloud droplet concentration/size', 'cloud condensation nuclei', 'cloud microphysics', 'hydrometeors', 'ozone', 'wind profiles', 'cloud base temperature', 'cloud base height', 'liquid water equivalent', 'solar radiation', 'planetary boundary layer height', 'surface winds', 'precipitation amount', 'precipitation rate', 'surface pressure', 'rain', 'cloud optical depth/thickness', 'aerosol extinction', 'aerosol optical depth/thickness', 'cirrus cloud systems', 'lidar depolarization ratio', 'radar backscatter', 'radar cross-section', 'return power', 'mean radial velocity', 'radiance', 'air quality', 'climate advisories', 'atmospheric emitted radiation', 'optical depth/thickness', 'surface temperature', 'ultraviolet flux', 'spectrum width', 'microwave imagery', 'lidar backscatter', 'relative humidity', 'u/v wind components', 'wind speed/wind direction', 'radar imagery', 'snow depth', 'land use/land cover classification', 'digital elevation/terrain model (dem)', 'snow', 'droplet size', 'droplet concentration/size', 'drizzle', 'precipitation anomalies', 'snow water equivalent', 'solid precipitation', 'total surface precipitation 
rate', 'particle size distribution', 'skin temperature', 'attitude characteristics', 'land surface temperature', 'hail', 'reflectance', 'soil moisture/water content', 'soil temperature', 'soil bulk density', 'surface roughness', 'present weather', 'snow density', 'ambient temperature', 'aerosol forward scatter', 'floods', 'snow cover', 'sigma naught', 'precipitable water', 'stage height', 'rivers/streams', 'shortwave radiation', 'photosynthetically active radiation', 'longwave radiation', 'net radiation', 'hourly precipitation amount', '24 hour precipitation amount', 'soil moisture', 'satellite orbits/revolution', 'sea surface temperature', 'heat flux', 'latent heat flux', 'cloud fraction', '3 and 6 hour precipitation amount', 'geopotential height', 'particulate matter', 'particle images', 'water vapor indices', 'horizontal wind velocity/speed', 'electrical conductivity', 'dissolved carbon dioxide', 'hurricanes', 'tropical cyclone track', 'convective clouds/systems (observed/analyzed)', 'cloud top height', 'viewing geometry', 'temperature profiles', 'vertical wind shear', 'wind shear', 'carbon monoxide', 'sea level pressure', 'water vapor tendency', 'potential temperature', 'angstrom exponent', 'ultraviolet radiation', 'solar irradiance', 'scattering', 'absorption', 'water vapor mixing ratio profiles', 'sea surface temperature indices', 'extreme eastern tropical pacific sst', 'sedimentation', 'erosion', 'sediment transport', 'sediments', 'tropopause', 'ocean chemistry', 'ocean optics', 'ocean temperature', 'salinity/density', 'pigments', 'ocean color', 'attenuation/transmission', 'inorganic carbon', 'organic carbon', 'photosynthetically available radiation', 'chlorophyll', 'optical depth', 'fluorescence', 'vegetation index', 'gelbstoff', 'phytoplankton', 'vegetation index2', 'cloud precipitable water', 'landscape ecology', 'ultraviolet radiance', 'cloud ceiling', 'aerosol radiance', 'carbonaceous aerosols', 'dust/ash/smoke', 'nitrate particles', 'organic particles', 'sulfate particles', 'radiative flux', 'transmittance', 'atmospheric stability', 'cloud asymmetry', 'cloud frequency', 'cloud top pressure', 'cloud top temperature', 'cloud vertical distribution', 'cloud emissivity', 'cloud radiative forcing', 'cloud reflectance', 'rain storms', 'reflected infrared', 'thermal infrared', 'incoming solar radiation', 'clouds', 'cloud properties', 'cloud types', 'orbital characteristics', 'sensor characteristics', 'maximum/minimum temperature', 'condensation', 'platform characteristics', 'geolocation', 'geodetics', 'coordinate reference system', 'aerosols', 'topographical relief maps', 'terrain elevation', 'normalized difference vegetation index (ndvi)', 'infrared flux', 'visible flux', 'albedo', 'land use/land cover', 'topography', 'lidar', 'lidar waveform', 'plant phenology', 'vegetation cover', 'crop/plant yields', 'land use classes', 'landscape patterns', 'forest harvesting and engineering', 'forest management', 'total surface water', 'agricultural plant science', 'photosynthesis', 'primary production', 'leaf characteristics', 'evapotranspiration', 'fire occurrence', 'surface thermal properties', 'canopy characteristics', 'evergreen vegetation', 'crown', 'deciduous vegetation', 'anisotropy', 'fire ecology', 'biomass burning', 'wildfires', 'topographical relief', 'burned area', 'surface radiative properties', 'environmental sustainability', 'boundaries', 'anthropogenic/human influenced ecosystems', 'emissions', 'sulfur dioxide', 'population', 'infrastructure', 'environmental assessments', 
'public health', 'conservation', 'agriculture production', 'administrative divisions', 'economic resources', 'socioeconomics', 'lake/pond', 'rivers/stream', 'political divisions', 'environmental vulnerability index (evi)', 'ecosystems', 'urban areas', 'sustainability', 'treaty agreements/results', 'human settlements', 'population estimates', 'nitrogen dioxide', 'cropland', 'pasture', 'particulates', 'cyclones', 'mortality', 'environmental impacts', 'droughts', 'earthquakes', 'population distribution', 'fertilizers', 'animal manure and waste', 'urbanization/urban sprawl', 'landslides', 'avalanche', 'urban lands', 'mangroves', 'volcanic eruptions', 'pesticides', 'population size', 'population density', 'lakes/reservoirs', 'surface water', 'rural areas', 'infant mortality rates', 'amphibians', 'mammals', 'carbon', 'sulfur oxides', 'methane', 'non-methane hydrocarbons/volatile organic compounds', 'nitrogen oxides', 'natural gas', 'coal', 'coastal elevation', 'biodiversity functions', 'nuclear radiation exposure', 'radiation exposure', 'poverty levels', 'malnutrition', 'wetlands', 'sea level rise', 'vulnerability levels/index', 'ground water', 'snow/ice', 'electricity', 'energy production/use', 'sustainable development', 'deforestation', 'household income', 'discharge/flow', 'hydropattern', 'nitrogen', 'phosphorus', 'carbon dioxide', 'alpine/tundra', 'forests', 'vegetation', 'permafrost', 'nutrients', 'plant characteristics', 'leaf area index (lai)', 'soil gas/air', 'ammonia', 'nitrous oxide', 'ecosystem functions', 'litter characteristics', 'soil chemistry', 'soil respiration', 'active layer', 'soil depth', 'cation exchange capacity', 'organic matter', 'soil porosity', 'soil texture', 'permafrost melt', 'land subsidence', 'freeze/thaw', 'surface water features', 'chlorinated hydrocarbons', 'methyl bromide', 'methyl chloride', 'molecular hydrogen', 'sulfur compounds', 'fire models', 'biomass', 'dominant species', 'vegetation species', 'sulfur', 'tree rings', 'soil classification', 'heat index', 'sea ice concentration', 'ocean heat budget', 'reforestation', 'even-toed ungulates', 'species recruitment', 'population dynamics', 'range changes', 'topographic effects', 'land resources', 'river ice depth/extent', 'snow melt', 'river ice', 'animal commodities', 'animal ecology and behavior', 'phenological changes', 'water depth', 'inundation', 'forest fire science', 'biogeochemical cycles', 'radiative forcing', 'soil heat budget', 'drainage', 'respiration rate', 'river/lake ice breakup', 'river/lake ice freeze', 'reclamation/revegetation/restoration', 'permafrost temperature', 'indigenous/native species', 'fire dynamics', 'lichens', 'plants', 'plant succession', 'carbon flux', 'coastal', 'salt marsh', 'degradation', 'altitude', 'carbon and hydrocarbon compounds', 'halocarbons and halogens', 'forest composition/vegetation structure', 'water vapor indicators', 'barometric altitude', 'atmospheric water vapor', 'terrestrial ecosystems', 'volatile organic compounds', 'boundary layer winds', 'forest fire danger index', 'periglacial processes', 'landscape processes', 'evaporation', 'soil horizons/profile', 'shrubland/scrub', 'soil ph', 'soils', 'soil water holding capacity', 'community structure', 'pingo', 'soil color', 'virtual temperature', 'formaldehyde', 'hydroxyl', 'photolysis rates', 'cloud dynamics', 'nitric oxide', 'molecular oxygen', 'smog', 'peroxyacyl nitrate', 'hydrogen compounds', 'nitrogen compounds', 'oxygen compounds', 'stable isotopes', 'chemical composition', 'actinic flux', 'tropospheric 
ozone', 'fossil fuel burning', 'industrial emissions', 'denitrification rate', 'sunshine', 'runoff', 'soil structure', 'mosses/hornworts/liverworts', 'peatlands', 'hydraulic conductivity', 'snow/ice temperature', 'vegetation water content', 'discharge', 'chlorophyll concentrations', 'outgoing longwave radiation', 'geomorphic landforms/processes', 'soil compaction', 'soil impedance', 'canopy transmittance', 'water table', 'decomposition', 'water temperature', 'dissolved gases', 'total dissolved solids', 'agricultural expansion', 'forest science', 'pressure tendency', 'visibility', 'biomass dynamics', 'agricultural lands', 'grasslands', 'savannas', 'grazing dynamics/plant herbivory', 'herbivory', 'paleoclimate reconstructions', 'drought indices', 'fire weather index', 'animal yields', 'multivariate enso index', 'dissolved solids', 'ocean currents', 'salinity', 'coastal processes', 'atmospheric pressure', 'afforestation/reforestation', 'fresh water river discharge', 'surface water chemistry', 'drainage basins', 'resource development site', 'dunes', 'flood plain', 'endangered species', 'precipitation indices', 'temperature indices', 'forest yields', 'stratigraphic sequence', 'freeze/frost', 'frost', 'hydrogen cyanide', 'land management', 'nutrient cycling', 'industrialization', 'suspended solids', 'deserts', 'weathering', 'gas flaring', 'atmospheric temperature', 'ice extent', 'fraction of absorbed photosynthetically active radiation (fapar)', 'marshes', 'swamps', 'lake ice', 'atmospheric winds', 'watershed characteristics', 'transportation', 'soil rooting depth', 'isotopes', 'cultural features', 'consumer behavior', 'boundary surveys', 'aquifers', 'land productivity', 'water quality/water chemistry', 'sediment composition', 'dissolved oxygen', 'surface water processes/measurements', 'turbidity', 'conductivity', 'ph', 'calcium', 'magnesium', 'potassium', 'micronutrients/trace elements', 'social behavior', 'sulfate', 'sediment chemistry', 'biogeochemical processes', 'water ion concentrations', 'cropping systems', 'percolation', 'groundwater chemistry', 'reforestation/revegetation', 'species/population interactions', 'soil infiltration', 'alkalinity', 'soil fertility', 'phosphorous compounds', 'radioisotopes', 'cooling degree days', 'angiosperms (flowering plants)', 'glacial landforms', 'glacial processes', 'contour maps', 'estuaries', 'methane production/use', 'natural gas production/use', 'petroleum production/use', 'visualization/image processing', 'subsetting/supersetting', 'transformation/conversion', 'forest mensuration', 'acid deposition', 'differential pressure', 'precipitation', 'marine ecosystems', 'consumption rates', 'radio wave', 'soil organic carbon (soc)', 'soil erosion', 'halocarbons', 'trace elements/trace metals', 'biomass energy production/use', 'riparian wetlands', 'soil consistence', 'snow stratigraphy', 'thermal conductivity', 'estuary', 'tidal height', 'plant diseases/disorders/pests', 'layered precipitable water', 'atmospheric chemistry', 'water vapor concentration profiles', 'specific humidity', 'total runoff', 'pressure thickness', 'wind stress', 'atmospheric heating', 'conduction', 'hydrogen chloride', 'nitric acid', 'radar', 'land surface/agriculture indicators', 'satellite soil moisture index', 'chlorine nitrate', 'chlorofluorocarbons', 'dinitrogen pentoxide', 'antenna temperature', 'glaciers', 'ice sheets', 'dimethyl sulfide', 'potential vorticity', 'ice fraction', 'atmospheric radiation', 'runoff rate', 'temperature tendency', 'wind dynamics', 'wind direction 
tendency', 'base flow', 'bromine monoxide', 'chlorine monoxide', 'methyl cyanide', 'hypochlorous acid', 'methanol', 'hydroperoxy', 'cloud base pressure', 'temperature anomalies', 'nitrate', 'ocean mixed layer', 'precipitation trends', 'temperature trends', 'convection', 'ground ice', 'oxygen', 'phosphate', 'solar induced fluorescence', 'chlorine dioxide', 'sun-earth interactions', 'uv aerosol index', 'volcanic activity', 'potential evapotranspiration', 'ultraviolet wavelengths', 'ice temperature', 'sea surface skin temperature', 'sea surface height', 'sublimation', 'convective surface precipitation rate', 'hydrogen fluoride', 'airglow', 'energy deposition', 'x-ray flux', 'electron flux', 'proton flux', 'magnetic fields/magnetic currents']\n else:\n return ['platform characteristics', 'atmospheric winds','radio wave', 'weather events', 'geomagnetism','atmospheric electricity', 'microwave', 'atmospheric temperature','atmospheric water vapor', 'atmospheric pressure', 'aerosols','atmospheric radiation', 'atmospheric chemistry', 'precipitation','sensor characteristics', 'radar', 'infrared wavelengths','visible wavelengths', 'weather/climate advisories', 'clouds','lidar', 'ocean optics', 'ultraviolet wavelengths','cryospheric indicators', 'land use/land cover', 'topography','surface thermal properties', 'spectral/engineering', 'soils','snow/ice', 'geothermal dynamics', 'natural hazards','surface water', 'vegetation','land surface/agriculture indicators','gravity/gravitational field', 'marine advisories', 'altitude','water quality/water chemistry', 'ocean temperature','ocean winds', 'atmospheric/ocean indicators', 'coastal processes','erosion/sedimentation', 'marine sediments', 'ocean chemistry','salinity/density', 'ocean color', 'aquatic ecosystems','vegetation2', 'landscape', 'cloud properties','surface radiative properties', 'geodetics','agricultural plant science', 'forest science','ecological dynamics', 'environmental impacts', 'sustainability','boundaries', 'ecosystems', 'air quality', 'population','infrastructure', 'environmental governance/management','public health', 'economic resources', 'socioeconomics','environmental vulnerability index (evi)', 'human settlements','agricultural chemicals', 'animal science','habitat conversion/fragmentation', 'animals/vertebrates','earth gases/liquids', 'rocks/minerals/crystals','social behavior', 'ground water', 'frozen ground','terrestrial hydrosphere indicators', 'ocean heat budget','biospheric indicators', 'animal commodities', 'fungi', 'plants','carbon flux', 'geomorphic landforms/processes','paleoclimate indicators', 'ocean circulation', 'sea ice','geochemistry', 'visualization/image processing','subsetting/supersetting', 'transformation/conversion','ocean pressure', 'glaciers/ice sheets', 'protists','solar activity', 'sun-earth interactions','sea surface topography', 'solar energetic particle properties','solar energetic particle flux','ionosphere/magnetosphere dynamics','flight data logs','wind dynamics', 'radio wave flux', 'lightning', 'magnetic field','atmospheric conductivity', 'electric field','data synchronization time', 'brightness temperature','upper air temperature', 'water vapor profiles','surface temperature', 'upper level winds','atmospheric pressure measurements', 'water vapor indicators','aerosol particle properties', 'emissivity','trace gases/trace species', 'liquid precipitation','cloud microphysics', 'microwave radiance', 'sensor counts','total pressure', 'airspeed/ground speed', 'total temperature','static pressure', 
'humidity', 'radar reflectivity','doppler velocity', 'infrared imagery', 'visible imagery','aerosol backscatter', 'weather forecast', 'tropical cyclones','visible radiance', 'infrared radiance','atmospheric temperature indices', 'cloud droplet distribution','cloud condensation nuclei', 'hydrometeors', 'oxygen compounds','wind profiles', 'liquid water equivalent', 'solar radiation','planetary boundary layer height', 'surface winds','precipitation amount', 'precipitation rate', 'surface pressure','aerosol extinction', 'aerosol optical depth/thickness','tropospheric/high-level clouds (observed/analyzed)','lidar depolarization ratio', 'radar backscatter','radar cross-section', 'return power', 'radial velocity','radiance', 'climate advisories', 'atmospheric emitted radiation','optical depth/thickness', 'ultraviolet flux', 'spectrum width','microwave imagery', 'lidar backscatter', 'radar imagery','snow depth', 'land use/land cover classification','terrain elevation', 'solid precipitation', 'droplet size','droplet concentration/size', 'precipitation anomalies','snow water equivalent', 'total surface precipitation rate','skin temperature', 'water vapor', 'attitude characteristics','land surface temperature', 'reflectance','soil moisture/water content', 'soil temperature','soil bulk density', 'surface roughness', 'present weather','snow density', 'geothermal temperature','aerosol forward scatter', 'floods', 'snow cover', 'sigma naught','precipitable water', 'surface water processes/measurements','surface water features', 'shortwave radiation','photosynthetically active radiation', 'longwave radiation','net radiation', 'flight level winds', 'soil moisture','satellite orbits/revolution', 'heat flux','precipitation profiles', 'geopotential height','particulate matter', 'particle images', 'water vapor indices','electrical conductivity', 'gases', 'sea surface temperature','convective clouds/systems (observed/analyzed)','viewing geometry', 'wind shear','carbon and hydrocarbon compounds', 'sea level pressure','water vapor processes', 'ultraviolet radiation','solar irradiance', 'scattering', 'absorption','sea surface temperature indices', 'sedimentation', 'erosion','sediment transport', 'sediments', 'tropopause', 'nan', 'pigments','attenuation/transmission', 'inorganic carbon', 'organic carbon','photosynthetically available radiation', 'chlorophyll','optical depth', 'fluorescence', 'vegetation index', 'gelbstoff','plankton', 'vegetation index2', 'landscape ecology','ultraviolet radiance', 'aerosol radiance','carbonaceous aerosols', 'dust/ash/smoke', 'nitrate particles','organic particles', 'sulfate particles', 'radiative flux','transmittance', 'atmospheric stability','cloud radiative transfer', 'rain storms', 'reflected infrared','thermal infrared', 'incoming solar radiation', 'cloud types','orbital characteristics', 'geolocation','coordinate reference system', 'infrared flux', 'visible flux','albedo', 'lidar waveform', 'plant phenology', 'vegetation cover','crop/plant yields', 'land use classes', 'landscape patterns','forest harvesting and engineering', 'forest management','ecosystem functions', 'leaf characteristics', 'fire ecology','total surface water', 'primary production', 'photosynthesis','canopy characteristics', 'evergreen vegetation', 'crown','deciduous vegetation', 'anisotropy', 'biomass burning','wildfires', 'topographical relief','environmental sustainability','anthropogenic/human influenced ecosystems', 'emissions','sulfur compounds', 'environmental assessments', 'conservation','agriculture 
production', 'administrative divisions','freshwater ecosystems', 'political divisions', 'urban areas','treaty agreements/results', 'population estimates','nitrogen compounds', 'particulates', 'mortality', 'droughts','earthquakes', 'population distribution', 'fertilizers','animal manure and waste', 'urbanization/urban sprawl','landslides', 'avalanche', 'mangroves', 'volcanic eruptions','pesticides', 'population size', 'population density','rural areas', 'amphibians', 'mammals', 'carbon', 'sulfur oxides','land management', 'natural gas', 'sedimentary rocks','coastal elevation', 'community dynamics','nuclear radiation exposure', 'radiation exposure','poverty levels', 'malnutrition', 'sea level rise','vulnerability levels/index', 'electricity','energy production/use', 'sustainable development','deforestation', 'household income', 'nitrogen', 'phosphorus','terrestrial ecosystems', 'permafrost', 'nutrients','plant characteristics', 'soil gas/air', 'litter characteristics','soil chemistry', 'soil respiration', 'active layer', 'soil depth','cation exchange capacity', 'organic matter', 'soil porosity','soil texture', 'permafrost melt','ground water processes/measurements', 'freeze/thaw','halocarbons and halogens', 'hydrogen compounds', 'biomass','dominant species', 'vegetation species', 'sulfur', 'tree rings','soil classification', 'sea ice concentration', 'reforestation','species/population interactions', 'range changes','topographic effects', 'land resources', 'river ice depth/extent','snow melt', 'river ice', 'animal ecology and behavior','phenological changes', 'forest fire science', 'radiative forcing','soil heat budget', 'river/lake ice breakup','river/lake ice freeze', 'reclamation/revegetation/restoration','lichens', 'marine ecosystems', 'coastal landforms', 'degradation','forest composition/vegetation structure', 'barometric altitude','volatile organic compounds', 'forest fire danger index','periglacial processes', 'landscape processes','soil horizons/profile', 'soil ph', 'soil water holding capacity','fluvial landforms', 'soil color', 'glacial processes','photochemistry', 'cloud dynamics', 'nitrogen oxides', 'smog','chemical composition', 'actinic flux', 'tropospheric ozone','fossil fuel burning', 'industrial emissions','denitrification rate', 'sunshine', 'soil structure','mosses/hornworts/liverworts', 'hydraulic conductivity','snow/ice temperature', 'water characteristics','outgoing longwave radiation', 'soil compaction', 'soil impedance','canopy transmittance', 'ground water features', 'solids','agricultural expansion', 'pressure tendency', 'visibility','herbivory', 'paleoclimate reconstructions', 'drought indices','fire weather index', 'animal yields', 'teleconnections','carbon dioxide', 'dissolved solids', 'ocean currents', 'salinity','afforestation/reforestation', 'fresh water river discharge','surface water chemistry', 'aeolian landforms','precipitation indices', 'temperature indices', 'forest yields','stratigraphic sequence', 'freeze/frost', 'frost','industrialization', 'ice core records', 'suspended solids','weathering', 'gas flaring', 'ice extent', 'biogeochemical cycles','lake ice', 'isotopes', 'watershed characteristics','transportation', 'soil rooting depth', 'geochemical properties','carbon monoxide', 'cultural features', 'consumer behavior','boundary surveys', 'land productivity', 'sediment composition','calcium', 'magnesium', 'potassium','micronutrients/trace elements', 'sediment chemistry','biogeochemical processes', 'cropping systems','groundwater chemistry', 
'reforestation/revegetation','soil infiltration', 'soil fertility','angiosperms (flowering plants)', 'glacial landforms','forest mensuration', 'acid deposition', 'differential pressure','soil erosion', 'trace elements/trace metals', 'soil consistence','snow stratigraphy', 'thermal conductivity', 'estuaries','tidal height', 'plant diseases/disorders/pests','pressure thickness', 'atmospheric heating', 'conduction','evaporation', 'turbulence', 'wind stress','satellite soil moisture index', 'antenna temperature', 'glaciers','ice sheets', 'nitrate', 'ocean mixed layer','precipitation indicators', 'temperature indicators', 'ground ice','alkalinity', 'dissolved gases', 'oxygen', 'ph', 'phosphate','solar induced fluorescence', 'volcanic activity','ice temperature', 'sea surface height', 'airglow','energy deposition', 'x-ray flux', 'electron flux', 'proton flux','magnetic fields/magnetic currents', 'vertical profiles','air temperature', 'dew point temperature','cloud liquid water/ice', 'wind speed', 'wind direction','vertical wind velocity/speed', 'total precipitable water','boundary layer temperature', 'cloud height','cloud droplet concentration/size', 'ozone','cloud base temperature', 'cloud base height', 'rain','cloud optical depth/thickness', 'cirrus/systems','mean radial velocity', 'relative humidity', 'u/v wind components','wind speed/wind direction','digital elevation/terrain model (dem)', 'snow', 'drizzle','particle size distribution', 'hail', 'ambient temperature','stage height', 'rivers/streams', 'hourly precipitation amount','24 hour precipitation amount', 'latent heat flux','cloud fraction', '3 and 6 hour precipitation amount','horizontal wind velocity/speed', 'dissolved carbon dioxide','hurricanes', 'tropical cyclone track', 'cloud top height','temperature profiles', 'vertical wind shear','water vapor tendency', 'potential temperature','angstrom exponent', 'water vapor mixing ratio profiles','extreme eastern tropical pacific sst', 'phytoplankton','cloud precipitable water', 'cloud asymmetry', 'cloud ceiling','cloud frequency', 'cloud top pressure', 'cloud top temperature','cloud vertical distribution', 'cloud emissivity','cloud radiative forcing', 'cloud reflectance','maximum/minimum temperature', 'condensation','topographical relief maps', 'evapotranspiration','fire occurrence', 'burned area', 'sulfur dioxide', 'lake/pond','rivers/stream', 'nitrogen dioxide', 'agricultural lands','cyclones', 'urban lands', 'lakes/reservoirs','infant mortality rates', 'methane','non-methane hydrocarbons/volatile organic compounds', 'coal','biodiversity functions', 'wetlands', 'discharge/flow','hydropattern', 'alpine/tundra', 'forests','leaf area index (lai)', 'ammonia', 'nitrous oxide','land subsidence', 'normalized difference vegetation index (ndvi)','chlorinated hydrocarbons', 'methyl bromide', 'methyl chloride','molecular hydrogen', 'fire models', 'heat index','even-toed ungulates', 'species recruitment','population dynamics', 'water depth', 'inundation', 'drainage','respiration rate', 'permafrost temperature','indigenous/native species', 'fire dynamics', 'plant succession','coastal', 'salt marsh', 'boundary layer winds', 'shrubland/scrub','community structure', 'pingo', 'virtual temperature','formaldehyde', 'hydroxyl', 'photolysis rates', 'nitric oxide','molecular oxygen', 'peroxyacyl nitrate', 'stable isotopes','runoff', 'vegetation water content', 'discharge','chlorophyll concentrations', 'water table', 'decomposition','water temperature', 'total dissolved solids', 'biomass 
dynamics','grasslands', 'savannas', 'grazing dynamics/plant herbivory','multivariate enso index', 'drainage basins','resource development site', 'dunes', 'flood plain','endangered species', 'hydrogen cyanide', 'nutrient cycling','deserts','fraction of absorbed photosynthetically active radiation (fapar)','aquifers', 'dissolved oxygen', 'turbidity', 'conductivity','sulfate', 'water ion concentrations', 'percolation','phosphorous compounds', 'radioisotopes', 'cooling degree days','contour maps', 'methane production/use','natural gas production/use', 'petroleum production/use','consumption rates', 'soil organic carbon (soc)', 'halocarbons','biomass energy production/use', 'estuary','layered precipitable water', 'water vapor concentration profiles','hydrogen chloride', 'nitric acid', 'chlorine nitrate','chlorofluorocarbons', 'dinitrogen pentoxide', 'dimethyl sulfide','vorticity', 'ice fraction', 'temperature tendency','wind direction tendency', 'bromine monoxide', 'chlorine monoxide','methyl cyanide', 'hypochlorous acid', 'methanol', 'hydroperoxy','cloud base pressure', 'temperature anomalies','precipitation trends', 'temperature trends', 'convection','chlorine dioxide', 'uv aerosol index','sea surface skin temperature', 'sublimation','convective surface precipitation rate', 'hydrogen fluoride']", "def setContourLabelDistance(distance=500):\n dislin.labdis(distance, 'CONTUR')", "def eval_final_label(args):\n cfg, lbl = util.get_label_cfg_by_args(args)\n uid = cfg['uniqueid']\n print('We are playing with %s' % uid)\n outdir='models/%s/gate_expert' % uid\n outname='gate_expert_model.pt'\n if KLLOSS:\n outname = 'gate_expert_kldiv_model.pt'\n if args.warm:\n outname = outname.replace('.pt', '_warm.pt')\n mdl_path = os.path.join(outdir, outname)\n gate_expert = GateExpertNet(mdl_path, False)\n eval_fun = gate_expert.get_p_y\n\n data = npload(cfg['file_path'], uid)\n datax = data[cfg['x_name']]\n p, v = eval_fun(datax)\n\n label = np.argmax(p, axis=1)\n\n if args.draw:\n fig, ax = plt.subplots()\n n_expert = np.amax(label) + 1\n for i in range(n_expert):\n mask = label == i\n ax.scatter(datax[mask, 0], datax[mask, 1])\n plt.show()\n\n label_name = 'data/pen/gate_expert_label.npy'\n if KLLOSS:\n label_name = label_name.replace('_label', '_kldiv_label')\n if args.warm:\n label_name = label_name.replace('.npy', '_warm.npy')\n np.save(label_name, label)", "def donut_highD(N=1500, V=1500, binary=True, d=3):\n X = torch.randn(N + V, d)\n R = torch.sqrt(torch.sum(X ** 2, dim=1))\n ind_class0 = (R < 1.0).nonzero()[:, 0]\n ind_class1 = (R >= 1.0).nonzero()[:, 0]\n X = X * 1.2 # rescale data\n X.add_(0.2 * torch.randn_like(X)) # add some noise to the data\n \n return label(d, X, ind_class0, ind_class1, N, V, binary)", "def label(self):\r\n raise NotImplementedError", "def cell_description(self, gid):\n\n tree = arbor.segment_tree()\n\n tree.append(\n arbor.mnpos,\n arbor.mpoint(0, 0, 0, self.radius),\n arbor.mpoint(self.length, 0, 0, self.radius),\n tag=1,\n )\n\n labels = arbor.label_dict({\"cable\": \"(tag 1)\", \"start\": \"(location 0 0)\"})\n\n decor = (\n arbor.decor()\n .set_property(Vm=self.Vm, cm=self.cm, rL=self.rL)\n .paint('\"cable\"', arbor.density(f\"pas/e={self.Vm}\", g=self.g))\n .place(\n '\"start\"',\n arbor.iclamp(\n self.stimulus_start, self.stimulus_duration, self.stimulus_amplitude\n ),\n \"iclamp\",\n )\n )\n\n policy = arbor.cv_policy_max_extent(self.cv_policy_max_extent)\n decor.discretization(policy)\n\n return arbor.cable_cell(tree, decor, labels)", "def label(cmd):\n cmd = 
cmd.replace('make][.DP', 'make1][.NP')\n cmd = cmd.replace('make][.SC', 'make2][.SC')\n cmd = re.sub('(draw.*)one','\\\\1one1',cmd)\n cmd = re.sub('(make1.*)one','\\\\1one1',cmd)\n cmd = re.sub('(make2.*)one','\\\\1one2',cmd)\n cmd = re.sub('(move.*)one','\\\\1one2',cmd)\n cmd = re.sub('(hide.*)one','\\\\1one2',cmd)\n cmd = '[result ' + cmd + ']' #dummy function for plop\n return cmd", "def plot_countryperskill(data_df, **args):\n name = args.get('name', 'VARIABLE NAME')\n idx = args.get('idx', data_df.index.values)\n order = args.get('order', np.array([9, 0, 1, 2, 3, 4, 5, 6, 8, 7], int))\n dd = args.get('dd', .7) # 3.3\n wdth = args.get('wdth', 8) # 7\n hght = args.get('hght', 4)\n markersize = 60\n target_y = args.get('target_y', 1)\n label_y = args.get('label_y', r'$\\rho$')\n colors14 = args.get('colors14', ['#a6cee3', '#1f78b4', '#b2df8a', '#33a02c', \\\n '#fb9a99', '#e31a1c', '#fdbf6f', '#ff7f00', \\\n '#cab2d6', '#6a3d9a', '#ffff99', '#b15928', \\\n '#dd1c77', '#8dd3c7'])\n plt.figure(facecolor='w', figsize=(wdth, hght))\n meth_labels = [r'$Lit$', r'$Lit^2$', r'$Lit^3$', r'$Lit^4$', r'$Lit^5$', \\\n r'$Pop$', r'$Pop^2$', r'$Lit^3Pop$', r'$Lit^2Pop$', r'$LitPop$']\n idx = idx[order]\n meth_labels = [meth_labels[i] for i in order]\n # empty plots for legend handlers:\n for i in np.arange(0, len(countries_sel)): # country\n plt.scatter([], [], marker='o', s=markersize, edgecolor='black', linewidth='.4',\\\n c=colors14[i], label=countries[countries_sel[i]])\n plt.legend()\n\n plt.scatter([0, len(idx)+dd], [0.7, 0.7], marker='.', lw=1, c='white') # legendspace\n\n # actual plotting:\n for i in np.arange(0, len(countries_sel)): # country\n for j in np.arange(0, len(idx)):\n # rp - pearson correlation:\n plt.scatter([j], data_df[countries[countries_sel[i]]][idx[j]], marker='o', \\\n s=markersize, edgecolor='black', linewidth='.4',\\\n alpha=1., c=colors14[i], zorder=j+10)\n if not target_y == 'none':\n plt.plot([0, j], [target_y, target_y], c='#d3d3d3', lw=5, ls='-', zorder=1)\n\n plt.xticks(np.arange(0, len(idx)), meth_labels, color='black', rotation=30)\n plt.grid(axis='y')\n # plt.xlabel('Method')\n plt.ylabel(label_y)\n plt.title(name)\n\n plt.savefig(os.path.join(output_path, experiment_name + '_' + 'allcountries_perScore_v4_' + name + '.pdf'),\\\n dpi=600, facecolor='w', edgecolor='w',\n orientation='portrait', papertype=None, format='pdf',\n transparent=False, bbox_inches=None, pad_inches=0.1,\n frameon=None, metadata=None)\n plt.show()", "def showOneNeedle(self,i,visibility):\n #obsolete\n profbox()\n fidname = \"fid\"+self.option[i]\n pNode = self.parameterNode()\n needleID = pNode.GetParameter(self.option[i]+'.vtp')\n fidID = pNode.GetParameter(fidname) \n NeedleNode = slicer.mrmlScene.GetNodeByID(needleID)\n fiducialNode = slicer.mrmlScene.GetNodeByID(fidID) \n \n if NeedleNode !=None:\n displayNode =NeedleNode.GetModelDisplayNode()\n nVisibility=displayNode.GetVisibility() \n\n if fiducialNode == None:\n displayNode.SetVisibility(1) \n displayNode.SetOpacity(0.9)\n polyData = NeedleNode.GetPolyData()\n polyData.Update()\n nb = int(polyData.GetNumberOfPoints()-1)\n coord = [0,0,0]\n if nb>100:\n fiducialNode = slicer.vtkMRMLAnnotationFiducialNode()\n polyData.GetPoint(nb,coord) \n fiducialNode.SetName(self.option[i])\n fiducialNode.SetFiducialCoordinates(coord) \n fiducialNode.Initialize(slicer.mrmlScene)\n fiducialNode.SetLocked(1)\n fiducialNode.SetSelectable(0)\n fidDN = fiducialNode.GetDisplayNode()\n fidDN.SetColor(NeedleNode.GetDisplayNode().GetColor())\n 
fidDN.SetGlyphScale(0)\n fidTN = fiducialNode.GetAnnotationTextDisplayNode()\n fidTN.SetTextScale(3)\n fidTN.SetColor(NeedleNode.GetDisplayNode().GetColor())\n fiducialNode.SetDisplayVisibility(0)\n pNode.SetParameter(fidname,fiducialNode.GetID())\n fiducialNode.SetDisplayVisibility(1)\n\n if visibility ==0:\n\n displayNode.SetVisibility(0)\n displayNode.SetSliceIntersectionVisibility(0)\n if fiducialNode!=None:\n fiducialNode.SetDisplayVisibility(0)\n\n else:\n\n displayNode.SetVisibility(1)\n displayNode.SetSliceIntersectionVisibility(1)\n if fiducialNode!=None:\n fiducialNode.SetDisplayVisibility(1)\n\n else:\n vtkmat = vtk.vtkMatrix4x4()\n vtkmat.DeepCopy(self.m_vtkmat)\n vtkmat.SetElement(0,3,self.m_vtkmat.GetElement(0,3)+self.p[0][i])\n vtkmat.SetElement(1,3,self.m_vtkmat.GetElement(1,3)+self.p[1][i])\n vtkmat.SetElement(2,3,self.m_vtkmat.GetElement(2,3)+(30.0-150.0)/2.0)\n\n TransformPolyDataFilter=vtk.vtkTransformPolyDataFilter()\n Transform=vtk.vtkTransform() \n TransformPolyDataFilter.SetInput(self.m_polyCylinder)\n Transform.SetMatrix(vtkmat)\n TransformPolyDataFilter.SetTransform(Transform)\n TransformPolyDataFilter.Update()\n\n triangles=vtk.vtkTriangleFilter()\n triangles.SetInput(TransformPolyDataFilter.GetOutput()) \n self.AddModel(i,triangles.GetOutput())\n self.showOneNeedle(i,visibility)", "def getLabel(*args):", "def getLabel(*args):", "def getLabel(*args):", "def drawlabels(t, t1):\r\n t.fd(250)\r\n t.pd()\r\n t.write(\"Life\", font=(\"Arial\", 10, \"bold\"))\r\n t.pu()\r\n t.back(12)\r\n t.pd()\r\n t.write(\"Exp.\", font=(\"Arial\", 10, \"bold\"))\r\n t.pu()\r\n t.back(238)\r\n t.right(90)\r\n t.fd(80)\r\n t1.pu()\r\n t1.back(50)\r\n t1.rt(90)\r\n t1.fd(250)\r\n t1.pd()\r\n t1.write(\"Year\", font=(\"Arial\", 10, \"bold\"))\r\n t1.pu()\r\n t1.back(250)\r\n t1.left(90)\r\n t1.fd(50)", "def ex_label(self,label,argl):\n if len(label) > 0 and label[0] != '_':\n return label\n comment = ''\n for i in argl:\n phrase = ''\n if i == 'l':\n phrase = label\n elif i in self._labels.keys():\n phrase = self._labels[i]\n comment += phrase\n return comment", "def efficient_tagged_jets_hist(datalist,discriminant, discriminant_cut, CSV_cut, bins, Difference=False, mode=\"pT_jet\",Save=False):\n title = \"tagged_jets_vs_\"+mode\n AllJetsHistlist = []\n CSVHistlist = []\n DiscriminantHistlist = []\n if mode == \"pT_hadron\":\n feature = 2\n elif mode == \"pT_jet\":\n feature = 3\n elif mode == \"decay_vx\":\n feature = 4\n for n,data in enumerate(datalist):\n print \"working on\",data[1]\n ran = data[2]\n AllJetsHistlist.append(rt.TH1D(data[1]+\"_AllJets\",data[1]+\"_\"+title,bins,ran[0],ran[1]))\n AllJetsHistlist[n].SetLineColor(4)\n CSVHistlist.append(rt.TH1D(data[1]+\"_CSV\",data[1]+\"_\"+title,bins,ran[0],ran[1]))\n CSVHistlist[n].SetLineColor(3)\n DiscriminantHistlist.append(rt.TH1D(data[1]+\"_Discriminant\",data[1]+\"_\"+title,bins,ran[0],ran[1]))\n DiscriminantHistlist[n].SetLineColor(2)\n for particle in data[0]:\n AllJetsHistlist[n].Fill(particle[feature])\n if particle[1] >= CSV_cut: CSVHistlist[n].Fill(particle[feature])\n if Difference:\n L = particle[8]-particle[5]\n else:\n if particle[13] != 0:\n L = particle[16]/float(particle[13])\n else:\n continue\n if L >= discriminant_cut: DiscriminantHistlist[n].Fill(particle[feature])\n canvaslist = []\n legendlist = []\n Tfilelist = []\n for n,data in enumerate(datalist):\n canvaslist.append(rt.TCanvas(data[1]+\"_canvas\",\"canvas\",600,600))\n canvaslist[n].SetTitle(data[1]+\"_\"+title)\n rt.gStyle.SetOptStat(0)\n 
legendlist.append(rt.TLegend(0.9,0.9,0.65,0.75))\n legendlist[n].AddEntry(AllJetsHistlist[n], \"All jets\")\n legendlist[n].AddEntry(CSVHistlist[n], \"CSV\")\n legendlist[n].AddEntry(DiscriminantHistlist[n], discriminant)\n AllJetsHistlist[n].GetXaxis().SetTitle(mode)\n AllJetsHistlist[n].GetYaxis().SetTitle('# jets')\n AllJetsHistlist[n].GetYaxis().SetTitleOffset(1.5)\n #AllJetsHistlist[n].Draw()\n #CSVHistlist[n].Draw(\"SAME\")\n #DiscriminantHistlist[n].Draw(\"SAME\")\n #legendlist[n].Draw()\n if Save:\n #canvaslist[n].SaveAs(\"Thesis_Plots/\"+title+\"_\"+data[1]+discriminant+\".png\")\n Tfilelist.append(rt.TFile(\"Thesis_Plots/root_files/\"+title+\"_\"+data[1]+discriminant+\".root\",\"recreate\"))\n AllJetsHistlist[n].Write()\n CSVHistlist[n].Write()\n DiscriminantHistlist[n].Write()", "def autolabel(rects):", "def __init__(self, label_num, des_dir, des_dim=48):\n self.label_num = label_num\n self.des_dir = des_dir\n self.des_dim = 48", "def _get_labels(self, ind):\n pass", "def diet_label(self, diet_label):\n\n self._diet_label = diet_label", "def dim_reduction_plot(data, label, block_flag):\n \n PCA_model = TruncatedSVD(n_components=3).fit(data)\n data_PCA = PCA_model.transform(data)\n idxc1 = np.where(label==0)\n idxc2 = np.where(label==1)\n plt.scatter(data_PCA[idxc1,0],data_PCA[idxc1,1],s=80,c='r', marker='^',linewidths = 0, label='healthy')\n plt.scatter(data_PCA[idxc2,0],data_PCA[idxc2,1],s=80,c='y', marker='o',linewidths = 0, label='infected')\n plt.gca().axes.get_xaxis().set_ticks([])\n plt.gca().axes.get_yaxis().set_ticks([])\n plt.title('PCA of the codes')\n plt.legend(scatterpoints=1,loc='best')\n plt.show(block=block_flag)", "def plotrgcloud(self):\n print self.kpunten\n for i in range(len(self.kpunten[0])):\n self.writetext('sen ='+ self.kpunten[0][i][0], (0.65,0.85), axnum = 0, hor = None ,ver = None , rot = None ,fs =14 , transform = self.fig.axes[0].transAxes)\n if i == len(self.kpunten[0]) -1 :\n end = None\n else:\n end = self.kpunten[0][i+1][1] + 1\n print end\n self.plotrgwrap( self.rgindex,2*self.reader.npair+self.rgindex,'real part of rgvars (a.u)' , 'imaginary part of rgvars (a.u.)', tit ='RG vars g = %f all states'%(self.chardata) , begin = self.kpunten[0][i][1] , stop = end , name = 'cpcloud'+ self.kpunten[0][i][0] , filenum = 0)", "def makeD2hhAsymm(name,\n config,\n KPIDK_string,\n PiPIDK_string,\n Mass_low_string,\n Mass_high_string,\n CombPIDK_string,\n DecayDescriptor,\n inputSel,\n useTOS,\n Hlt1TOS,\n Hlt2TOS\n ) :\n\n def makeTISTOS( name, _input, _hlttos ) :\n from Configurables import TisTosParticleTagger\n _tisTosFilter = TisTosParticleTagger( name + \"Tagger\" )\n _tisTosFilter.TisTosSpecs = _hlttos\n return Selection( name\n , Algorithm = _tisTosFilter\n , RequiredSelections = [ _input ]\n ) \n\n _Kcuts1 = \"~ISMUON & (PT > %(DaugPtMin)s* MeV) & (MIPCHI2DV(PRIMARY) > %(DaugIPChi2)s)\" % locals()['config']\n _KcutsPIDK = KPIDK_string % locals()['config']\n _Kcuts2 = \" & (ISLONG) & (P > %(DaugP)s* MeV) & (TRCHI2DOF < %(DaugTrkChi2)s)\" % locals()['config']\n _Kcuts = _Kcuts1 + _KcutsPIDK + _Kcuts2\n _Picuts1 = \"~ISMUON & (PT > %(DaugPtMin)s* MeV) & (MIPCHI2DV(PRIMARY) > %(DaugIPChi2)s)\" % locals()['config']\n _PicutsPIDK = PiPIDK_string % locals()['config']\n _Picuts2 = \" & (ISLONG) & (P > %(DaugP)s* MeV) & (TRCHI2DOF < %(DaugTrkChi2)s)\" % locals()['config']\n _Picuts = _Picuts1 + _PicutsPIDK + _Picuts2\n _dauCuts = { 'K+': _Kcuts, 'pi+': _Picuts }\n\n _massLow = Mass_low_string % locals()['config']\n _massHigh = Mass_high_string % 
locals()['config']\n _combCuts1 = \"(APT > %(D0Pt)s* MeV)\" \\\n \"& (AHASCHILD( PT > %(DaugPtMax)s* MeV ) )\" \\\n \"& (ADOCA(1,2)< %(D0DOCA)s* mm)\" \\\n \"& (AP > %(D0P)s* MeV)\" % locals()['config']\n _combCutsPIDK = CombPIDK_string % locals()['config']\n _combCuts = _combCuts1 + _combCutsPIDK + _massLow + _massHigh\n\n _motherCuts = \"(VFASPF(VCHI2PDOF) < %(D0VtxChi2Ndof)s)\" \\\n \"& (BPVVDCHI2 > %(D0FDChi2)s)\" \\\n \"& (BPVLTIME() > %(D0Tau)s)\" \\\n \"& (BPVDIRA > %(D0BPVDira)s)\" % locals()['config']\n\n _D0 = CombineParticles( DecayDescriptor = DecayDescriptor,\n MotherCut = _motherCuts,\n CombinationCut = _combCuts,\n DaughtersCuts = _dauCuts)\n\n _sel = Selection ( name+'Sel',\n Algorithm = _D0,\n RequiredSelections = inputSel )\n\n if not useTOS:\n return _sel\n\n _selD2hhHlt1TOS = makeTISTOS( name + \"D2hhHlt1TOS\"\n , _sel\n , Hlt1TOS\n )\n _selD2hhHlt2TOS = makeTISTOS( name + \"D2hhHlt2TOS\"\n , _selD2hhHlt1TOS\n , Hlt2TOS\n )\n \n return _selD2hhHlt2TOS", "def efficient_binned_tagged_jets_hist(datalist,discriminant, discriminant_cuts, CSV_cuts, bins, nbins, Difference=False, mode=\"pT_jet\",Save=False):\n title = \"binned_tagged_jets_vs_\"+mode\n AllJetsHistlist = []\n CSVHistlist = []\n DiscriminantHistlist = []\n if mode == \"pT_hadron\":\n feature = 2\n elif mode == \"pT_jet\":\n feature = 3\n elif mode == \"decay_vx\":\n feature = 4\n for n,data in enumerate(datalist):\n print \"working on\",data[1]\n ran = data[2]\n AllJetsHistlist.append(rt.TH1D(data[1]+\"_AllJets\",data[1]+\"_\"+title,nbins,ran[0],ran[1]))\n AllJetsHistlist[n].SetLineColor(4)\n CSVHistlist.append(rt.TH1D(data[1]+\"_CSV\",data[1]+\"_\"+title,nbins,ran[0],ran[1]))\n CSVHistlist[n].SetLineColor(3)\n DiscriminantHistlist.append(rt.TH1D(data[1]+\"_Discriminant\",data[1]+\"_\"+title,nbins,ran[0],ran[1]))\n DiscriminantHistlist[n].SetLineColor(2)\n for particle in data[0]:\n bin_number = FCM.bin_selection(particle,bins)\n if bin_number == -100: continue\n AllJetsHistlist[n].Fill(particle[feature])\n if particle[1] >= CSV_cuts[bin_number]: CSVHistlist[n].Fill(particle[feature])\n if Difference:\n L = particle[8]-particle[5]\n else:\n if particle[17] != 0:\n L = particle[20]/float(particle[17])\n else:\n continue\n if L >= discriminant_cuts[bin_number]: DiscriminantHistlist[n].Fill(particle[feature])\n canvaslist = []\n legendlist = []\n Tfilelist = []\n for n,data in enumerate(datalist):\n canvaslist.append(rt.TCanvas(data[1]+\"_canvas\",\"canvas\",600,600))\n canvaslist[n].SetTitle(data[1]+\"_\"+title)\n rt.gStyle.SetOptStat(0)\n legendlist.append(rt.TLegend(0.9,0.9,0.65,0.75))\n legendlist[n].AddEntry(AllJetsHistlist[n], \"All jets\")\n legendlist[n].AddEntry(CSVHistlist[n], \"CSV\")\n legendlist[n].AddEntry(DiscriminantHistlist[n], discriminant)\n AllJetsHistlist[n].GetXaxis().SetTitle(\"jet p_{T} (GeV)\")\n AllJetsHistlist[n].GetYaxis().SetTitle('# jets')\n AllJetsHistlist[n].GetYaxis().SetTitleOffset(1.5)\n #AllJetsHistlist[n].Draw()\n #CSVHistlist[n].Draw(\"SAME\")\n #DiscriminantHistlist[n].Draw(\"SAME\")\n #legendlist[n].Draw()\n if Save:\n #canvaslist[n].SaveAs(title+\"_\"+data[1]+discriminant+\".png\")\n Tfilelist.append(rt.TFile(\"Thesis_Plots/root_files/\"+title+\"_\"+data[1]+discriminant+\".root\",\"recreate\"))\n print \"saved histogram as Thesis_Plots/root_files/\"+title+\"_\"+data[1]+discriminant+\".root\"\n AllJetsHistlist[n].Write()\n CSVHistlist[n].Write()\n DiscriminantHistlist[n].Write()", "def fid_cuts(ptname, etaname):\n cuts = []\n cuts.append(combine_cuts([ptname + ' > 4.5',\n 
'TMath::Abs(' + etaname + ') < 1.2']))\n cuts.append(combine_cuts([ptname + ' > 4.0',\n var_selection('TMath::Abs('+etaname+')', 1.2, 1.4)\n ]))\n cuts.append(combine_cuts([ptname + ' > 3.5',\n var_selection('TMath::Abs('+etaname+')', 1.4, 1.6)\n ]))\n return combine_cuts(cuts, ' || ')", "def showComponents(self, mask):\n\n from skimage import measure\n\n thresh = cv2.threshold(mask, 0, 255, cv2.THRESH_BINARY)[1]\n labels = measure.label(thresh, neighbors=8, background=0)\n for label in range(0,len(labels)):\n img = np.zeros(mask.shape)\n # if this is the background label, ignore it\n if label == 0:\n continue\n img[labels==label]=255\n numPixels = cv2.countNonZero(img)\n\n \t# if the number of pixels in the component is sufficiently\n \t# large, then add it to our mask of \"large blobs\"\n if numPixels > 500:\n showme(img, 'Contour '+str(label))", "def getLabel2(*args):", "def getLabel2(*args):", "def DGTsLabel(*args):\n return _XCAFDoc.XCAFDoc_DocumentTool_DGTsLabel(*args)", "def _get_component_label(self):\n labels = df_parser(self.workflow.builder.df_path, workflow=self.workflow).labels\n if self.pdc_component_df_label not in labels:\n raise PluginFailedException('No %s label in Dockerfile, can\\'t get PDC component',\n self.pdc_component_df_label)\n return labels[self.pdc_component_df_label]", "def tagview(tab,label,x,y):\r\n font = cv2.FONT_HERSHEY_SIMPLEX\r\n col=classifc[label]\r\n labnow=classif[label]\r\n# print (labnow, text)\r\n if label == 'back_ground':\r\n deltay=30\r\n else:\r\n# deltay=25*((labnow-1)%5)\r\n deltay=40+10*(labnow-1)\r\n\r\n viseg=cv2.putText(tab,label,(x, y+deltay), font,0.3,col,1)\r\n return viseg", "def label(image,**kw):\n # default connectivity in OpenCV: 8 (which is equivalent to...)\n # default connectivity in scikit-image: 2\n n, labels = cv2.connectedComponents(image.astype(uint8), connectivity=4)\n #n, labels = cv2.connectedComponentsWithAlgorithm(image.astype(uint8), connectivity=4, ltype=2, ccltype=cv2.CCL_DEFAULT)\n return labels, n-1\n # try: return measurements.label(image,**kw)\n # except: pass\n # types = [\"int32\",\"uint32\",\"int64\",\"uint64\",\"int16\",\"uint16\"]\n # for t in types:\n # try: return measurements.label(array(image,dtype=t),**kw)\n # except: pass\n # # let it raise the same exception as before\n # return measurements.label(image,**kw)", "def needleSegmentationCLIDEMO(self):\r\n # research\r\n profbox()\r\n widget = slicer.modules.NeedleFinderWidget\r\n scene = slicer.mrmlScene\r\n pNode = self.parameterNode()\r\n if slicer.mrmlScene.GetNodeByID(pNode.GetParameter(\"baselineVolumeID\")) == None:\r\n inputVolume = self.__volumeSelector.currentNode()\r\n inputVolumeID = self.__volumeSelector.currentNode().GetID()\r\n else:\r\n inputVolume = slicer.mrmlScene.GetNodeByID(pNode.GetParameter(\"baselineVolumeID\"))\r\n inputVolumeID = slicer.mrmlScene.GetNodeByID(pNode.GetParameter(\"baselineVolumeID\")).GetID()\r\n inputLabelID = self.__needleLabelSelector.currentNode().GetID()\r\n\r\n datetime = time.strftime(\"%Y-%m-%d-%H_%M_%S\", time.localtime())\r\n\r\n inputVolume.SetAttribute(\"foldername\", datetime)\r\n self.outputVolumeNode = slicer.mrmlScene.CreateNodeByClass('vtkMRMLModelNode')\r\n self.outputVolumeNode.SetName(\"Output Needle Model\")\r\n outputVolumeStorageNode = slicer.mrmlScene.CreateNodeByClass('vtkMRMLModelStorageNode')\r\n slicer.mrmlScene.AddNode(self.outputVolumeNode)\r\n slicer.mrmlScene.AddNode(outputVolumeStorageNode)\r\n self.outputVolumeNode.AddAndObserveStorageNodeID(outputVolumeStorageNode.GetID())\r\n 
outputVolumeStorageNode.WriteData(self.outputVolumeNode)\r\n\r\n outputID = self.outputVolumeNode.GetID()\r\n\r\n self.foldername = '/NeedleModels/' + datetime\r\n\r\n # Set the parameters for the CLI module\r\n parameters = {}\r\n parameters['inputVolume'] = inputVolumeID\r\n parameters['inputLabel'] = inputLabelID\r\n parameters['outputVtk'] = outputID\r\n parameters['outputFolderName'] = self.foldername\r\n parameters['nbPointsPerLine'] = self.nbPointsPerLine.value\r\n parameters['nbRadiusIterations'] = self.nbRadiusIterations.value\r\n parameters['radiusMax'] = self.radiusMax.value\r\n parameters['numberOfPointsPerNeedle'] = self.numberOfPointsPerNeedle.value\r\n parameters['nbRotatingIterations'] = self.nbRotatingIterations.value\r\n\r\n module = slicer.modules.mainlabelneedletrackingcli\r\n self.__cliNode = None\r\n self.__cliNode = slicer.cli.run(module, None, parameters, wait_for_completion=True)\r\n\r\n ##### match the needles ######\r\n\r\n self.setNeedleCoordinates()\r\n self.computerPolydataAndMatrix()\r\n xmin = min(self.p[0])\r\n xmax = max(self.p[0])\r\n ymin = min(self.p[1])\r\n ymax = max(self.p[1])\r\n xdelta = xmax - xmin\r\n ydelta = ymax - ymin\r\n k = 0\r\n\r\n self.base = [[0 for j in range(3)] for j in range(63)]\r\n self.tip = [[0 for j in range(3)] for j in range(63)]\r\n self.needlenode = [[0 for j in range(2)] for j in range(63)]\r\n self.bentNeedleNode = [[0 for j in range(2)] for j in range(63)]\r\n self.displaynode = [0 for j in range(63)]\r\n self.displaynodeB = [0 for j in range(63)]\r\n self.fiducialnode = [0 for j in range(63)]\r\n\r\n for i in xrange(63):\r\n\r\n pathneedle = self.foldername + '/' + str(i) + '.vtp'\r\n pathBentNeedle = self.foldername + '/' + str(i) + '_bent.vtp'\r\n self.needlenode[i] = slicer.util.loadModel(pathneedle, True)\r\n self.bentNeedleNode[i] = slicer.util.loadModel(pathBentNeedle, True)\r\n\r\n if self.needlenode[i][0] == True and self.needlenode[i][1] != None:\r\n self.displaynode[i] = self.needlenode[i][1].GetDisplayNode()\r\n self.displaynodeB[i] = self.bentNeedleNode[i][1].GetDisplayNode()\r\n\r\n\r\n polydata = self.needlenode[i][1].GetPolyData()\r\n polydata.GetPoint(0, self.base[i])\r\n\r\n self.displaynode[i].SliceIntersectionVisibilityOn()\r\n self.displaynodeB[i].SliceIntersectionVisibilityOn()\r\n bestmatch = None\r\n mindist = None\r\n for j in xrange(63):\r\n delta = ((self.p[0][j] - (self.base[i][0])) ** 2 + (self.p[1][j] - self.base[i][1]) ** 2) ** (0.5)\r\n if delta < mindist or mindist == None:\r\n bestmatch = j\r\n mindist = delta\r\n\r\n bestmatch = k\r\n k += 1\r\n self.displaynode[i].SetColor(self.color[bestmatch])\r\n self.displaynodeB[i].SetColor(self.color[bestmatch])\r\n self.needlenode[i][1].SetName(self.option[bestmatch] + \"_segmented\")\r\n self.bentNeedleNode[i][1].SetName(self.option[bestmatch] + \"_optimized\")\r\n self.needlenode[i][1].SetAttribute(\"segmented\", \"1\")\r\n self.bentNeedleNode[i][1].SetAttribute(\"optimized\", \"1\")\r\n self.needlenode[i][1].SetAttribute(\"nth\", str(bestmatch))\r\n self.bentNeedleNode[i][1].SetAttribute(\"nth\", str(bestmatch))\r\n self.needlenode[i][1].SetAttribute(\"needleID\", self.needlenode[i][1].GetID())\r\n self.bentNeedleNode[i][1].SetAttribute(\"needleID\", self.bentNeedleNode[i][1].GetID())\r\n\r\n if widget.removeDuplicates.isChecked():\r\n self.positionFilteringNeedles()\r\n\r\n d = slicer.mrmlScene.GetNodeByID(outputID).GetDisplayNode()\r\n d.SetVisibility(0)\r\n\r\n self.__editorFrame.collapsed = 1\r\n\r\n self.addButtons()", "def 
nifti2dicom(seg_nifti, bk_nifti, ref_dicom_dir, save_dir, description, mode_RGB=False, zoom_num=4, watermarks=True): \n #Load nifti, here is segmentation and background\n seg_image = sitk.ReadImage(seg_nifti)\n seg_image = sitk.GetArrayFromImage(seg_image)\n seg_image = seg_image.astype(np.uint8)\n \n # print(nifti_image.shape)\n bk_image = sitk.ReadImage(bk_nifti)\n bk_image = sitk.GetArrayFromImage(bk_image)\n\n #Get Volume report from the seg_image, cubic ml, and the 95% CI:\n v_nonenhancing = round(seg_image[seg_image==1].sum()/1000,1)\n ci_nonenhancing = round(v_nonenhancing*0.2,1)\n v_enhancing = round(seg_image[seg_image==4].sum()/1000,1)\n ci_enhancing = round(v_enhancing*0.3,1)\n v_edema = round(seg_image[seg_image==2].sum()/1000,1)\n ci_edema = round(v_edema*0.1,1)\n\n #Loading the reference dicom, in order to get the headers of each slice. \n series_IDs = sitk.ImageSeriesReader.GetGDCMSeriesIDs(ref_dicom_dir)\n if not series_IDs:\n print(\"ERROR: given directory \\\"\"+data_directory+\"\\\" does not contain a DICOM series.\")\n sys.exit(1)\n\n series_file_names = sitk.ImageSeriesReader.GetGDCMSeriesFileNames(ref_dicom_dir, series_IDs[0])\n\n series_reader = sitk.ImageSeriesReader()\n series_reader.SetFileNames(series_file_names)\n\n # Configure the reader to load all of the DICOM tags (public+private):\n # By default tags are not loaded (saves time).\n # By default if tags are loaded, the private tags are not loaded.\n # We explicitly configure the reader to load tags, including the private ones.\n series_reader.MetaDataDictionaryArrayUpdateOn()\n series_reader.LoadPrivateTagsOn()\n ref_image = series_reader.Execute()\n \n #set reader for slice \n reader = sitk.ImageFileReader()\n reader.LoadPrivateTagsOn()\n \n writer = sitk.ImageFileWriter()\n # Use the study/series/frame of reference information given in the meta-data\n # dictionary and not the automatically generated information from the file IO\n writer.KeepOriginalImageUIDOn()\n\n # Copy some of the tags and add the relevant tags indicating the change.\n # For the series instance UID (0020|000e), each of the components is a number, cannot start\n # with zero, and separated by a '.' We create a unique series ID using the date and time. 
tags of interest:\n \n castFilter = sitk.CastImageFilter()\n castFilter.SetOutputPixelType(sitk.sitkInt16)\n ORG_ROOT=\"1.3.12.2\"\n #create SeriesInstanceUID and StudyInstanceUID\n SeriesInstanceUID = generateUID(org_root=ORG_ROOT)\n StudyInstanceUID = generateUID(org_root=ORG_ROOT)\n #create a prefix for the accession number\n acc='BTS'+series_reader.GetMetaData(0,\"0008|0050\")\n #changing spacing\n reader.SetFileName(series_file_names[0])\n reader.ReadImageInformation()\n\n if mode_RGB:\n customized_tag_values = [(\"0008|103e\", description),\n (\"0020|000e\", SeriesInstanceUID),\n (\"0008|0050\", acc),\n (\"0020|000d\", StudyInstanceUID), \n (\"0028|0004\", 'RGB'),\n (\"0028|0002\", \"3\")]\n else:\n customized_tag_values = [(\"0008|103e\", description),\n (\"0020|000e\", SeriesInstanceUID), \n (\"0008|0050\", acc),\n (\"0020|000d\", StudyInstanceUID)] \n\n os.makedirs(save_dir, exist_ok = True)\n\n #for nifti, the main axis is the first one, while for dicoms it is the last one\n for i in range(ref_image.GetDepth()):\n #zoom 2 times, todo need to figure out which axis to zoom, post is the 3rd\n #pre assume the first axis is the slice numbers\n bk_slice = ndimage.zoom(bk_image[i,:,:], zoom_num, order=0)\n seg_slice = ndimage.zoom(seg_image[i,:,:], zoom_num, order=0)\n \n #Due to the DICOM saving coordinate system is different with nifti,i.e mirrored, it is easier to flip array\n bk_slice = np.flip(bk_slice, (0, 1)) \n seg_slice = np.flip(seg_slice, (0, 1)) \n\n #get contours\n seg_idx = get_contours(seg_slice)\n \n #add watermarks\n if watermarks:\n canvas_tmp = np.zeros(list(bk_slice.shape), dtype=np.uint8)\n font = cv2.FONT_HERSHEY_PLAIN\n cv2.putText(canvas_tmp,'FOR RESEARCH ONLY;REFER TO OFFICIAL REPORT FOR DETAILS',(10,30), \n font,2,255,1)\n cv2.putText(canvas_tmp,'(This tool is intended for evaluation of gliomas, and results may be unreliable for other pathologies)',(90,50), \n font,1,255,1) \n #add Legend and volumes \n cv2.putText(canvas_tmp, 'Legend Volume(+/-95% CI)',(10,900), font,0.8,255,1)\n cv2.putText(canvas_tmp, f'Edema {v_edema}+/-{ci_edema} mL',(30,920), font,0.8,255,1)\n cv2.putText(canvas_tmp, f'Enhancing {v_enhancing}+/-{ci_enhancing} mL',(30,940), font,0.8,255,1)\n cv2.putText(canvas_tmp, f'Non- {v_nonenhancing}+/-{ci_nonenhancing} mL',(30,960), font,0.8,255,1)\n cv2.putText(canvas_tmp,'Enhancing', (30,975), font,0.8,255,1)\n cv2.putText(canvas_tmp,'(The error is based on testing of algorithm performance vs. 
manual segmentation)', (150,1000), font,1,255,1)\n\n \n \n #burning segmentation contour into slices\n cv2.line(seg_idx, (10,915), (20,915), 2, 2)\n cv2.line(seg_idx, (10,935), (20,935), 4, 2)\n cv2.line(seg_idx, (10,955), (20,955), 1, 2)\n \n if mode_RGB:\n #burning the watermarks\n bk_slice[canvas_tmp==255]=bk_slice.max()\n #convert dicom from nomogram to RGB\n bk_slice = toRGB(bk_slice)\n #colorize the bk_slice according to seg_idx\n bk_slice[0,:,:,0][seg_idx==1] = 255\n bk_slice[0,:,:,1][seg_idx==4] = 255\n bk_slice[0,:,:,2][seg_idx==2] = 255 \n else:\n #grey the ori_image_slice according to seg_idx\n bk_slice[canvas_tmp==255]=bk_slice.max()//2\n bk_slice[seg_idx==1] = bk_slice.max()*2//50\n bk_slice[seg_idx==2] = bk_slice.max()*1//50\n bk_slice[seg_idx==4] = bk_slice.max()*3//50\n\n converted_slice = sitk.GetImageFromArray(bk_slice)\n reader.SetFileName(series_file_names[i])\n reader.ReadImageInformation()\n spacing_new = [i/zoom_num for i in reader.GetSpacing()[:-1]] + [reader.GetSpacing()[-1]]\n \n #generate SOPInstanceUID\n SOPInstanceUID = generateUID(org_root=ORG_ROOT)\n series_tag_values = [(k, reader.GetMetaData(k)) for k in reader.GetMetaDataKeys()] + customized_tag_values + [(\"0008|0018\", SOPInstanceUID)]\n# print(series_tag_values)\n if '_seg_' in description:\n converted_slice = converted_slice \n \n # Tags shared by the series.\n for tag, value in series_tag_values:\n converted_slice.SetMetaData(tag, value)\n \n # especially set spacing tags\n # Image Position (Patient)\n converted_slice.SetMetaData(\"0020|0013\", str(i)) # Instance Number\n converted_slice.SetSpacing(spacing_new)\n \n # Write to the output directory and add the extension dcm, to force writing in DICOM format \n writer.SetFileName(os.path.join(save_dir, str(i)+'.dcm'))\n writer.Execute(converted_slice)", "def needleSegmentationCLIDEMO(self):\n #research\n profbox()\n widget = slicer.modules.NeedleFinderWidget\n scene = slicer.mrmlScene\n pNode = self.parameterNode()\n if slicer.mrmlScene.GetNodeByID(pNode.GetParameter(\"baselineVolumeID\")) == None:\n inputVolume = self.__volumeSelector.currentNode()\n inputVolumeID = self.__volumeSelector.currentNode().GetID()\n else:\n inputVolume = slicer.mrmlScene.GetNodeByID(pNode.GetParameter(\"baselineVolumeID\"))\n inputVolumeID = slicer.mrmlScene.GetNodeByID(pNode.GetParameter(\"baselineVolumeID\")).GetID()\n inputLabelID = self.__needleLabelSelector.currentNode().GetID()\n \n datetime = time.strftime(\"%Y-%m-%d-%H_%M_%S\", time.localtime())\n \n inputVolume.SetAttribute(\"foldername\",datetime)\n self.outputVolumeNode = slicer.mrmlScene.CreateNodeByClass('vtkMRMLModelNode')\n self.outputVolumeNode.SetName(\"Output Needle Model\")\n outputVolumeStorageNode = slicer.mrmlScene.CreateNodeByClass('vtkMRMLModelStorageNode')\n slicer.mrmlScene.AddNode(self.outputVolumeNode)\n slicer.mrmlScene.AddNode(outputVolumeStorageNode)\n self.outputVolumeNode.AddAndObserveStorageNodeID(outputVolumeStorageNode.GetID())\n outputVolumeStorageNode.WriteData(self.outputVolumeNode)\n \n outputID = self.outputVolumeNode.GetID()\n \n self.foldername = '/NeedleModels/' + datetime\n \n # Set the parameters for the CLI module \n parameters = {} \n parameters['inputVolume'] = inputVolumeID\n parameters['inputLabel'] = inputLabelID\n parameters['outputVtk'] = outputID\n parameters['outputFolderName'] = self.foldername\n parameters['nbPointsPerLine'] = self.nbPointsPerLine.value\n parameters['nbRadiusIterations'] = self.nbRadiusIterations.value\n parameters['distanceMax'] = 
self.distanceMax.value\n parameters['numberOfPointsPerNeedle'] = self.numberOfPointsPerNeedle.value\n parameters['nbRotatingIterations'] = self.nbRotatingIterations.value\n \n module = slicer.modules.mainlabelneedletrackingcli \n self.__cliNode = None\n self.__cliNode = slicer.cli.run(module, None, parameters, wait_for_completion=True)\n \n ##### match the needles ######\n\n self.setNeedleCoordinates()\n self.computerPolydataAndMatrix()\n xmin = min(self.p[0])\n xmax = max(self.p[0])\n ymin = min(self.p[1])\n ymax = max(self.p[1])\n xdelta = xmax - xmin\n ydelta = ymax - ymin\n k = 0\n\n self.base = [[0 for j in range(3)] for j in range(63)]\n self.tip = [[0 for j in range(3)] for j in range(63)]\n self.needlenode = [[0 for j in range(2)] for j in range(63)]\n self.bentNeedleNode = [[0 for j in range(2)] for j in range(63)]\n self.displaynode = [0 for j in range(63)]\n self.displaynodeB = [0 for j in range(63)]\n self.fiducialnode = [0 for j in range(63)]\n \n for i in xrange(63):\n\n pathneedle = self.foldername+'/'+str(i)+'.vtp'\n pathBentNeedle = self.foldername+'/'+str(i)+'_bent.vtp'\n self.needlenode[i] = slicer.util.loadModel(pathneedle, True)\n self.bentNeedleNode[i] = slicer.util.loadModel(pathBentNeedle, True)\n\n if self.needlenode[i][0] == True and self.needlenode[i][1] != None:\n self.displaynode[i] = self.needlenode[i][1].GetDisplayNode()\n self.displaynodeB[i] = self.bentNeedleNode[i][1].GetDisplayNode()\n\n \n polydata = self.needlenode[i][1].GetPolyData()\n polydata.GetPoint(0,self.base[i]) \n \n self.displaynode[i].SliceIntersectionVisibilityOn()\n self.displaynodeB[i].SliceIntersectionVisibilityOn()\n bestmatch = None\n mindist = None\n for j in xrange(63):\n delta = ((self.p[0][j]-(self.base[i][0]))**2+(self.p[1][j]-self.base[i][1])**2)**(0.5)\n if delta < mindist or mindist == None:\n bestmatch = j\n mindist = delta\n \n bestmatch = k\n k += 1\n self.displaynode[i].SetColor(self.color[bestmatch])\n self.displaynodeB[i].SetColor(self.color[bestmatch])\n self.needlenode[i][1].SetName(self.option[bestmatch]+\"_segmented\")\n self.bentNeedleNode[i][1].SetName(self.option[bestmatch]+\"_optimized\")\n self.needlenode[i][1].SetAttribute(\"segmented\",\"1\")\n self.bentNeedleNode[i][1].SetAttribute(\"optimized\",\"1\")\n self.needlenode[i][1].SetAttribute(\"nth\",str(bestmatch))\n self.bentNeedleNode[i][1].SetAttribute(\"nth\",str(bestmatch))\n self.needlenode[i][1].SetAttribute(\"needleID\",self.needlenode[i][1].GetID())\n self.bentNeedleNode[i][1].SetAttribute(\"needleID\",self.bentNeedleNode[i][1].GetID())\n \n if widget.removeDuplicates.isChecked():\n self.positionFilteringNeedles()\n\n d = slicer.mrmlScene.GetNodeByID(outputID).GetDisplayNode()\n d.SetVisibility(0)\n \n self.__editorFrame.collapsed = 1\n \n self.addButtons()", "def create_slice_labels(dataset, base_task_name, slice_name, verbose=False):\n # TODO: break this out into more modular pieces oncee we have multiple slices\n slice_fn = globals()[slice_name]\n slice_indicators = torch.tensor(\n [slice_fn(dataset, idx) for idx in range(len(dataset))], dtype=torch.uint8\n ).view(-1, 1)\n\n Y_base = dataset.labels[f\"{base_task_name}_gold\"]\n Y_slice = Y_base.clone().masked_fill_(slice_indicators == 0, 0)\n\n if verbose:\n if not any(Y_slice):\n warnings.warn(f\"No examples were found to belong to slice {slice_name}\")\n else:\n print(f\"Found {sum(slice_indicators)} examples in slice {slice_name}.\")\n\n # NOTE: we assume here that all slice labels are for sentence-level tasks only\n return Y_slice", "def 
__do_split_haghverdi16(self, Dseg, tips):\n # sort distance from first tip point\n # then the sequence of distances Dseg[tips[0]][idcs] increases\n idcs = np.argsort(Dseg[tips[0]])\n # consider now the sequence of distances from the other\n # two tip points, which only increase when being close to `tips[0]`\n # where they become correlated\n # at the point where this happens, we define a branching point\n if True:\n imax = self.kendall_tau_split(Dseg[tips[1]][idcs],\n Dseg[tips[2]][idcs])\n if False:\n # if we were in euclidian space, the following should work\n # as well, but here, it doesn't because the scales in Dseg are\n # highly different, one would need to write the following equation\n # in terms of an ordering, such as exploited by the kendall\n # correlation method above\n imax = np.argmin(Dseg[tips[0]][idcs]\n + Dseg[tips[1]][idcs]\n + Dseg[tips[2]][idcs])\n # init list to store new segments\n ssegs = []\n # first new segment: all points until, but excluding the branching point\n # increasing the following slightly from imax is a more conservative choice\n # as the criterion based on normalized distances, which follows below,\n # is less stable\n ibranch = imax + 2 # this used to be imax + 1!\n # ibranch = int(0.95 * imax)\n return idcs[:ibranch]\n # ssegs.append(idcs[:ibranch])\n # TODO get rid of the following heuristics\n # define nomalized distances to tip points for the rest of the data\n # dist1 = Dseg[tips[1], idcs[ibranch:]] / Dseg[tips[1], idcs[ibranch-1]]\n # dist2 = Dseg[tips[2], idcs[ibranch:]] / Dseg[tips[2], idcs[ibranch-1]]\n # assign points according to whether being closer to tip cell 1 or 2\n # ssegs.append(idcs[ibranch:][dist1 <= dist2])\n # ssegs.append(idcs[ibranch:][dist1 > dist2])\n # return ssegs", "def Problem11():\n return 'Ductile Coulomb-Mohr'", "def ANN_binned_tagged_jets_hist(datalist, model, discriminant_cuts, CSV_cuts, bins, nbins, mode=\"pT_jet\",Save=False,addFeature=False):\n title = \"binned_tagged_jets_vs_\"+mode\n\tdiscriminant = \"ANN\"\n AllJetsHistlist = []\n CSVHistlist = []\n DiscriminantHistlist = []\n if mode == \"pT_hadron\":\n feature = 2\n elif mode == \"pT_jet\":\n feature = 3\n elif mode == \"decay_vx\":\n feature = 4\n for n,data in enumerate(datalist):\n\t\tdatatitle = data[3]\n print \"working on\",datatitle\n ran = data[4]\n\t\tCSV = data[2]\n\t\tpT = data[1]\n\t\tx_data = data[0]\n AllJetsHistlist.append(rt.TH1D(datatitle+\"_AllJets\",datatitle+\"_\"+title,nbins,ran[0],ran[1]))\n AllJetsHistlist[n].SetLineColor(4)\n CSVHistlist.append(rt.TH1D(datatitle+\"_CSV\",datatitle+\"_\"+title,nbins,ran[0],ran[1]))\n CSVHistlist[n].SetLineColor(3)\n DiscriminantHistlist.append(rt.TH1D(datatitle+\"_Discriminant\",datatitle+\"_\"+title,nbins,ran[0],ran[1]))\n DiscriminantHistlist[n].SetLineColor(2)\n\t\n\t\tif addFeature == False:\n\t\t\tpred_y = model.predict(ANN_functional_shape(x_data))\n\t\telif addFeature == \"pT\":\n\t\t\tpred_y = model.predict(ANN_functional_shape(x_data)+[pT/200])\n\t\telif addFeature == \"PV\":\n\t\t\tassert x_data.shape[1] == 21, \"wrong x_data format: PV cannot be found\"\n\t\t\tpred_y = model.predict(ANN_functional_shape(x_data)+[x_data[:,-1]/10.])\n\t\telse:\n\t\t\tprint \"invalid feature input\"\n\t\t\treturn None\n\t\tbin_numbers = ANN_bin_selection(pT,bins)\n\n\t for i,pT_value in enumerate(pT):\n\t if bin_numbers[i] == -100: continue\n\t\t\tAllJetsHistlist[n].Fill(pT_value)\n\t if pred_y[i] >= discriminant_cuts[bin_numbers[i]]: DiscriminantHistlist[n].Fill(pT_value)\n\t if CSV[i] >= 
CSV_cuts[bin_numbers[i]]: CSVHistlist[n].Fill(pT_value)\n\n canvaslist = []\n legendlist = []\n Tfilelist = []\n for n,data in enumerate(datalist):\n\t\tdatatitle = data[3]\n canvaslist.append(rt.TCanvas(datatitle+\"_canvas\",\"canvas\",600,600))\n canvaslist[n].SetTitle(datatitle+\"_\"+title)\n rt.gStyle.SetOptStat(0)\n legendlist.append(rt.TLegend(0.9,0.9,0.65,0.75))\n legendlist[n].AddEntry(AllJetsHistlist[n], \"All jets\")\n legendlist[n].AddEntry(CSVHistlist[n], \"CSV\")\n legendlist[n].AddEntry(DiscriminantHistlist[n], discriminant)\n AllJetsHistlist[n].GetXaxis().SetTitle(mode)\n AllJetsHistlist[n].GetYaxis().SetTitle('# jets')\n AllJetsHistlist[n].GetYaxis().SetTitleOffset(1.5)\n #AllJetsHistlist[n].Draw()\n #CSVHistlist[n].Draw(\"SAME\")\n #DiscriminantHistlist[n].Draw(\"SAME\")\n #legendlist[n].Draw()\n if Save:\n #canvaslist[n].SaveAs(title+\"_\"+datatitle+discriminant+\".png\")\n Tfilelist.append(rt.TFile(\"Thesis_Plots/root_files/\"+title+\"_\"+datatitle+discriminant+\".root\",\"recreate\"))\n print \"saved histogram as Thesis_Plots/root_files/\"+title+\"_\"+datatitle+discriminant+\".root\"\n AllJetsHistlist[n].Write()\n CSVHistlist[n].Write()\n DiscriminantHistlist[n].Write()", "def mask_label_contour(image, seg):\n return sitk.Mask(image, sitk.LabelContour(seg+1)==0)", "def adjust_labels(data_y, label):\n\n if label == 'locomotion': # Labels for locomotion are adjusted\n data_y[data_y == 4] = 3\n data_y[data_y == 5] = 4\n elif label == 'gestures': # Labels for gestures are adjusted\n data_y[data_y == 406516] = 1\n data_y[data_y == 406517] = 2\n data_y[data_y == 404516] = 3\n data_y[data_y == 404517] = 4\n data_y[data_y == 406520] = 5\n data_y[data_y == 404520] = 6\n data_y[data_y == 406505] = 7\n data_y[data_y == 404505] = 8\n data_y[data_y == 406519] = 9\n data_y[data_y == 404519] = 10\n data_y[data_y == 406511] = 11\n data_y[data_y == 404511] = 12\n data_y[data_y == 406508] = 13\n data_y[data_y == 404508] = 14\n data_y[data_y == 408512] = 15\n data_y[data_y == 407521] = 16\n data_y[data_y == 405506] = 17\n return data_y", "def GetDatumLabels(self, *args):\n return _XCAFDoc.XCAFDoc_DimTolTool_GetDatumLabels(self, *args)", "def get_labels(self):\r\n return [\"X\", \"O\", \"B-a\", \"I-a\", \"B-b\", \"I-b\", \"B-c\", \"I-c\", \"S-a\", \"S-b\", \"S-c\", \"[CLS]\", \"[SEP]\"]", "def _labels_of_sentence(self, sentence, split):\n labels = torch.ones(1)\n labels[0] = self.category_int_of_label_string(sentence[0][self.name_to_index_dict['label']]) #\n return labels", "def _get_label(self):\n if self.model.name == '':\n return \"KPI\"\n return \"KPI: {} ({})\".format(self.model.name, self.model.objective)", "def get_labels(self):\n return [\"A轮\", \"B轮\",\"C轮\",\"天使轮\",\"战略融资\"]", "def GetDatumTolerLabels(self, *args):\n return _XCAFDoc.XCAFDoc_DimTolTool_GetDatumTolerLabels(self, *args)", "def test_get_dim_label_with_index(self):\n\n dim = self.oecd_datasets['oecd']['dimension']['id'][2]\n dims_df = pyjstat.get_dim_label(self.oecd_datasets['oecd'], dim)\n self.assertTrue(dims_df.iloc[0]['id'] == '2003')\n self.assertTrue(dims_df.iloc[-1]['label'] == '2014')", "def addFid(data, Dim=.5, nodName=\"N\", lableName=\"1\", color=\"red\", GlyphType=1):\n\t\txyz = tuple(data)\n\t\ttipFiducial = slicer.mrmlScene.AddNode(slicer.vtkMRMLMarkupsFiducialNode())\n\t\ttipFiducial.SetName(nodName)\n\t\ttipFiducial.AddFiducial(xyz[0], xyz[1], xyz[2])\n\t\ttipFiducial.SetNthFiducialLabel(0, 
lableName)\n\t\tslicer.mrmlScene.AddNode(tipFiducial)\n\t\ttipFiducial.SetDisplayVisibility(True)\n\t\ttipFiducial.GetDisplayNode().SetGlyphType(GlyphType) # Vertex2D\n\t\ttipFiducial.GetDisplayNode().SetGlyphScale(Dim * 10)\n\t\ttipFiducial.GetDisplayNode().SetTextScale(3)\n\t\ttipFiducial.GetDisplayNode().SetSelectedColor(Helper.myColor(color))\n\t\t'''\tGlyphShapes {\n GlyphTypeInvalid = 0, 1-StarBurst2D, 2-Cross2D, 3-CrossDot2D,\n 4-ThickCross2D, 5-Dash2D, 6-Sphere3D, 7-Vertex2D,\n 8-Circle2D,9-Triangle2D, 10-Square2D, Diamond2D,\n Arrow2D, ThickArrow2D, HookedArrow2D, GlyphType_Last\n }'''", "def label_for(self, *pp, unit=True, description=True):\n if len(pp) > 1 and np.all([re.match(r\"k\\d+l\", p) for p in pp]):\n label = \"$k_nl$\"\n if unit:\n label += \" / $m^{-n}$\"\n return label\n return super().label_for(*pp, unit=unit, description=description)", "def make_label(self, label, units):\n nice_label = self.tex_axis_label(label)\n if not (units == 'dimensionless') and \\\n (units is not None) and (not units == []):\n nice_label += ' (%s)'%self.tex_axis_label(units)\n return nice_label", "def kaons ( self ) :\n from GaudiConfUtils.ConfigurableGenerators import FilterDesktop\n ## \n if self['NOPIDHADRONS'] :\n from StandardParticles import StdAllNoPIDsKaons as inpts\n kaoncut = self['KaonCut']\n else :\n from StandardParticles import StdAllLooseANNKaons as inpts \n kaoncut = \"(%s)&(%s)\" % ( self['KaonCut'] , self['KaonPIDCut'] ) \n #\n ##\n return self.make_selection (\n 'Kaon' ,\n FilterDesktop ,\n [ inpts ] ,\n Code = kaoncut ,\n )", "def classify_defect_clusters_modifier(frame, data):\n\n if data.particles.count == 0:\n # No particles there to classify, create empty properties anyway\n data.particles_.create_property('Si_V', dtype=int, components=1)\n data.particles_.create_property('Si_I', dtype=int, components=1)\n data.particles_.create_property('Si_C', dtype=int, components=1)\n data.particles_.create_property('C_V', dtype=int, components=1)\n data.particles_.create_property('C_I', dtype=int, components=1)\n data.particles_.create_property('C_Si', dtype=int, components=1)\n return\n\n # TODO Create numpy arrays containing the number of Si vacancies,\n # interstitials, etc for each particle site in `data.particles`. These\n # next lines are just placeholders!\n si_vacancy = data.particles[\"vacancy_mask\"][...] * data.particles[\"Is Si Site\"][...]\n si_interstitial = (data.particles[\"Is Si Site\"][...] & (data.particles[\"Si Occupancy\"][...] > 1)) * (\n data.particles[\"Si Occupancy\"][...] - 1) + (\n (data.particles[\"Is C Site\"][...]) * data.particles[\"Si Occupancy\"][...]) - (\n data.particles[\"Is C Site\"][...] & data.particles[\"antisite_mask\"][...])\n si_antisite = data.particles[\"antisite_mask\"][...] * data.particles[\"Is Si Site\"][...]\n c_vacancy = data.particles[\"vacancy_mask\"][...] * data.particles[\"Is C Site\"][...]\n c_interstitial = (data.particles[\"Is C Site\"][...] & (data.particles[\"C Occupancy\"][...] > 1)) * (\n data.particles[\"C Occupancy\"][...] - 1) + (\n (data.particles[\"Is Si Site\"][...]) * data.particles[\"C Occupancy\"][...]) - (\n data.particles[\"Is Si Site\"][...] & data.particles[\"antisite_mask\"][...])\n c_antisite = data.particles[\"antisite_mask\"][...] 
* data.particles[\"Is C Site\"][...]\n\n\n data.particles_.create_property('Si_V', data=si_vacancy.astype(int))\n data.particles_.create_property('Si_I', data=si_interstitial.astype(int))\n data.particles_.create_property('Si_C', data=si_antisite.astype(int))\n data.particles_.create_property('C_V', data=c_vacancy.astype(int))\n data.particles_.create_property('C_I', data=c_interstitial.astype(int))\n data.particles_.create_property('C_Si', data=c_antisite.astype(int))", "def _label_commuter_rail_rider(self, rider):\n if (rider['servicebrand_Commuter Rail'] > 0) and (rider['zonecr_1a'] == 0):\n label = 'CR except zone 1A'\n else:\n label = 'others'\n return label", "def showOneNeedle(self, i, visibility):\r\n # obsolete\r\n profbox()\r\n fidname = \"fid\" + self.option[i]\r\n pNode = self.parameterNode()\r\n needleID = pNode.GetParameter(self.option[i] + '.vtp')\r\n fidID = pNode.GetParameter(fidname)\r\n NeedleNode = slicer.mrmlScene.GetNodeByID(needleID)\r\n fiducialNode = slicer.mrmlScene.GetNodeByID(fidID)\r\n\r\n if NeedleNode != None:\r\n displayNode = NeedleNode.GetModelDisplayNode()\r\n nVisibility = displayNode.GetVisibility()\r\n\r\n if fiducialNode == None:\r\n displayNode.SetVisibility(1)\r\n displayNode.SetOpacity(0.9)\r\n polyData = NeedleNode.GetPolyData()\r\n polyData.Update()\r\n nb = int(polyData.GetNumberOfPoints() - 1)\r\n coord = [0, 0, 0]\r\n if nb > 100:\r\n fiducialNode = slicer.vtkMRMLAnnotationFiducialNode()\r\n polyData.GetPoint(nb, coord)\r\n fiducialNode.SetName(self.option[i])\r\n fiducialNode.SetFiducialCoordinates(coord)\r\n fiducialNode.Initialize(slicer.mrmlScene)\r\n fiducialNode.SetLocked(1)\r\n fiducialNode.SetSelectable(0)\r\n fidDN = fiducialNode.GetDisplayNode()\r\n fidDN.SetColor(NeedleNode.GetDisplayNode().GetColor())\r\n fidDN.SetGlyphScale(0)\r\n fidTN = fiducialNode.GetAnnotationTextDisplayNode()\r\n fidTN.SetTextScale(3)\r\n fidTN.SetColor(NeedleNode.GetDisplayNode().GetColor())\r\n fiducialNode.SetDisplayVisibility(0)\r\n pNode.SetParameter(fidname, fiducialNode.GetID())\r\n fiducialNode.SetDisplayVisibility(1)\r\n\r\n if visibility == 0:\r\n\r\n displayNode.SetVisibility(0)\r\n displayNode.SetSliceIntersectionVisibility(0)\r\n if fiducialNode != None:\r\n fiducialNode.SetDisplayVisibility(0)\r\n\r\n else:\r\n\r\n displayNode.SetVisibility(1)\r\n displayNode.SetSliceIntersectionVisibility(1)\r\n if fiducialNode != None:\r\n fiducialNode.SetDisplayVisibility(1)\r\n\r\n else:\r\n vtkmat = vtk.vtkMatrix4x4()\r\n vtkmat.DeepCopy(self.m_vtkmat)\r\n vtkmat.SetElement(0, 3, self.m_vtkmat.GetElement(0, 3) + self.p[0][i])\r\n vtkmat.SetElement(1, 3, self.m_vtkmat.GetElement(1, 3) + self.p[1][i])\r\n vtkmat.SetElement(2, 3, self.m_vtkmat.GetElement(2, 3) + (30.0 - 150.0) / 2.0)\r\n\r\n TransformPolyDataFilter = vtk.vtkTransformPolyDataFilter()\r\n Transform = vtk.vtkTransform()\r\n TransformPolyDataFilter.SetInput(self.m_polyCylinder)\r\n Transform.SetMatrix(vtkmat)\r\n TransformPolyDataFilter.SetTransform(Transform)\r\n TransformPolyDataFilter.Update()\r\n\r\n triangles = vtk.vtkTriangleFilter()\r\n triangles.SetInput(TransformPolyDataFilter.GetOutput())\r\n self.AddModel(i, triangles.GetOutput())\r\n self.showOneNeedle(i, visibility)", "def cvpr2018_labels():\n\n return {\n 0: 'others',\n 33: 'car',\n 34: 'motorcycle',\n 35: 'bicycle',\n 36: 'pedestrian',\n 38: 'truck',\n 39: 'bus',\n 40: 'tricycle'\n }", "def syed_dilation(data, vessel):", "def _compute_labels(self, element, data, mapping):\n lidx = element.nodes.get_dimension(self.label_index)\n if 
element.vdims:\n edges = Dataset(element)[element[element.vdims[0].name]>0]\n nodes = list(np.unique([edges.dimension_values(i) for i in range(2)]))\n nodes = element.nodes.select(**{element.nodes.kdims[2].name: nodes})\n else:\n nodes = element\n\n value_dim = element.vdims[0]\n labels = [lidx.pprint_value(v) for v in nodes.dimension_values(lidx)]\n if self.show_values:\n value_labels = []\n for i, node in enumerate(element._sankey['nodes']):\n value = value_dim.pprint_value(node['value'])\n label = '%s - %s' % (labels[i], value)\n if value_dim.unit:\n label += ' %s' % value_dim.unit\n value_labels.append(label)\n labels = value_labels\n\n ys = nodes.dimension_values(1)\n nodes = element._sankey['nodes']\n offset = (nodes[0]['x1']-nodes[0]['x0'])/4.\n if self.label_position == 'right':\n xs = np.array([node['x1'] for node in nodes])+offset\n else:\n xs = np.array([node['x0'] for node in nodes])-offset\n data['text_1'] = dict(x=xs, y=ys, text=[str(l) for l in labels])\n align = 'left' if self.label_position == 'right' else 'right'\n mapping['text_1'] = dict(text='text', x='x', y='y', text_baseline='middle', text_align=align)", "def chainDict2jetLabel(chain_dict):\n\n # suported scenarios \n router = {\n 'simple': _make_simple_label,\n 'HT': _make_ht_label,\n 'vbenf': _make_vbenf_label,\n 'dijet': _make_dijet_label,\n 'combinationsTest': _make_combinationsTest_label,\n 'partitionsTest': _make_partitionsTest_label,\n }\n\n # chain_part - scenario association\n cp_sorter = {}\n for k in router: cp_sorter[k] = []\n\n for cp in chain_dict['chainParts']:\n if cp['signature'] != 'Jet' and cp['signature'] != 'Bjet': \n continue\n for k in cp_sorter:\n if cp['hypoScenario'].startswith(k):\n cp_sorter[k].append(cp)\n break\n\n # obtain labels by scenario.\n labels = []\n for k, chain_parts in cp_sorter.items():\n if chain_parts: labels.append(router[k](chain_parts))\n\n assert labels\n nlabels = len(labels)\n if nlabels == 1: return labels[0]\n if nlabels == 2:\n alabel = \"\"\"\\\nand([]\n %s\n %s)\"\"\" % (tuple(labels))\n return alabel\n\n # more than 2 labels is not expected\n assert False", "def get_diar_target_labels(self, uniq_id, sample, fr_level_target):\n seg_target_list, base_clus_label = [], []\n self.scale_n = len(self.multiscale_timestamp_dict[uniq_id]['scale_dict'])\n subseg_time_stamp_list = self.multiscale_timestamp_dict[uniq_id][\"scale_dict\"][self.scale_n - 1][\"time_stamps\"]\n for (seg_stt, seg_end) in subseg_time_stamp_list:\n seg_stt_fr, seg_end_fr = int(seg_stt * self.frame_per_sec), int(seg_end * self.frame_per_sec)\n soft_label_vec_sess = torch.sum(fr_level_target[seg_stt_fr:seg_end_fr, :], axis=0) / (\n seg_end_fr - seg_stt_fr\n )\n label_int_sess = torch.argmax(soft_label_vec_sess)\n soft_label_vec = soft_label_vec_sess.unsqueeze(0)[:, sample.target_spks].squeeze()\n if label_int_sess in sample.target_spks and torch.sum(soft_label_vec_sess) > 0:\n label_int = sample.target_spks.index(label_int_sess)\n else:\n label_int = -1\n label_vec = (soft_label_vec > self.soft_label_thres).float()\n seg_target_list.append(label_vec.detach())\n base_clus_label.append(label_int)\n seg_target = torch.stack(seg_target_list)\n base_clus_label = torch.tensor(base_clus_label)\n return seg_target, base_clus_label", "def onCut(self):\n pass", "def autolabel(X_pos,values,height_lift):\r\n\theight= np.round(np.nan_to_num(values),2);y_pos = height_lift*height\r\n\tfor i in range(len(height)):\r\n\t\tax.text(X_pos[i],y_pos[i],'%4.2f' % height[i], ha='center', va='bottom',size=4)", "def 
labels(self):\n\n param=self\n\n l=len(param)\n\n sweep_label=[]\n\n for index,name in enumerate(param.names):\n\n sweep_label.append((\\\n ''.join([c for c in name if c.isupper()]))\\\n .replace(\"IDT\",\"\")\\\n .replace(\"S\",\"\")\\\n .replace(\"M\",\"\"))\n\n stringout=[]\n\n unique={name:list(dict.fromkeys(values)) for name,values in zip(param.names,param.values)}\n\n for i in range(l):\n\n tmp_lab=''\n\n for lab,name in zip(sweep_label,self.names):\n\n tmp_lab=tmp_lab+lab+str(unique[name].index(param()[name][i]))\n\n stringout.append(tmp_lab)\n\n return stringout", "def _change_name(self, suff, info_extra):\n if 'cable-ring' in self.path:\n i1 = info_extra['convex_hull_area']\n i2 = info_extra['best_possible_area']\n f = i1 / i2\n suff = suff.replace('.png',\n f'-area-{i1:0.3f}-best-{i2:0.3f}-FRAC-{f:0.3f}.png')\n elif 'cloth-flat' in self.path:\n i1 = info_extra['cloth_coverage']\n suff = suff.replace('.png', f'-coverage-{i1:0.3f}.png')\n elif 'bag-alone' in self.path:\n i1 = info_extra['convex_hull_area']\n i2 = info_extra['best_possible_area']\n suff = suff.replace('.png', f'-area-{i1:0.3f}-best-{i2:0.3f}.png')\n else:\n pass\n return suff", "def Label(self) -> str:", "def setPieLabels(label, position):\n pdict = {'left':'LEFT', 'right':'RIGHT'}\n dislin.pielab(label, pdict[position])", "def test_reneaming_old_default_labels_to_new_fixed_labels():\n from dtscalibration import DataStore\n\n cable_len = 100.0\n nt = 3\n time = np.arange(nt)\n x = np.linspace(0.0, cable_len, 8)\n ts_cold = np.ones(nt) * 4.0 + np.cos(time) * 4\n ts_warm = np.ones(nt) * 20.0 + -np.sin(time) * 4\n\n C_p = 1324 # 1/2 * E0 * v * K_+/lam_+^4\n eta_pf = np.cos(time) / 10 + 1 # eta_+ (gain factor forward channel)\n eta_pb = np.sin(time) / 10 + 1 # eta_- (gain factor backward channel)\n C_m = 5000.0\n eta_mf = np.cos(time + np.pi / 8) / 10 + 1\n eta_mb = np.sin(time + np.pi / 8) / 10 + 1\n dalpha_r = 0.005284\n dalpha_m = 0.004961\n dalpha_p = 0.005607\n gamma = 482.6\n\n temp_real_kelvin = np.zeros((len(x), nt)) + 273.15\n temp_real_kelvin[x < 0.2 * cable_len] += ts_cold[None]\n temp_real_kelvin[x > 0.85 * cable_len] += ts_warm[None]\n temp_real_celsius = temp_real_kelvin - 273.15\n\n st = (\n eta_pf[None]\n * C_p\n * np.exp(-dalpha_r * x[:, None])\n * np.exp(-dalpha_p * x[:, None])\n * np.exp(gamma / temp_real_kelvin)\n / (np.exp(gamma / temp_real_kelvin) - 1)\n )\n ast = (\n eta_mf[None]\n * C_m\n * np.exp(-dalpha_r * x[:, None])\n * np.exp(-dalpha_m * x[:, None])\n / (np.exp(gamma / temp_real_kelvin) - 1)\n )\n rst = (\n eta_pb[None]\n * C_p\n * np.exp(-dalpha_r * (-x[:, None] + cable_len))\n * np.exp(-dalpha_p * (-x[:, None] + cable_len))\n * np.exp(gamma / temp_real_kelvin)\n / (np.exp(gamma / temp_real_kelvin) - 1)\n )\n rast = (\n eta_mb[None]\n * C_m\n * np.exp(-dalpha_r * (-x[:, None] + cable_len))\n * np.exp(-dalpha_m * (-x[:, None] + cable_len))\n / (np.exp(gamma / temp_real_kelvin) - 1)\n )\n\n c_f = np.log(eta_mf * C_m / (eta_pf * C_p))\n c_b = np.log(eta_mb * C_m / (eta_pb * C_p))\n\n dalpha = dalpha_p - dalpha_m # \\Delta\\alpha\n alpha_int = cable_len * dalpha\n\n df = c_f # reference section starts at first x-index\n db = c_b + alpha_int\n i_fw = np.log(st / ast)\n i_bw = np.log(rst / rast)\n\n E_real = (i_bw - i_fw) / 2 + (db - df) / 2\n\n ds = DataStore(\n {\n \"ST\": ([\"x\", \"time\"], st),\n \"AST\": ([\"x\", \"time\"], ast),\n \"REV-ST\": ([\"x\", \"time\"], rst),\n \"REV-AST\": ([\"x\", \"time\"], rast),\n \"userAcquisitionTimeFW\": ([\"time\"], np.ones(nt)),\n 
\"userAcquisitionTimeBW\": ([\"time\"], np.ones(nt)),\n \"cold\": ([\"time\"], ts_cold),\n \"warm\": ([\"time\"], ts_warm),\n },\n coords={\"x\": x, \"time\": time},\n attrs={\"isDoubleEnded\": \"1\"},\n )\n ds = ds.rename_labels()\n\n sections = {\n \"cold\": [slice(0.0, 0.09 * cable_len)],\n \"warm\": [slice(0.9 * cable_len, cable_len)],\n }\n\n real_ans2 = np.concatenate(([gamma], df, db, E_real[:, 0]))\n\n ds.calibration_double_ended(\n sections=sections,\n st_var=1.5,\n ast_var=1.5,\n rst_var=1.0,\n rast_var=1.0,\n method=\"wls\",\n solver=\"sparse\",\n fix_gamma=(gamma, 0.0),\n )\n\n assert_almost_equal_verbose(df, ds.df.values, decimal=14)\n assert_almost_equal_verbose(db, ds.db.values, decimal=13)\n assert_almost_equal_verbose(\n x * (dalpha_p - dalpha_m), ds.alpha.values - ds.alpha.values[0], decimal=13\n )\n assert np.all(np.abs(real_ans2 - ds.p_val.values) < 1e-10)\n assert_almost_equal_verbose(temp_real_celsius, ds.tmpf.values, decimal=10)\n assert_almost_equal_verbose(temp_real_celsius, ds.tmpb.values, decimal=10)\n assert_almost_equal_verbose(temp_real_celsius, ds.tmpw.values, decimal=10)\n pass", "def autolabel(rects,array,axis,dist):\n ctr = 0\n label_array = [EM.truncate(v*100,1) for v in array]\n for entry in range(len(label_array)):\n if(label_array[entry]>=0) and (label_array[entry]<=1):\n label_array[entry] = EM.truncate(array[entry]*100,2)\n\n\n for rect in rects:\n height = rect.get_height()\n if(axis=='1'):\n ax1.text(rect.get_x() + rect.get_width()/2., height+dist,\n label_array[ctr],fontsize=fonts[3],\n #'%d' % int(height),\n ha='center', va='bottom',rotation=90)\n elif(axis=='2'):\n ax2.text(rect.get_x() + rect.get_width()/2., height+dist,\n label_array[ctr],fontsize=fonts[3],\n #'%d' % int(height),\n ha='center', va='bottom',rotation=90)\n elif(axis=='3'):\n ax3.text(rect.get_x() + rect.get_width()/2., height+dist,\n label_array[ctr],fontsize=fonts[3],\n #'%d' % int(height),\n ha='center', va='bottom',rotation=90)\n elif(axis=='4'):\n ax4.text(rect.get_x() + rect.get_width()/2., height+dist,\n label_array[ctr],fontsize=fonts[3],\n #'%d' % int(height),\n ha='center', va='bottom',rotation=90)\n ctr = ctr + 1", "def label(d, X, ind_class0, ind_class1, N, V, binary):\n if binary == True:\n K = 1\n C = torch.zeros(N + V, K)\n C[ind_class0, :] = 0.0\n C[ind_class1, :] = 1.0\n else:\n K = 2\n C = torch.zeros(N + V, K)\n C[ind_class0, :] = torch.tensor([1.0, 0.0])\n C[ind_class1, :] = torch.tensor([0.0, 1.0])\n\n X_train = X[:N, :]\n X_val = X[N:, :]\n C_train = C[:N, :]\n C_val = C[N:, :]\n\n return [X_train, C_train, X_val, C_val, d, K]", "def Write_GCode_Drag_Knife(self, PostPro):\n\n # initialisation of the string\n exstr = \"\"\n\n # Get the mill settings defined in the GUI\n safe_retract_depth = self.parentLayer.axis3_retract\n safe_margin = self.parentLayer.axis3_safe_margin\n\n workpiece_top_Z = self.axis3_start_mill_depth\n f_g1_plane = self.f_g1_plane\n f_g1_depth = self.f_g1_depth\n\n \"\"\"\n Cutting in slices is not supported for Swivel Knife tool. 
All is cut at once.\n \"\"\"\n mom_depth = self.axis3_mill_depth\n drag_depth = self.axis3_slice_depth\n\n # Move the tool to the start.\n exstr += self.stmove.geos.abs_el(0).Write_GCode(PostPro)\n\n # Add string to be added before the shape will be cut.\n exstr += PostPro.write_pre_shape_cut()\n\n # Move into workpiece and start cutting into Z\n exstr += PostPro.rap_pos_z(\n workpiece_top_Z + abs(safe_margin)) # Compute the safe margin from the initial mill depth\n exstr += PostPro.chg_feed_rate(f_g1_depth)\n\n # Write the geometries for the first cut\n if isinstance(self.stmove.geos.abs_el(1), ArcGeo):\n if self.stmove.geos.abs_el(1).drag:\n exstr += PostPro.lin_pol_z(drag_depth)\n drag = True\n else:\n exstr += PostPro.lin_pol_z(mom_depth)\n drag = False\n else:\n exstr += PostPro.lin_pol_z(mom_depth)\n drag = False\n exstr += PostPro.chg_feed_rate(f_g1_plane)\n\n exstr += self.stmove.geos.abs_el(1).Write_GCode(PostPro)\n\n for geo in Geos(self.stmove.geos[2:]).abs_iter():\n if isinstance(geo, ArcGeo):\n if geo.drag:\n exstr += PostPro.chg_feed_rate(f_g1_depth)\n exstr += PostPro.lin_pol_z(drag_depth)\n exstr += PostPro.chg_feed_rate(f_g1_plane)\n drag = True\n elif drag:\n exstr += PostPro.chg_feed_rate(f_g1_depth)\n exstr += PostPro.lin_pol_z(mom_depth)\n exstr += PostPro.chg_feed_rate(f_g1_plane)\n drag = False\n elif drag:\n exstr += PostPro.chg_feed_rate(f_g1_depth)\n exstr += PostPro.lin_pol_z(mom_depth)\n exstr += PostPro.chg_feed_rate(f_g1_plane)\n drag = False\n\n exstr += self.Write_GCode_for_geo(geo, PostPro)\n\n # Do the tool retraction\n exstr += PostPro.chg_feed_rate(f_g1_depth)\n exstr += PostPro.lin_pol_z(workpiece_top_Z + abs(safe_margin))\n exstr += PostPro.rap_pos_z(safe_retract_depth)\n\n # Add string to be added before the shape will be cut.\n exstr += PostPro.write_post_shape_cut()\n\n return exstr", "def get_label(name):\n lower = name.lower()\n vals = lower.split('_')\n if 'ho' in vals:\n name = 'Independent Estimate'\n elif 'alldata' in vals:\n name = 'Extra-Data Estimate'\n elif 'ris' in vals[0]:\n name = 'RIS'\n if 'w' in vals[0]:\n name += ' WIS'\n if 'pd' in vals[0]:\n name += ' PDIS'\n elif 'is' in vals[0]:\n name = 'OIS'\n if 'w' in vals[0]:\n name += ' WIS'\n if 'pd' in vals[0]:\n name += ' PDIS'\n if 'dr' in vals:\n name += ' DR'\n if 'wdr' in vals:\n name += ' WDR'\n return name", "def setLabelDistance(dist=24, axes='XYZ'):\n dislin.labdis(dist, axes)", "def onAddCutToolClicked(self, event):\n i_cube = self.cube_choice.GetSelection()\n i_dimension = self.cut_dimension_choice.GetSelection()\n\n if i_dimension <= 0:\n dlg_func.openWarningBox(_(u'CUT'), _(u'Cut dimension not selected'))\n else:\n value = self.cut_value_textCtrl.GetValue()\n if not value.strip():\n dlg_func.openWarningBox(_(u'CUT'), _(u'Cut value not specified'))\n else:\n cube = self._OLAP_server.getCubes()[i_cube]\n dimension = cube.getDimensions()[i_dimension - 1]\n row = (dimension.getLabel(), dimension.getName(), value)\n self.appendListCtrlRow(listctrl=self.cut_listCtrl, row=row)\n\n # After adding, clear the controls\n self.cut_dimension_choice.SetSelection(0)\n self.cut_value_textCtrl.SetValue(u'')\n\n event.Skip()" ]
[ "0.5650385", "0.54846936", "0.54270923", "0.5375306", "0.53342646", "0.5226105", "0.5209129", "0.51919734", "0.51784825", "0.5173263", "0.5163355", "0.51560795", "0.5154937", "0.51164854", "0.5109441", "0.5093374", "0.50911057", "0.5076122", "0.50540954", "0.50480634", "0.50421095", "0.5034301", "0.5032512", "0.5028899", "0.50236046", "0.50234383", "0.50142574", "0.50132155", "0.500397", "0.49963433", "0.49924707", "0.49883384", "0.49860042", "0.49729776", "0.49658248", "0.49618146", "0.49567258", "0.49540654", "0.49540654", "0.49540654", "0.49483338", "0.4944248", "0.49440426", "0.49428022", "0.49410334", "0.4940744", "0.49205828", "0.49146846", "0.49107683", "0.49019703", "0.49014068", "0.4897067", "0.4896715", "0.4887378", "0.4887378", "0.48800677", "0.48689413", "0.48567855", "0.485127", "0.48479623", "0.48441973", "0.48358923", "0.48252445", "0.4818517", "0.4812086", "0.4808856", "0.48074234", "0.48025966", "0.47962397", "0.47950944", "0.47950405", "0.47939527", "0.47912276", "0.47896543", "0.47838947", "0.47807014", "0.47798583", "0.4776863", "0.47678357", "0.47665718", "0.475749", "0.47557047", "0.47531223", "0.47501165", "0.4749837", "0.474615", "0.4737223", "0.47342587", "0.47282302", "0.47193056", "0.4719002", "0.47179413", "0.47126937", "0.47052646", "0.47046396", "0.4704533", "0.47022855", "0.4702027", "0.47002992", "0.4694844" ]
0.6397082
0
ht label. ht cuts, and cuts on particpating jets
def _make_ht_label(chain_parts): assert len(chain_parts) == 1, '_make_ht_label, no. of chain parts != 1' scenario = chain_parts[0]['hypoScenario'] assert scenario.startswith('HT'), '_make_ht_label(): scenario does not start with HT' arg_res = [ re.compile(r'^(?P<lo>\d*)(?P<key>ht)(?P<hi>\d*)$'), re.compile(r'^(?P<lo>\d*)(?P<key>et)(?P<hi>\d*)$'), re.compile(r'^(?P<lo>\d*)(?P<key>eta)(?P<hi>\d*)$'), ] defaults = { 'ht': ('0', 'inf'), 'et': ('0', 'inf'), 'eta': ('0', 'inf'), } args = _args_from_scenario(scenario) argvals = {} nargs = len(args) assert len(args) <= len(arg_res), 'bad num of args %d, expected < %d' % (len(args), len(arg_res)) # obtain argument values frrom scenario while args: arg = args.pop() for r in arg_res: m = r.match(arg) if m is not None: arg_res.remove(r) gd = m.groupdict() key = gd['key'] try: lo = float(gd['lo']) except ValueError: lo = float(defaults[key][0]) argvals[key+'lo'] = lo try: hi = float(gd['hi']) except ValueError: hi = float(defaults[key][1]) argvals[key+'hi'] = hi print (argvals) assert len(argvals) == 2*nargs, 'no of args: %d, expected %d' % (len(argvals), 2*nargs) print ('sent 100') result = """ ht([(%(htlo).0fht) (%(etlo).0fet) (%(etalo).0feta%(etahi).0f) ])""" % argvals print (result) return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def label_thin(self, orig_label):\n pil_thin = thin(orig_label)\n # Keep the original label and set non-thinning label as 0\n orig_label[~pil_thin] = 0\n\n return orig_label", "def kohonen():\n# plb.close('all')\n \n dim = 28*28\n data_range = 255.0\n \n # load in data and labels \n data = np.array(np.loadtxt('data.txt'))\n labels = np.loadtxt('labels.txt')\n\n # select 4 digits \n name = \"Stettler\"\n targetdigits = name2digits(name) # assign the four digits that should be used\n print(targetdigits) # output the digits that were selected\n\n # this selects all data vectors that corresponds to one of the four digits\n data = data[np.logical_or.reduce([labels==x for x in targetdigits]),:]\n \n dy, dx = data.shape\n \n #set the size of the Kohonen map. In this case it will be 6 X 6\n size_k = 6\n \n #set the width of the neighborhood via the width of the gaussian that\n #describes it\n sigma = 2.0\n \n #initialise the centers randomly\n centers = np.random.rand(size_k**2, dim) * data_range\n \n #build a neighborhood matrix\n neighbor = np.arange(size_k**2).reshape((size_k, size_k))\n\n #set the learning rate\n eta = 0.9 # HERE YOU HAVE TO SET YOUR OWN LEARNING RATE\n \n #set the maximal iteration count\n tmax = 5000 # this might or might not work; use your own convergence criterion\n \n #set the random order in which the datapoints should be presented\n i_random = np.arange(tmax) % dy\n np.random.shuffle(i_random)\n \n for t, i in enumerate(i_random):\n som_step(centers, data[i,:],neighbor,eta,sigma)\n\n # for visualization, you can use this:\n for i in range(size_k**2):\n plb.subplot(size_k,size_k,i)\n \n plb.imshow(np.reshape(centers[i,:], [28, 28]),interpolation='bilinear')\n plb.axis('off')\n \n # leave the window open at the end of the loop\n plb.show()\n plb.draw()", "def onCut(self):\n pass", "def odemis_to_hyperspy(filename='sampledata/cltest.h5',specbin=1) :\r\n\r\n f=h5.File(filename,'r')\r\n shome = 'Acquisition2//ImageData/'\r\n x = f[shome + 'Image']\r\n cdesc =f['Acquisition2/PhysicalData/ChannelDescription'].value[0].decode('utf-8')\r\n #print(cdesc)\r\n\r\n cltype = None\r\n if 'Spectrum' in cdesc :\r\n cltype = 'spectrum'\r\n elif 'CL intensity' in cdesc:\r\n cltype = 'panchrom'\r\n\r\n print('<' + filename + '> original shape :' ,x.shape, cltype)\r\n\r\n # strip unused dimensions and transpose/ reverse index order\r\n if cltype == 'panchrom' :\r\n xx=x[0,0,0,:,:].transpose((1,0))\r\n # just an image..\r\n else :\r\n xx=x[:,0,0,:,:].transpose((2,1,0))\r\n\r\n if cltype == 'spectrum' :\r\n #interpolate data to linearize the wavelength scale\r\n w = f[shome + 'DimensionScaleC'].value *1e9\r\n wx = np.linspace(w.min(),w.max(),w.size)\r\n for i in np.arange(xx.shape[0]) :\r\n for k in np.arange(xx.shape[1]) :\r\n xx[i,k,:] = np.interp(wx,w,xx[i,k,:])\r\n\r\n wslope = wx[1]-wx[0]\r\n woffset = wx.min()\r\n #wx = np.arange(w.size)\r\n #wslope,woffset=np.polyfit(wx,w,1)\r\n s = hs.signals.Signal1D(xx)\r\n\r\n elif cltype == 'panchrom' :\r\n s = hs.signals.Signal2D(xx)\r\n else :\r\n print('unknown type')\r\n\r\n print('hyperspy shape :' ,s.data.shape)\r\n\r\n\r\n s.metadata.General.title = 'Odemis: ' + cdesc\r\n s.metadata.General.original_filename = filename\r\n s.metadata.General.notes = cltype\r\n s.axes_manager[0].name = 'pos x'\r\n s.axes_manager[0].scale = f[shome + 'DimensionScaleX'].value * 1e6\r\n s.axes_manager[0].offset = f[shome + 'XOffset'].value * 1e6\r\n s.axes_manager[0].units = 'um'\r\n\r\n\r\n s.axes_manager[1].name = 'pos y'\r\n s.axes_manager[1].scale = 
f[shome + 'DimensionScaleX'].value * 1e6\r\n s.axes_manager[1].offset = f[shome + 'YOffset'].value * 1e6\r\n s.axes_manager[1].units = 'um'\r\n\r\n if cltype == 'spectrum' :\r\n s.axes_manager[2].name = 'wavelength'\r\n s.axes_manager[2].units = 'nm'\r\n s.axes_manager[2].offset = woffset\r\n s.axes_manager[2].scale = wslope\r\n s.metadata.signal_type = 'CL'\r\n\r\n f.close()\r\n if (specbin > 1) and (cltype == 'spectrum'):\r\n return( s.rebin(scale=[1,1,specbin]) )\r\n else :\r\n return( s )\r\n #end odemis_to_hyperspy\r\n #######################\r", "def cut(S, T, graph):\n ###TODO\n pass", "def __init__(self, label=None):\n super().__init__(\"h\", 1, [], label=label)", "def extract_info(config, cut, label):\n cfg = filter(lambda c: c['name'] == cut, config['physics']['cuts'])[0]\n text = \"\"\n if 'max' not in cfg:\n text += \"#geq \"\n text += str(cfg['min'])\n if 'max' in cfg and cfg['max'] != cfg['min']:\n text += '-' + str(cfg['max']) + ' ' + label + 's'\n elif cfg['min'] != 1:\n text += ' ' + label + 's'\n else:\n text += ' ' + label\n return text", "def ch(h1):\n return -(pic_height / float(h)) * h1", "def drawlabels(t, t1):\r\n t.fd(250)\r\n t.pd()\r\n t.write(\"Life\", font=(\"Arial\", 10, \"bold\"))\r\n t.pu()\r\n t.back(12)\r\n t.pd()\r\n t.write(\"Exp.\", font=(\"Arial\", 10, \"bold\"))\r\n t.pu()\r\n t.back(238)\r\n t.right(90)\r\n t.fd(80)\r\n t1.pu()\r\n t1.back(50)\r\n t1.rt(90)\r\n t1.fd(250)\r\n t1.pd()\r\n t1.write(\"Year\", font=(\"Arial\", 10, \"bold\"))\r\n t1.pu()\r\n t1.back(250)\r\n t1.left(90)\r\n t1.fd(50)", "def hxlcut():\n run_script(hxlcut_main)", "def write_label_ps(header_lines, base_lines, tail_lines, shape_list, title, outFn, cutofflist=[0.3,0.5,0.7], mode='fill'):\n OUT = open(outFn, \"w\")\n for header_line in header_lines:\n if r'{title}' in header_line:\n header_line = header_line.format(title=title)\n OUT.writelines(header_line)\n #print(len(shape_list), len())\n for shape,base_line in zip(shape_list,base_lines):\n if mode=='label':\n OUT.writelines( _color_command_segmented(shape, cutofflist)+\"\\n\" )\n elif mode=='heatmap':\n OUT.writelines( _color_command_heatmap(shape, Gradient_Colors, 0, 1)+\"\\n\" )\n else:\n raise RuntimeError(\"Sorry: mode='fill' Not applicant now\")\n OUT.writelines(base_line)\n for tail_line in tail_lines:\n OUT.writelines(tail_line)\n OUT.close()", "def __get_ohe_label__(self, label_idx) -> List[int]:\n\n label = [0] * self.n_classes\n label[label_idx] = 1\n\n return label", "def agglo_from_labelmask(\n h5path_in,\n h5path_lv='',\n ratio_threshold=0,\n h5path_out='',\n save_steps=False,\n protective=False,\n ):\n\n # check output paths\n outpaths = {'out': h5path_out}\n status = utils.output_check(outpaths, save_steps, protective)\n if status == \"CANCELLED\":\n return\n\n # open data for reading\n h5file_in, ds_in, elsize, axlab = utils.h5_load(h5path_in)\n h5file_lv, ds_lv, _, _ = utils.h5_load(h5path_lv)\n\n # open data for writing\n h5file_out, ds_out = utils.h5_write(None, ds_in.shape, ds_in.dtype,\n h5path_out,\n element_size_um=elsize,\n axislabels=axlab)\n\n ulabels = np.unique(ds_in)\n maxlabel = np.amax(ulabels)\n print(\"number of labels in watershed: {:d}\".format(maxlabel))\n\n fwmap = np.zeros(maxlabel + 1, dtype='i')\n\n areas_ws = np.bincount(ds_in.ravel())\n\n labelsets = {}\n rp_lw = regionprops(ds_lv, ds_in)\n for prop in rp_lw:\n\n maskedregion = prop.intensity_image[prop.image]\n counts = np.bincount(maskedregion)\n svoxs_in_label = [l for sl in np.argwhere(counts) for l in sl]\n\n 
ratios_svox_in_label = [float(counts[svox]) / float(areas_ws[svox])\n for svox in svoxs_in_label]\n fwmask = np.greater(ratios_svox_in_label, ratio_threshold)\n labelset = np.array(svoxs_in_label)[fwmask]\n labelsets[prop.label] = set(labelset) - set([0])\n\n basepath = h5path_in.split('.h5/')[0]\n utils.write_labelsets(labelsets, basepath + \"_svoxsets\",\n filetypes=['pickle'])\n\n ds_out[:] = utils.forward_map(np.array(fwmap), ds_in, labelsets)\n\n # close and return\n h5file_in.close()\n h5file_lv.close()\n try:\n h5file_out.close()\n except (ValueError, AttributeError):\n return ds_out", "def cut(self,cell):\r\n self.grid[cell[0]][cell[1]] = 1", "def o_wo_per_head(self):\n assert self.ff % self.heads == 0\n # fuse ff->e and projection layer of self-attention\n return (self.ff // (self.heads-self.padded_heads)) + self.qkv", "def ksh(i,t,htanses):\n for (zs,ys,zx,yx) in htanses[i]:\n alex.penup()\n alex.goto((zs%m)*20-10*m,(zs//m)*20-10*n)\n alex.pendown()\n alex.goto((ys%m+1)*20-10*m,(ys//m)*20-10*n)\n alex.goto((yx%m+1)*20-10*m,(yx//m+1)*20-10*n)\n alex.goto((zx%m)*20-10*m,(zx//m+1)*20-10*n)\n alex.goto((zs%m)*20-10*m,(zs//m)*20-10*n)\n alex.hideturtle()", "def create_teacher(self):\n\n #words = [\"one\", \"two\", \"three\", \"four\", \"five\", \"six\", \"seven\", \"eight\", \"nine\"]\n\n #print(\"self.length: \", self.length)\n for i, image in enumerate(self.images):\n if False:#i % 2 == 0:\n img = list(chunks(image, 10))\n plt.imshow(img, interpolation=\"nearest\", origin=\"upper\")\n plt.colorbar()\n plt.title(self.labels[i])\n plt.show()\n label = np.argmax(self.labels[i]) + 1\n label_vector = self.labels[i]\n timesteps = label + 1\n\n for chain in list(self.traces[i]):\n count_tensor = list()\n input_tensor = list()\n target_tensor = list()\n \n #count_tensor.append([0] * z_size)\n #input_tensor.append(image)\n #target_tensor.append([None, None])\n\n #count_padding = [0 for x in range(z_size)]\n \n for count, link in enumerate(chain):\n count_vector = [0 if x is not count else 1 for x in range(z_size)]\n count_tensor.append(count_vector)\n x, y = link\n image[y*200+x] = 255\n input_tensor.append(image)\n target_tensor.append(list(link))\n\n # Fill in the rest of the list with the same (current one doe\n #for cont in range(count + 1, z_size):\n for cont in range(count + 1, z_size + 1):\n #count_tensor.append(count_padding)\n count_tensor.append(count_vector)\n input_tensor.append(image)\n target_tensor.append(list(link))\n\n \n\n self.explode_lbls.append(label)\n self.explode_labels.append(label_vector)\n self.explode_counts.append(count_tensor)\n self.explode_images.append(input_tensor)\n self.explode_traces.append(target_tensor)\n #print(target_tensor)\n\n \n \n self.explode_length = len(self.explode_images)", "def get_hbls_hbbl(self):\n [Ly,N] = self.b.shape\n z_u_w = self.grid_dict['z_u_w']\n z_u_r = self.grid_dict['z_u_r']\n u = self.u\n v = self.v\n \n v_upts = TTTW_func.v2u(v)\n Hz = z_u_w[:,1:] - z_u_w[:,:-1]\n\n\n\n # CALCULATE swr_frac\n self.swr_frac = TTTW_func.lmd_swr_frac(self.grid_dict)\n\n\n # WHOLE THING HAPPENS IN j loop through y-indices\n \n # INITIALIZE ARRAYS\n self.kmo = np.zeros([Ly])\n self.Cr = np.zeros([Ly])\n self.kbl = np.empty([Ly],dtype='int')\n self.C_h_MO = np.zeros([Ly])\n self.Cr = np.zeros([Ly,N+1]) # sum term\n self.FC = np.zeros([Ly,N+1])\n self.swdk_r = np.zeros([Ly,N+1])\n \n self.zscale = np.zeros([Ly,N])\n self.Kern = np.zeros([Ly,N])\n\n \n # --> LOOP THROUGH Y-INDICES\n for j in range(Ly):\n if self.LIMIT_MO_DEPTH:\n self.kmo[j] = 0\n 
self.C_h_MO[j] = self.C_MO *self.ustar[j]**3/self.vonKar\n \n self.kbl[j] = 0\n self.Cr[j,-1] = 0 # set top Cr\n self.Cr[j,0] = 0 # set bottom Cr\n \n # SEARCH FOR MIXED LAYER DEPTH\n self.FC[j,-1] = 0.\n\n\n # ---> LOOP TOP TO BOTTOM (FORTRAN ==> k=N-1,1,-1)\n for k in range(N-1,0,-1):\n # INDEX MAP\n k_r = k-1\n k_w = k\n\n \n zscale = z_u_w[j,N] - z_u_r[j,k_r]\n self.zscale[j,k_w] = zscale\n if self.LMD_KPP:\n if self.LMD_BKPP:\n zscaleb = z_u_r[j,k_r] - z_u_w[j,0]\n Kern = zscale * zscaleb**2 / ( (zscale + self.epssfcs*self.hbls_old[j]) * (zscaleb**2+(self.epssfcb**2*self.hbbl_old[j]**2)))\n else:\n Kern = zscale / (zscale + (self.epssfcs*self.hbls_old[j]))\n else:\n Kern = 1.\n \n\n\n self.Kern[j,k_w] = Kern\n self.FC[j,k_w] = self.FC[j,k_w+1] + Kern * (\\\n ( ( u[j,k_r+1] - u[j,k_r] )**2 + ( v_upts[j,k_r+1] - v_upts[j,k_r])**2 ) \\\n / (Hz[j,k_r] + Hz[j,k_r+1]) \\\n - 0.5 * ( Hz[j,k_r] + Hz[j,k_r+1]) * (self.Ri_inv * self.bvf[j,k_w] + self.C_Ek*self.f[j]*self.f[j]))\n\n\n #\t\tLOOP THAT FINDS BL DEPTH ##\n #----> LOOP TOP TO BOTTOM (start at free surface, w-level surface) \n \n if self.LMD_KPP:\n #swdk_r only used in this function so don't need to be class attribute\n # but for testing make it an attribute to see what it is\n \n # fortran equivlanet ===> k=N,1,-1 \n for k in range(N,0,-1):\n # INDEX MAP\n k_r = k-1\n k_w = k\n\n ###################################################################### \n self.swdk_r[j,k_w] = np.sqrt( self.swr_frac[j,k_w] * self.swr_frac[j,k_w-1])\n zscale = z_u_w[j,N] - z_u_r[j,k_r]\n Bfsfc = self.Bo[j] + self.Bosol[j] * (1-self.swdk_r[j,k_w])\n \n self.bvf_max = np.sqrt(np.max([0,self.bvf[j,k_w-1]]))\n \n # CALCULATE TURBULENT VELOCITY SCALE FOR TRACERS\n \t\t\t self.ws = self.lmd_wscale_ws_only(Bfsfc, zscale,self.hbls_old[j],self.ustar[j])\n \n self.Vtsq = self.Vtc * self.ws* self.bvf_max + self.V0\n \n\n self.Cr[j,k_w] = self.FC[j,k_w] + self.Vtsq\n \n\n #######################################################################\n \n # SEARCH FOR hbls vertical level #\n '''\n kbl is specified at vertical w-level (via Cr which is at\n vertical w-levels)\n '''\n if self.kbl[j] == 0 and self.Cr[j,k_w] < 0:\n self.kbl[j] = k_w\n if self.LIMIT_MO_DEPTH:\n if self.kmo[j] == 0 and Bfsfc*(z_u_w[j,N] - z_u_r[j,k_r]) > self.C_h_MO[j]:\n self.kmo[j] = k_w\n\n \n #--> still in j-loop\n #######################################################\n \n # \t\tGET SURFACE BOUNDARY LAYER DEPTH # \n self.hbls[j] = z_u_w[j,N] - z_u_w[j,0] + self.eps # set hbls as depth of entire water column\n if self.kbl[j] > 0:\n k_w = self.kbl[j]\n k_r = k_w - 1 \n if k_w == N: # set hbls at the surface btwn w- and rho-levels at surface\n self.hbls[j] = z_u_w[j,N] - z_u_r[j,N-1]\n \n else:\n self.hbls[j] = z_u_w[j,N] - ( z_u_r[j,k_r] * self.Cr[j,k_w+1] - z_u_r[j,k_r+1] * self.Cr[j,k_w]) / \\\n (self.Cr[j,k_w+1] - self.Cr[j,k_w])\n \n if self.LIMIT_MO_DEPTH:\n if self.kmo[j] > 0:\n k_w = self.kmo[j]\n k_r = k_w-1\n if k_w == N:\n z_up = z_u_w[j,N]\n cff_up = np.max([0,Bo[j]])\n else:\n z_up = z_r[j,k_w+1]\n cff_up = np.max([0, Bo[j] + self.Bosol[j]*(1-self.swdk_r[j,(k_w-1)+1])])\n \n cff_dn = np.max([0,Bo[j] + self.Bosol[j] * (1-self.swdk_r[j,k_w])]) \n h_MO = z_u_w[j,N] + self.C_h_MO[j] * ( cff_up*z_up - cff_dn * z_u_r[j,k_r] ) \\\n / ( cff_up * cff_dn * (z_up - z_u_r[j,k_r]) ) \\\n + self.C_h_MO[j] * (cff_dn - cff_up)\n\n self.hbls[j] = np.min([self.hbls[j],np.max([h_MO,0])])\n\n\n\n #### GET BOTTOM BOUNDARY LAYER DEPTH #######\n if self.LMD_BKPP:\n self.kbl[j] = 0 # reset Cr at bottom and 
kbl for BKPP\n self.Cr[j,0] = 0.\n self.FC[j,0] = 1.5 * self.FC[j,1] - 0.5 * self.FC[j,2] # linear extrapolation\n \n #---> LOOP BOTTOM TO TOP\n # FIND kbl for BBL\n for k in range(1,N+1):\n k_r = k-1\n k_w = k \n self.Cr[j,k_w] = self.FC[j,k_w] - self.FC[j,0]\n \n # LOOK FOR FIRST ZERO CROSSING FROM BOTTOM UP\n if self.kbl[j] == 0 and self.Cr[j,k_w] > 0:\n self.kbl[j] = k_w \n \n\n self.hbbl[j] = z_u_w[j,N] - z_u_w[j,0] # total depth\n if self.kbl[j] > 0 :\n k_w = self.kbl[j] \n k_r = k_w -1\n if k_w == 1: # NO BBL CASE\n self.hbbl[j] = z_u_r[j,0] - z_u_w[j,0] #in between bottom rho and w-level\n else:\n self.hbbl[j] = ( z_u_r[j,k_r-1] * self.Cr[j,k_w] - z_u_r[j,k_r] * self.Cr[j,k_w-1]) / \\\n (self.Cr[j,k_w] - self.Cr[j,k_w-1]) - z_u_w[j,0]", "def process_labels(ctx, tex, chapter):\n headings = ['chapter'] + ['sub'*i + 'section' for i in range(4)]\n reh = r'(' + '|'.join(headings) + r'){(.+?)}'\n environments = ['thm', 'lem', 'exc', 'figure', 'equation']\n ree = r'begin{(' + '|'.join(environments) + r')}'\n rel = r'(\\w+)label{(.+?)}'\n rel2 = r'label{(.+?)}'\n bigone = r'\\\\({})|\\\\({})|\\\\({})|\\\\(caption)|\\\\({})'.format(reh, ree, rel, rel2)\n rx = re.compile(bigone)\n\n sec_ctr = [chapter] + [0]*(len(headings))\n env_ctr = [0]*len(environments)\n blocks = catlist()\n lastlabel = None\n lastidx = 0\n m = rx.search(tex, lastidx)\n while m:\n blocks.append(tex[lastidx:m.start()])\n lastidx = m.start()\n cmd = next_command(tex, lastidx)\n lastidx = cmd.end\n if m.group(2):\n # This is a sectioning command (chapter, subsection,...)\n name = m.group(2)\n i = headings.index(name)\n if i == 0:\n env_ctr = [0]*len(env_ctr)\n sec_ctr[i:] = [sec_ctr[i]+1]+[0]*(len(headings)-i-1)\n number = \".\".join([str(x) for x in sec_ctr[:i+1]])\n idd = \"{}:{}\".format(name, number)\n lastlabel = idd\n blocks.append(\"<a id='{}'></a>\".format(idd))\n\n title = '{}&emsp;{}'.format(number, cmd.args[0])\n blocks.append(r'\\{}{{{}}}'.format(name, title))\n\n elif m.group(5):\n # This is an environment (thm, lem, ...)\n name = m.group(5)\n lastenv = name # save this for a caption command coming later...\n i = environments.index(name)\n env_ctr[i] += 1\n number = \"{}.{}\".format(sec_ctr[0], env_ctr[i])\n idd = \"{}:{}\".format(name, number)\n lastlabel = idd\n blocks.append(\"<a id='{}'></a>\".format(idd))\n\n if name in ctx.theoremlike_environments:\n nicename = ctx.named_entities[name]\n title = '{}&nbsp;{}'.format(nicename, number)\n blocks.append(r'\\begin{{{}}}[{}]'.format(name, title))\n else:\n blocks.append(r'\\begin{{{}}}'.format(name))\n\n elif m.group(6):\n # This is a labelling command (\\thmlabel, \\seclabel,...)\n label = \"{}:{}\".format(m.group(7), m.group(8))\n ctx.label_map[label] = (ctx.outputfile, lastlabel)\n\n elif m.group(9):\n # This is a caption command\n name = lastenv\n i = environments.index(name)\n number = \"{}.{}\".format(sec_ctr[0], env_ctr[i])\n idd = \"{}:{}\".format(name, number)\n lastlabel = idd\n nicename = ctx.named_entities[name]\n title = '<span class=\"title\">{}&nbsp;{}</span>'.format(nicename, number)\n text = '{}&emsp;{}'.format(title, cmd.args[0])\n blocks.append(r'\\caption{{{}}}'.format(text))\n\n elif m.group(10):\n # This is a \\label command, probably the target of a pageref\n idd = gen_unique_id()\n blocks.append(\"<a id={}></a>\".format(idd))\n ctx.label_map[m.group(11)] = (ctx.outputfile, idd)\n\n m = rx.search(tex, lastidx)\n blocks.append(tex[lastidx:])\n return \"\".join(blocks)", "def simple_core(block,cut,laser):\r\n\r\n\tlayers = 
int(block[\"thickness\"]/laser[\"z_spacing\"])\r\n\r\n\t#Since all cuts are square, the offsets are more obvious than in the general linear case.\r\n\ttaper = math.tan(math.radians(laser[\"kerf_angle\"]/2)) * laser[\"z_spacing\"]\r\n\tmax_delta = math.tan(math.radians(laser[\"kerf_angle\"]/2)) * (block[\"thickness\"] + laser[\"z_final_overshoot\"]) * 2\r\n\t\r\n\tcutlist = []\r\n\tcutlist.append([\"a_abs\", \"0\"])\r\n\tcutlist.append([\"c_abs\", str(block[\"physical_rotation\"])])\r\n\tcutlist.append([\"z_abs\", str(block[\"thickness\"])])\r\n\r\n\tfor a in range(layers):\r\n\t\tx1, y1 = cut[\"final_dimension_x\"]/2 + a*taper, cut[\"final_dimension_y\"]/2 + a*taper\r\n\t\twhile abs(x1-cut[\"final_dimension_x\"]/2) < abs(max_delta):\r\n\t\t\tcutlist.append([\"jump\", str(x1 + block[\"origin_x\"]), str(y1 + block[\"origin_y\"])])\r\n\t\t\tcutlist.append([\"mark\", str(x1 + block[\"origin_x\"]), str(-y1 + block[\"origin_y\"])])\r\n\t\t\tcutlist.append([\"mark\", str(-x1 + block[\"origin_x\"]), str(-y1 + block[\"origin_y\"])])\r\n\t\t\tcutlist.append([\"mark\", str(-x1 + block[\"origin_x\"]), str(y1 + block[\"origin_y\"])])\r\n\t\t\tcutlist.append([\"mark\", str(x1 + block[\"origin_x\"]), str(y1 + block[\"origin_y\"])])\r\n\t\t\tx1, y1 = x1 + laser[\"xy_spacing\"], y1 + laser[\"xy_spacing\"]\r\n\t\tcutlist.append([\"z_step\", str(-laser[\"z_spacing\"])])\r\n\t\tmax_delta = max_delta - taper \r\n\treturn json.dumps(cutlist)", "def LabelDisks(self):\n pass", "def split2(self, eccMap, patchName='patch00', cutStep=1, borderWidth=2, isplot=False):\r\n minMarker = localMin(eccMap, cutStep)\r\n\r\n connectivity = np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]])\r\n\r\n newLabel = sm.watershed(eccMap, minMarker, connectivity=connectivity, mask=self.array)\r\n\r\n border = ni.binary_dilation(self.array).astype(np.int8) - self.array\r\n\r\n for i in range(1, np.amax(newLabel) + 1):\r\n currArray = np.zeros(self.array.shape, dtype=np.int8)\r\n currArray[newLabel == i] = 1\r\n currBorder = ni.binary_dilation(currArray).astype(np.int8) - currArray\r\n border = border + currBorder\r\n\r\n border[border > 1] = 1\r\n border = sm.skeletonize(border)\r\n\r\n if borderWidth > 1:\r\n border = ni.binary_dilation(border, iterations=borderWidth - 1).astype(np.int8)\r\n\r\n newPatchMap = ni.binary_dilation(self.array).astype(np.int8) * (-1 * (border - 1))\r\n\r\n labeledNewPatchMap, patchNum = ni.label(newPatchMap)\r\n\r\n # if patchNum != np.amax(newLabel):\r\n # print 'number of patches: ', patchNum, '; number of local minimum:', np.amax(newLabel)\r\n # raise ValueError, \"Number of patches after splitting does not equal to number of local minimum!\"\r\n\r\n newPatchDict = {}\r\n\r\n for j in range(1, patchNum + 1):\r\n\r\n currPatchName = patchName + '.' 
+ str(j)\r\n currArray = np.zeros(self.array.shape, dtype=np.int8)\r\n currArray[labeledNewPatchMap == j] = 1\r\n currArray = currArray * self.array\r\n\r\n if np.sum(currArray[:]) > 0:\r\n newPatchDict.update({currPatchName: Patch(currArray, self.sign)})\r\n\r\n if isplot:\r\n plt.figure()\r\n plt.subplot(121)\r\n plt.imshow(self.array, interpolation='nearest')\r\n plt.title(patchName + ': before split')\r\n plt.subplot(122)\r\n plt.imshow(labeledNewPatchMap, interpolation='nearest')\r\n plt.title(patchName + ': after split')\r\n\r\n return newPatchDict", "def __init__(self, smoothing=0.1):\n super(LabelSmoothing, self).__init__()\n self.confidence = 1.0 - smoothing\n self.smoothing = smoothing", "def hilfe(self):\n toto_hilfe(3)", "def identify_leaflets(u, time_ts):\n z = u.select_atoms(\"all\").center_of_geometry()[2]\n COM_z= np.array([0,0,z]) #defines the global midplane position along z\n x, y, z = u.trajectory.ts.triclinic_dimensions[0][0], u.trajectory.ts.triclinic_dimensions[1][1], u.trajectory.ts.triclinic_dimensions[2][2]\n box = np.array([x, y, z, 90, 90, 90]) \n ### Determining side of the bilayer CHOL belongs to in this frame\n lipid1 = 'CHL'\n lipid2 = 'DLIP'\n lipid3 = 'SSM'\n lipid4 = 'DSPC'\n \n lpd1_atoms = u.select_atoms('resname %s and name O2'%lipid1) \n lpd2_atoms = u.select_atoms('resname %s and name P '%lipid2) \n lpd3_atoms = u.select_atoms('resname %s and name P '%lipid3) \n lpd4_atoms = u.select_atoms('resname %s and name P '%lipid4)\n \n num_lpd2 = lpd2_atoms.n_atoms\n num_lpd3 = lpd3_atoms.n_atoms\n num_lpd4 = lpd4_atoms.n_atoms \n # atoms in the upper leaflet as defined by insane.py or the CHARMM-GUI membrane builders\n # select cholesterol headgroups within 1.5 nm of lipid headgroups in the selected leaflet\n # this must be done because CHOL rapidly flip-flops between leaflets\n # so we must assign CHOL to each leaflet at every time step, and in large systems\n # with substantial membrane undulations, a simple cut-off in the z-axis just will not cut it\n if side == 'up':\n lpd2i = lpd2_atoms[:int((num_lpd2)/2)]\n lpd3i = lpd3_atoms[:int((num_lpd3)/2)]\n lpd4i = lpd4_atoms[:int((num_lpd4)/2)]\n \n\n lipids = lpd2i + lpd3i + lpd4i \n\n ns_lipids = NS.AtomNeighborSearch(lpd1_atoms, box=box) \n lpd1i = ns_lipids.search(lipids,15.0) #1.5 nm\n leaflet = lpd1i + lpd2i + lpd3i + lpd4i \n\n elif side == 'down':\n lpd2i = lpd2_atoms[int((num_lpd2)/2):]\n lpd3i = lpd3_atoms[int((num_lpd3)/2):]\n lpd4i = lpd4_atoms[int((num_lpd4)/2):]\n\n lipids = lpd2i + lpd3i + lpd4i #+ lpd3i\n \n ns_lipids = NS.AtomNeighborSearch(lpd1_atoms, box=box)\n lpd1i = ns_lipids.search(lipids,15.0) # 1.5nm\n leaflet = lpd1i + lpd2i + lpd3i+ lpd4i \n return lpd1i, lpd2i, lpd3i, lpd4i, COM_z, box, leaflet", "def cmd_label_merged_boundaries(self,sun_dir,output_dir=None):\n sun=sunreader.SunReader(sun_dir)\n if output_dir is None:\n # defaults location of dense output\n output_dir=os.path.join(sun.datadir,'dwaq',\"global-dense\")\n \n hyd_fn=glob.glob(os.path.join(output_dir,'*.hyd'))[0]\n \n hydro=SunHydro(sun=sun,hyd_path=hyd_fn,flow_shps=[self.flows_shp])\n\n class SpliceScenario(waq_scenario.Scenario):\n base_path=output_dir\n name=\"spliced\"\n\n scen=SpliceScenario(hydro=hydro)\n\n self.log.info(\"Writing labels\")\n scen.hydro.write_boundary_links()", "def __init__(self, smoothing=0.0):\n super(LabelSmoothing, self).__init__()\n self.confidence = 1.0 - smoothing\n self.smoothing = smoothing", "def __init__(self, smoothing=0.0):\n super(LabelSmoothing, self).__init__()\n self.confidence = 
1.0 - smoothing\n self.smoothing = smoothing", "def make_stehle(self):\n\n temp_k = self.temp * e / k # temperature in K\n dens_cm = self.e_dens * 1.e-6 # electronic density in cm-3\n prefix = 'n_' + str(self.n_upper) + '_' + str(self.n_lower) + '_'\n\n # extract raw tabulated tabulated_data\n tab_temp_k = np.array(pystark.nc.variables[prefix + 'tempe'].data) # tabulated electron temperatures (K)\n olam0 = pystark.nc.variables[prefix + 'olam0'].data # line centre wavelength (A)\n num_tab_dens = pystark.nc.variables[prefix + 'id_max'].data\n fainom = pystark.nc.variables[prefix + 'fainom'].data\n tab_dens_cm = np.array(pystark.nc.variables[prefix + 'dense'].data) # tabulated electron densities (cm ** -3)\n f00 = np.array(pystark.nc.variables[prefix + 'f00'].data) # normal Holtsmark field strength (30 kV / m)\n dl12 = np.array(pystark.nc.variables[prefix + 'dl12'].data)\n dl12s = np.array(pystark.nc.variables[prefix + 'dl12s'].data)\n fainu = pystark.nc.variables[\n prefix + 'fainu'].data # Asymptotic value of iStark * (alpha ** 2.5) (\"wings factor in alfa units\")\n pr0 = np.array(pystark.nc.variables[\n prefix + 'pr0'].data) # Ratio of the mean interelectronic distance to the electronic Debye length\n jtot = np.array(pystark.nc.variables[prefix + 'jtot'].data,\n dtype=np.int) # \"number of wave lengths for the couple (T,Ne)\"\n dom = np.array(pystark.nc.variables[prefix + 'dom'].data) # frequency detunings in units (rad / (s*ues)\n d1om = np.array(pystark.nc.variables[prefix + 'd1om'].data)\n o1line = np.array(pystark.nc.variables[prefix + 'o1line'].data)\n o1lines = np.array(pystark.nc.variables[prefix + 'o1lines'].data)\n\n # ensure given temperature + density falls within tabulated values\n # change sligtly the value of the input density\n # dens_cm in order to remain , as far as possible, inside the tabulation\n # JSA: this first step seems bogus!\n\n if np.abs(dens_cm - tab_dens_cm[0]) / dens_cm <= 1.0E-3:\n dens_cm = tab_dens_cm[0] * 1.001\n\n for id in np.arange(1, num_tab_dens + 1):\n if np.abs(dens_cm - tab_dens_cm[id]) / dens_cm <= 1.0E-3:\n dens_cm = tab_dens_cm[id] * 0.999\n\n if dens_cm >= 2.0 * tab_dens_cm[num_tab_dens]:\n raise Exception(\n 'Your input density is higher than the largest tabulated value %f' % tab_dens_cm[num_tab_dens])\n\n if dens_cm <= tab_dens_cm[0]:\n raise Exception('Your input density is smaller than the smallest tabulated value %f' % tab_dens_cm[0])\n\n if temp_k >= tab_temp_k[9]:\n raise Exception('Your input temperature is higher than the largest tabulated value %f' % tab_temp_k[9])\n\n if temp_k <= tab_temp_k[0]:\n raise Exception('Your input temperature is lower than the smallest tabulated value %f' % tab_temp_k[0])\n\n normal_holtsmark_field = 1.25e-9 * (dens_cm ** (2. / 3.)) # normal field value in ues\n\n # calculate line centre wavelength and frequency using Rydberg formula\n # JSA: I have made this step clearer and corrected for deuteron mass in the Rydberg constant (though the effect is small)\n # TODO make sure this matches olam0 parameter above -- why were there two variables in the first place?!\n # rydberg_m = Rydberg / (1. 
+ (electron_mass / physical_constants['deuteron mass'][0]))\n # wl_0_angst = 1e10 * (rydberg_m * (1 / n_lower ** 2 - 1 / n_upper ** 2)) ** -1\n\n wl_centre_angst = self.wl_centre * 1e10\n\n c_angst = c * 1e10 # velocity of light in Ansgtroms / s\n angular_freq_0 = 2 * np.pi * c_angst / wl_centre_angst # rad / s\n\n otrans = -2 * np.pi * c_angst / wl_centre_angst ** 2\n\n olines = o1lines / np.abs(otrans)\n oline = o1line / np.abs(otrans)\n\n # Limit analysis_tools to uncorrelated plasmas.\n # check that mean interelectronic distance is smaller than the electronic Debye length (equ. 10)\n PR0_exp = 0.0898 * (dens_cm ** (1. / 6.)) / np.sqrt(temp_k) # = (r0 / debye)\n if PR0_exp > 1.:\n raise Exception('The plasma is too strongly correlated\\ni.e. r0/debye=0.1\\nthe line cannot be computed.')\n\n # fainom_exp=fainom*(F00_exp**1.5)\n # fainum_exp=fainom_exp/( (OPI*2.)**1.5)\n\n # ========================\n # TABULATION Format CDS\n # si on veut ecrire\n # n -np lambda0 kalpha Ne E0 T R0/Debye Dalpha iDoppler iStark\n\n # IN_cds= N+0.01\n # INP_cds = NP+0.01\n\n # ***********************************************************\n # Don't edit the CDS format...\n # ***********************************************************\n\n # Skipped the code in the IF statement starting at line 470, since it\n # isn't used, if (.FALSE.) ...\n\n # ==============================================\n # define an unique detunings grid - domm - for the tabulated\n # profiles ( various temperatures , densities)\n # calculate all the line shapes for this common grid\n # units used at this points are Domega_new= Delta(omega)/F00\n # in rd/(s-1 ues)\n\n max_num_dens = 30 # Maximum number of densities\n max_num_tab_temp = 10\n max_num_detunings = 60 # Maximum number of detunings\n jtot = jtot.astype(np.int)\n domm = np.zeros(100000)\n dom0 = np.zeros(10000)\n tprof = np.zeros([max_num_dens, max_num_tab_temp, 10000])\n tprofs = np.zeros([max_num_dens, max_num_tab_temp, 10000])\n uprof = np.zeros([max_num_dens, 10000])\n uprofs = np.zeros([max_num_dens, 10000])\n\n inc = 0\n domm[inc] = 0.0\n # ---- Look to replace this loop\n for id in np.arange(num_tab_dens + 1): # loop over tab densities\n for j in np.arange(max_num_tab_temp): # loop over tab temperatures (?)\n for i in np.arange(1, jtot[id, j]):\n inc += 1\n dom0[inc] = dom[id, j, i]\n\n inc = np.count_nonzero(dom)\n npik = inc + 1\n # nut=10000\n\n # Calling numpy sort instead of piksrt\n tmp = np.sort(dom0[0:npik])\n dom0[0:npik] = tmp[0:npik]\n # dom0 seems to agree with the FORTRAN version\n\n inc = 0\n domm[0] = 0.0\n # print 'npik',npik\n # ---- Look to replace this loop\n for i in np.arange(1, npik):\n dif = (dom0[i] - dom0[i - 1])\n if dif <= 1.0E-6:\n continue\n if dif / np.abs(dom0[i]) <= 0.1:\n continue\n inc = inc + 1\n domm[inc] = dom0[i]\n\n jdom = inc + 1 # One line after marker 35\n\n for id in np.arange(num_tab_dens):\n for j in np.arange(10):\n if pr0[id, j] > 1.0:\n continue\n\n tprof[id, j, 0] = oline[id, j, 0]\n tprofs[id, j, 0] = olines[id, j, 0]\n\n if jtot[id, j] == 0:\n continue\n\n for i in np.arange(1, jdom + 1):\n skip1 = False\n skip2 = False\n # print 'i',i\n domeg = domm[i]\n ij_max = jtot[id, j]\n # print 'domeg,ij_max',domeg,ij_max\n for ij in np.arange(1, ij_max - 1):\n # print 'ij',ij\n test = (domeg - dom[id, j, ij]) * (domeg - dom[id, j, ij - 1])\n # print 'test1:',test\n if test <= 0.0:\n # print 'triggered test1'\n x1 = dom[id, j, ij - 1]\n x2 = dom[id, j, ij]\n x3 = dom[id, j, ij + 1]\n y1 = oline[id, j, ij - 1]\n y2 = oline[id, j, 
ij]\n y3 = oline[id, j, ij + 1]\n # print 'x1,x2,x3',x1,x2,x3\n # print 'y1,y2,y3',y1,y2,y3\n tprof[id, j, i] = pystark.FINTRP(x1, x2, x3, y1, y2, y3, domeg)\n y1 = olines[id, j, ij - 1]\n y2 = olines[id, j, ij]\n y3 = olines[id, j, ij + 1]\n tprofs[id, j, i] = pystark.FINTRP(x1, x2, x3, y1, y2, y3, domeg)\n # print 'tprof[id,j,i]',tprof[id,j,i]\n # print 'tprofs[id,j,i]',tprofs[id,j,i]\n skip1 = True\n skip2 = True\n break\n\n if skip1 is False:\n test = (domeg - dom[id, j, ij_max - 2]) * (domeg - dom[id, j, ij_max - 1])\n # print 'test2:',test\n # print 'domeg',domeg\n # print 'dom[id,j,ij_max-1]',dom[id,j,ij_max-2]\n # print 'dom[id,j,ij_max]',dom[id,j,ij_max-1]\n if test <= 0.0:\n # print 'triggered test2'\n x1 = dom[id, j, ij_max - 3]\n x2 = dom[id, j, ij_max - 2]\n x3 = dom[id, j, ij_max - 1]\n y1 = oline[id, j, ij_max - 3]\n y2 = oline[id, j, ij_max - 2]\n y3 = oline[id, j, ij_max - 1]\n tprof[id, j, i] = pystark.FINTRP(x1, x2, x3, y1, y2, y3, domeg)\n y1 = olines[id, j, ij_max - 3]\n y2 = olines[id, j, ij_max - 2]\n y3 = olines[id, j, ij_max - 1]\n tprofs[id, j, i] = pystark.FINTRP(x1, x2, x3, y1, y2, y3, domeg)\n skip2 = True\n # print 'x1,x2,x3',x1,x2,x3\n # print 'y1,y2,y3',y1,y2,y3\n # print 'tprof[id,j,i]',tprof[id,j,i]\n # print 'tprofs[id,j,i]',tprofs[id,j,i]\n continue\n\n if skip2 is False:\n if domeg > dom[id, j, ij_max]:\n # print 'triggered test3'\n tprof[id, j, i] = fainom / (domeg ** 2.5)\n tprofs[id, j, i] = tprof[id, j, i]\n continue\n\n # We can skip writing the intermediate file\n\n\n for id in np.arange(num_tab_dens):\n otest_dens = (dens_cm - tab_dens_cm[id]) * (dens_cm - tab_dens_cm[id + 1])\n if otest_dens <= 0.0:\n dense1 = tab_dens_cm[id]\n dense2 = tab_dens_cm[id + 1]\n id1 = id\n id2 = id + 1\n break\n\n if dens_cm >= tab_dens_cm[num_tab_dens]:\n dense1 = tab_dens_cm[num_tab_dens - 1]\n dense2 = tab_dens_cm[num_tab_dens]\n id1 = num_tab_dens - 1\n id2 = num_tab_dens\n\n for it in np.arange(10):\n otest = (temp_k - tab_temp_k[it]) * (temp_k - tab_temp_k[it + 1])\n if otest <= 0.0:\n it1 = it\n it2 = it + 1\n # pr01 = pr0[id2,it1] # max value of pr0 for T1,T2,dense1,dense2\n tempe1 = tab_temp_k[it]\n tempe2 = tab_temp_k[it + 1]\n break\n\n # interpolation in temperature\n for id in np.arange(id1, id2 + 1):\n for i in np.arange(jdom):\n uprof[id, i] = tprof[id, it1, i] + (temp_k - tempe1) * (tprof[id, it2, i] - tprof[id, it1, i]) / (\n tempe2 - tempe1)\n uprofs[id, i] = tprofs[id, it1, i] + (temp_k - tempe1) * (tprofs[id, it2, i] - tprofs[id, it1, i]) / (\n tempe2 - tempe1)\n\n delta_lambda = np.zeros(jdom)\n delta_nu = np.zeros(jdom)\n wprof_nu = np.zeros(jdom)\n wprofs_nu = np.zeros(jdom)\n\n for i in np.arange(jdom):\n wprof = uprof[id1, i] + (dens_cm - dense1) * (uprof[id2, i] - uprof[id1, i]) / (dense2 - dense1)\n wprofs = uprofs[id1, i] + (dens_cm - dense1) * (uprofs[id2, i] - uprofs[id1, i]) / (dense2 - dense1)\n delta_omega = domm[i] * normal_holtsmark_field\n delta_nu[i] = delta_omega / (2 * np.pi)\n delta_lambda[i] = wl_centre_angst * delta_omega / (angular_freq_0 + delta_omega)\n # print(delta_lambda[i])\n wprof_nu[i] = (wprof / normal_holtsmark_field) * (2. * np.pi)\n wprofs_nu[i] = (wprofs / normal_holtsmark_field) * (2. 
* np.pi)\n # print '%e %e %e %e' %(delta_lambda[i],delta_nu[i],wprof_nu[i],wprofs_nu[i])\n\n delta_lambda2 = np.concatenate((-delta_lambda[::-1], delta_lambda)) + wl_centre_angst # + olam0\n delta_nu2 = np.concatenate((-delta_nu[::-1], delta_nu))\n wprof_nu2 = np.concatenate((wprof_nu[::-1], wprof_nu))\n wprofs_nu2 = np.concatenate((wprofs_nu[::-1], wprofs_nu))\n\n # for some reason, i only get a good agreement with the other models if i take the pure Stark broadened Stehle\n # output and manually convolve it with the Doppler profile -- not sure why...\n ls_sd = wprofs_nu2\n\n # interpolate onto frequency axis\n ls_sd = np.interp(self.freq_axis, delta_nu2 + self.freq_centre, ls_sd)\n\n return ls_sd", "def _add_labels(self):\n coords = self['pore.coords']\n self['pore.front'] = coords[:,0]<(0.1*self._Lx)\n self['pore.back'] = coords[:,0]>(0.9*self._Lx)\n self['pore.left'] = coords[:,1]<(0.1*self._Ly)\n self['pore.right'] = coords[:,1]>(0.9*self._Ly)\n self['pore.bottom'] = coords[:,2]<(0.1*self._Lz)\n self['pore.top'] = coords[:,2]>(0.9*self._Lz)\n bnds = self.pores(labels=['front','back','left','right','bottom','top'])\n self['pore.boundary'] = False\n self['pore.boundary'] = bnds", "def __call__(self, src, label):\n\n h, w, _ = src.shape\n # interp = np.random.randint(0, 5)\n img = timage.resize_short_within(src, self._short, self._max_size, interp=1)\n img, flips = timage.random_flip(img, px=0.5)\n img = img.astype(np.float32)\n target_list_1 = []\n target_list_2 = []\n\n for k in range(self.teacher_num):\n if self.teacher_aug:\n target_image_1 = self.random_color_aug(img)\n else:\n target_image_1 = img\n\n target_image_2 = self.random_color_aug(img)\n\n target_image_1 = mx.nd.image.to_tensor(target_image_1)\n target_image_1 = mx.nd.image.normalize(target_image_1, mean=self._mean, std=self._std)\n\n target_image_2 = mx.nd.image.to_tensor(target_image_2)\n target_image_2 = mx.nd.image.normalize(target_image_2, mean=self._mean, std=self._std)\n target_list_1.append(target_image_1)\n target_list_2.append(target_image_2)\n target_list_1 = mx.nd.concat(*target_list_1, dim=0)\n target_list_2 = mx.nd.concat(*target_list_2, dim=0)\n return target_list_1, target_list_2", "def _build_ham(self):\n path = self._solverpath.long_tail()\n print(path)\n current, k = self.snake.head(), 0\n for direc in path:\n self.information[current.x][current.y].idx = k\n self.information[current.x][current.y].direc = direc\n current = current.adj(direc)\n k += 1\n # Process snake bodies\n current = self.snake.tail()\n for _ in range(self.snake.len() - 1):\n self.information[current.x][current.y].idx = k\n self.information[current.x][current.y].direc = self.snake.direc\n current = current.adj(self.snake.direc)\n k += 1", "def tagview(tab,label,x,y):\r\n font = cv2.FONT_HERSHEY_SIMPLEX\r\n col=classifc[label]\r\n labnow=classif[label]\r\n# print (labnow, text)\r\n if label == 'back_ground':\r\n deltay=30\r\n else:\r\n# deltay=25*((labnow-1)%5)\r\n deltay=40+10*(labnow-1)\r\n\r\n viseg=cv2.putText(tab,label,(x, y+deltay), font,0.3,col,1)\r\n return viseg", "def split(self, eccMap, patchName='patch00', cutStep=1, borderWidth=2, isplot=False):\r\n minMarker = localMin(eccMap, cutStep)\r\n\r\n plt.figure()\r\n plt.imshow(minMarker, vmin=0, interpolation='nearest')\r\n plt.colorbar()\r\n plt.title('markers 1')\r\n plt.show()\r\n\r\n minMarker = minMarker.astype(np.int32)\r\n selfArray = self.array.astype(np.int32)\r\n minMarker = minMarker + 1\r\n minMarker[minMarker == 1] = 0\r\n minMarker = minMarker + (-1 * (selfArray - 
1))\r\n # minMarker: marker type for opencv watershed,\r\n # sure background = 1\r\n # unknow = 0\r\n # sure forgrand = 2,3,4... etc\r\n\r\n plt.figure()\r\n plt.imshow(minMarker, vmin=0, interpolation='nearest')\r\n plt.colorbar()\r\n plt.title('markers 2')\r\n plt.show()\r\n\r\n eccMapNor = (np.round(ia.array_nor(eccMap) * 255)).astype(np.uint8)\r\n eccMapRGB = cv2.cvtColor(eccMapNor, cv2.COLOR_GRAY2RGB)\r\n # eccMapRGB: image type for opencv watershed, RGB, [uint8, uint8, uint8]\r\n\r\n newLabel = cv2.watershed(eccMapRGB, minMarker)\r\n\r\n plt.figure()\r\n plt.imshow(newLabel, vmin=0, interpolation='nearest')\r\n plt.colorbar()\r\n plt.title('markers 3')\r\n plt.show()\r\n\r\n newBorder = np.zeros(newLabel.shape).astype(np.int)\r\n\r\n newBorder[newLabel == -1] = 1\r\n\r\n border = ni.binary_dilation(self.array).astype(np.int) - self.array\r\n\r\n border = newBorder + border\r\n\r\n border[border > 1] = 1\r\n\r\n border = sm.skeletonize(border)\r\n\r\n if borderWidth > 1:\r\n border = ni.binary_dilation(border, iterations=borderWidth - 1).astype(np.int8)\r\n\r\n newPatchMap = ni.binary_dilation(self.array).astype(np.int8) * (-1 * (border - 1))\r\n\r\n labeledNewPatchMap, patchNum = ni.label(newPatchMap)\r\n\r\n # if patchNum != np.amax(newLabel):\r\n # print 'number of patches: ', patchNum, '; number of local minimum:', np.amax(newLabel)\r\n # raise ValueError, \"Number of patches after splitting does not equal to number of local minimum!\"\r\n\r\n newPatchDict = {}\r\n\r\n for j in range(1, patchNum + 1):\r\n\r\n currPatchName = patchName + '.' + str(j)\r\n currArray = np.zeros(self.array.shape, dtype=np.int8)\r\n currArray[labeledNewPatchMap == j] = 1\r\n currArray = currArray * self.array\r\n\r\n if np.sum(currArray[:]) > 0:\r\n newPatchDict.update({currPatchName: Patch(currArray, self.sign)})\r\n\r\n if isplot:\r\n plt.figure()\r\n plt.subplot(121)\r\n plt.imshow(self.array, interpolation='nearest')\r\n plt.title(patchName + ': before split')\r\n plt.subplot(122)\r\n plt.imshow(labeledNewPatchMap, interpolation='nearest')\r\n plt.title(patchName + ': after split')\r\n\r\n return newPatchDict", "def do_htt_plots(tree, output_dir, cut=''):\n for logz in [True, False]:\n make_2d_plot(tree, 'httRef', HTT_REF_STR, NB_HTT, HTT_MIN, HTT_MAX, 'httL1', HTT_L1_STR, NB_HTT, HTT_MIN, HTT_MAX,\n os.path.join(output_dir, 'httRef_httL1.pdf'), logz=logz, normx=False,\n cut=cut, title=TITLE, diagonal_line=True)\n for normx in [True, False]:\n make_2d_plot(tree, 'httL1', HTT_L1_STR, NB_HTT, HTT_MIN, HTT_MAX, 'httL1/httRef', HTT_RATIO_STR, NB_HTT_RATIO, HTT_RATIO_MIN, HTT_RATIO_MAX,\n os.path.join(output_dir, 'httRatio_httL1.pdf'), logz=logz, normx=normx,\n cut=cut, title=TITLE, horizontal_line=True)\n make_2d_plot(tree, 'httRef', HTT_REF_STR, NB_HTT, HTT_MIN, HTT_MAX, 'httL1/httRef', HTT_RATIO_STR, NB_HTT_RATIO, HTT_RATIO_MIN, HTT_RATIO_MAX,\n os.path.join(output_dir, 'httRatio_httRef.pdf'), logz=logz, normx=normx,\n cut=cut, title=TITLE, horizontal_line=True)\n\n make_2d_plot(tree, 'httL1', HTT_L1_STR, NB_HTT, HTT_MIN, HTT_MAX, 'httL1-httRef', HTT_DIFF_STR, NB_HTT_DIFF, HTT_DIFF_MIN, HTT_DIFF_MAX,\n os.path.join(output_dir, 'httDiff_httL1.pdf'), logz=logz, normx=normx,\n cut=cut, title=TITLE, horizontal_line=True)\n make_2d_plot(tree, 'httRef', HTT_REF_STR, NB_HTT, HTT_MIN, HTT_MAX, 'httL1-httRef', HTT_DIFF_STR, NB_HTT_DIFF, HTT_DIFF_MIN, HTT_DIFF_MAX,\n os.path.join(output_dir, 'httDiff_httRef.pdf'), logz=logz, normx=normx,\n cut=cut, title=TITLE, horizontal_line=True)\n make_2d_plot(tree, 
'httL1/httRef', HTT_RATIO_STR, NB_HTT_RATIO, HTT_RATIO_MIN, HTT_RATIO_MAX,\n 'httL1-httRef', HTT_DIFF_STR, NB_HTT_DIFF, HTT_DIFF_MIN, HTT_DIFF_MAX,\n os.path.join(output_dir, 'httDiff_httRatio.pdf'), logz=logz, normx=normx,\n cut=cut, title=TITLE, horizontal_line=True)", "def __init__(self, trg_vocab_size, label_smoothing=0.1, reduction='mean', with_logits=True, ignore_index=0):\n super(LabelSmoothingLoss, self).__init__()\n self.with_logits = with_logits\n self.ignore_index = ignore_index\n self.kl_divergence = nn.KLDivLoss(reduction=reduction)\n\n self._create_one_hot(label_smoothing, trg_vocab_size)\n self.confidence = 1.0 - label_smoothing", "def nodules_connection(label_data, label_header):\n\n\n las_labels = measure.label(label_data,\n neighbors=8,\n background=0,\n return_num=True)\n\n las_labels_nzero = np.nonzero(las_labels[0])\n [xdif, ydif, zdif] = [np.amax(las_labels_nzero[0])-np.amin(las_labels_nzero[0]),\n np.amax(las_labels_nzero[1])-np.amin(las_labels_nzero[1]),\n np.amax(las_labels_nzero[2])-np.amin(las_labels_nzero[2])]\n\n # conversion pixels to mm\n dims = label_header['pixdim']\n if label_header['xyzt_units'] == 10:\n #dimensions in mm\n print('xyzt_units=10')\n xdif=dims[1]*xdif\n ydif=dims[2]*ydif\n zdif=dims[3]*zdif\n\n\n return las_labels,[xdif,ydif,zdif]", "def at_s2ncut(self):\n\n\t # Notch out the transit and recompute\n\t fmcut = self.fm.copy()\n\t fmcut.fill_value=0\n\t # Widen by twice the transit duration\n\t tmask = self.rLbl['tRegLbl'] >= 0\n\t tmask = np.convolve(\n\t tmask.astype(float),\n\t np.ones(self.header['tdurcad'] * 2),\n\t mode='same'\n\t )\n\t tmask = tmask.astype(bool)\n\t fmcut.mask = fmcut.mask | tmask\n\t grid = tfind.Grid(self.t,fmcut)\n\n\n\t pgram_params = [\n\t dict(Pcad1=self.Pcad - 1, Pcad2=self.Pcad + 1, twdG = [self.header['tdurcad']])\n\t ]\n\t pgram = grid.periodogram(pgram_params,mode='max')\n\t idxmax = pgram.s2n.idxmax()\n\n\t dkeys = 's2ncut s2ncut_t0 s2ncut_mean'.split()\n\t pkeys = 's2n t0 mean'.split()\n\n\t for dkey,pkey in zip(dkeys,pkeys):\n\t self.add_attr(dkey,pgram.ix[idxmax,pkey])", "def at_s2ncut(self):\n\n\t # Notch out the transit and recompute\n\t fmcut = self.fm.copy()\n\t fmcut.fill_value=0\n\t # Widen by twice the transit duration\n\t tmask = self.rLbl['tRegLbl'] >= 0\n\t tmask = np.convolve(\n\t tmask.astype(float),\n\t np.ones(self.header['tdurcad'] * 2),\n\t mode='same'\n\t )\n\t tmask = tmask.astype(bool)\n\t fmcut.mask = fmcut.mask | tmask\n\t grid = tfind.Grid(self.t,fmcut)\n\n\n\t pgram_params = [\n\t dict(Pcad1=self.Pcad - 1, Pcad2=self.Pcad + 1, twdG = [self.header['tdurcad']])\n\t ]\n\t pgram = grid.periodogram(pgram_params,mode='max')\n\t idxmax = pgram.s2n.idxmax()\n\n\t dkeys = 's2ncut s2ncut_t0 s2ncut_mean'.split()\n\t pkeys = 's2n t0 mean'.split()\n\n\t for dkey,pkey in zip(dkeys,pkeys):\n\t self.add_attr(dkey,pgram.ix[idxmax,pkey])", "def housing_labels_(strat_train_set):\n logging.info(\"copy of dataset\")\n housing_labels = strat_train_set[\"median_house_value\"].copy()\n return housing_labels", "def addLabels(t):\n if not t.label:\n t.label = \"\".join([choice(\"abcdefghijklmnopqrstuvwxyz\") for i in range(4)])\n for r,w in t.children:\n addLabels(r)", "def draw_heading(self, heading): \n\n heading_label = ttk.Label(self.frame, text=heading, background=\"blue\",\n foreground=\"white\", anchor=CENTER)\n \n heading_label.configure(font=('Times', 15, \"bold\"))\n heading_label.configure(wraplength=self.width) \n heading_label.pack(side=TOP, fill=X, ipady=10)", "def h_t(self, x, t):\n ret = 0\n 
strong_classifier = self.classifiers[0:t+1]\n for wc in strong_classifier:\n ret += wc.classify(x)\n return ret", "def h_t(self, x, t):\n ret = 0\n strong_classifier = self.classifiers[0:t+1]\n for wc in strong_classifier:\n ret += wc.classify(x)\n return ret", "def encodeToLabels(self, gt_instances):\n raw_boxes_xyzwhd = np.zeros((self.config_data[\"max_boxes_per_frame\"], 7))\n ### initialize gronud truth labels as np.zeors ###\n gt_labels = np.zeros(list(self.headoutput_shape[1:4]) + \\\n [len(self.anchor_boxes)] + \\\n [len(self.config_data[\"all_classes\"]) + 7])\n\n ### start transferring box to ground turth label format ###\n for i in range(len(gt_instances[\"classes\"])):\n if i > self.config_data[\"max_boxes_per_frame\"]:\n continue\n class_name = gt_instances[\"classes\"][i]\n box_xyzwhd = gt_instances[\"boxes\"][i]\n class_id = self.config_data[\"all_classes\"].index(class_name)\n if i < self.config_data[\"max_boxes_per_frame\"]:\n raw_boxes_xyzwhd[i, :6] = box_xyzwhd\n raw_boxes_xyzwhd[i, 6] = class_id\n class_onehot = helper.smoothOnehot(class_id, len(self.config_data[\"all_classes\"]))\n \n exist_positive = False\n\n grid_strid = self.grid_strides\n anchor_stage = self.anchor_boxes\n box_xyzwhd_scaled = box_xyzwhd[np.newaxis, :].astype(np.float32)\n box_xyzwhd_scaled[:, :3] /= grid_strid\n anchorstage_xyzwhd = np.zeros([len(anchor_stage), 6])\n anchorstage_xyzwhd[:, :3] = np.floor(box_xyzwhd_scaled[:, :3]) + 0.5\n anchorstage_xyzwhd[:, 3:] = anchor_stage.astype(np.float32)\n\n iou_scaled = helper.iou3d(box_xyzwhd_scaled, anchorstage_xyzwhd, \\\n self.input_size)\n ### NOTE: 0.3 is from YOLOv4, maybe this should be different here ###\n ### it means, as long as iou is over 0.3 with an anchor, the anchor\n ### should be taken into consideration as a ground truth label\n iou_mask = iou_scaled > 0.3\n\n if np.any(iou_mask):\n xind, yind, zind = np.floor(np.squeeze(box_xyzwhd_scaled)[:3]).\\\n astype(np.int32)\n ### TODO: consider changing the box to raw yolohead output format ###\n gt_labels[xind, yind, zind, iou_mask, 0:6] = box_xyzwhd\n gt_labels[xind, yind, zind, iou_mask, 6:7] = 1.\n gt_labels[xind, yind, zind, iou_mask, 7:] = class_onehot\n exist_positive = True\n\n if not exist_positive:\n ### NOTE: this is the normal one ###\n ### it means take the anchor box with maximum iou to the raw\n ### box as the ground truth label\n anchor_ind = np.argmax(iou_scaled)\n xind, yind, zind = np.floor(np.squeeze(box_xyzwhd_scaled)[:3]).\\\n astype(np.int32)\n gt_labels[xind, yind, zind, anchor_ind, 0:6] = box_xyzwhd\n gt_labels[xind, yind, zind, anchor_ind, 6:7] = 1.\n gt_labels[xind, yind, zind, anchor_ind, 7:] = class_onehot\n\n has_label = False\n for label_stage in gt_labels:\n if label_stage.max() != 0:\n has_label = True\n gt_labels = [np.where(gt_i == 0, 1e-16, gt_i) for gt_i in gt_labels]\n return gt_labels, has_label, raw_boxes_xyzwhd", "def __init__(self, iht_size=4096, num_tilings=8, num_tiles=8):\n self.iht = tc.IHT(iht_size)\n self.num_tilings = num_tilings\n self.num_tiles = num_tiles", "def horde_step(self, observation):", "def _make_vbenf_label(chain_parts):\n\n # toy label for development: run simple and dijet independently.\n # simple makes Et cuts on two jets. 
Independently (sharing possible)\n # of jets choosean by simple, the dijet\n # scenario requires a dijet of mass > 900, and opening angle in phi > 2.6\n\n assert len(chain_parts) == 1\n scenario = chain_parts[0]['hypoScenario']\n assert scenario.startswith('vbenf')\n args = _args_from_scenario(scenario)\n if not args:\n return 'and([]simple([(50et)(70et)])combgen([(2)] dijet([(900djmass, 26djdphi)])))' \n arg_res = [\n re.compile(r'(?P<lo>\\d*)(?P<key>fbet)(?P<hi>\\d*)'),\n re.compile(r'(?P<lo>\\d*)(?P<key>mass)(?P<hi>\\d*)'),\n re.compile(r'(?P<lo>\\d*)(?P<key>et)(?P<hi>\\d*)'),\n ]\n\n defaults = {\n 'et': ('101', 'inf'),\n 'mass': ('800', 'inf'),\n 'fbet': ('501', 'inf'),\n }\n\n argvals = {}\n while args:\n assert len(args) == len(arg_res)\n arg = args.pop()\n for r in arg_res:\n m = r.match(arg)\n if m is not None:\n arg_res.remove(r)\n gd = m.groupdict()\n key = gd['key']\n try:\n lo = float(gd['lo'])\n except ValueError:\n lo = defaults[key][0]\n argvals[key+'lo'] = lo \n try:\n hi = float(gd['hi'])\n except ValueError:\n hi = defaults[key][1]\n argvals[key+'hi'] = hi\n\n assert len(args) == len(arg_res)\n assert len(args) == 0\n\n return \"\"\"\n and\n (\n []\n simple\n (\n [(%(etlo).0fet, 500neta)(%(etlo).0fet, peta500)]\n )\n combgen\n (\n [(10et, 0eta320)]\n dijet\n (\n [(%(masslo).0fdjmass, 26djdphi)]\n ) \n simple\n (\n [(10et, 0eta320)(20et, 0eta320)]\n )\n )\n )\"\"\" % argvals", "def removeLabel(edge):\n return edge[:-2]", "def removeLabel(edge):\n return edge[:-2]", "def cutPaper(self, cut='partial', feed=True):\n if cut not in ['partial', 'full']:\n raise ValueError('cut must be \\'partial\\' or \\'full\\'')\n elif type(feed) is not bool:\n raise ValueError('feed must be True or False')\n else:\n value = 0 if cut == 'full' else 1\n value += 65 if feed else 0\n self._write(self.__class__.__GS + 'V' + chr(value))", "def resize_label(label):\n # If current ratio greater than given ratio\n if label[HEIGHT_INDEX] / label[WIDTH_INDEX] > 1.5:\n previous = label[WIDTH_INDEX]\n label[WIDTH_INDEX] = label[HEIGHT_INDEX] / 1.5\n label[J_INDEX] -= (label[WIDTH_INDEX] - previous) / 2\n # If current ratio smaller than given ratio\n else:\n previous = label[HEIGHT_INDEX]\n label[HEIGHT_INDEX] = label[WIDTH_INDEX] * 1.5\n label[I_INDEX] -= (label[HEIGHT_INDEX] - previous) / 2\n return label", "def label(gt_dataset, volume_dim, voxel_dim, labeling_params):\n labeled_volumes = dict()\n labeled_cells = dict()\n #Use global density and reduce the size of gt_dataset here\n global_density = labeling_params[\"global_density\"]\n gt_dataset = {k: v for k,v in gt_dataset.items() if random_sample() < global_density}\n #Label in the order specified in the configuration\n layers = sorted(labeling_params.keys())\n #Remove global_density\n layers.remove(\"global_density\")\n for layer in layers:\n print \"Labeling {}\".format(layer)\n fluorophore = labeling_params[layer]['fluorophore']\n volume, cells = brainbow(gt_dataset, volume_dim, voxel_dim, **labeling_params[layer])\n if fluorophore in labeled_volumes:\n labeled_volumes[fluorophore] += volume\n labeled_cells[fluorophore] |= cells\n else:\n labeled_volumes[fluorophore] = volume\n labeled_cells[fluorophore] = cells\n return labeled_volumes, labeled_cells", "def analyze(self, event):\n jets = Collection(event, \"Jet\")\n\n BTagWeightN = 1.0\n BTagWeightN_up = 1.0\n BTagWeightN_down = 1.0\n BTagWeightN_FS = 1.0\n BTagWeightN_up_FS = 1.0\n BTagWeightN_down_FS = 1.0\n BTagWeightD = 1.0\n BTagWeightNHeavy = 1.0\n BTagWeightNHeavy_up = 1.0\n 
BTagWeightNHeavy_down = 1.0\n BTagWeightNHeavy_FS = 1.0\n BTagWeightNHeavy_up_FS = 1.0\n BTagWeightNHeavy_down_FS = 1.0\n BTagWeightDHeavy = 1.0\n BTagWeightNLight = 1.0\n BTagWeightNLight_FS = 1.0\n BTagWeightNLight_up = 1.0\n BTagWeightNLight_up_FS= 1.0\n BTagWeightNLight_down = 1.0\n BTagWeightNLight_down_FS = 1.0\n BTagWeightDLight = 1.0\n\n for jet in jets:\n pt = jet.pt\n eta = abs(jet.eta)\n flavor = jet.hadronFlavour\n\n if not ( pt > self.jetPtMin and eta < self.jetEtaMax): continue\n\n if flavor == 5:\n pt_bin = self.h_eff_b.GetXaxis().FindBin(pt); \n if pt_bin > self.h_eff_b.GetXaxis().GetNbins():\n pt_bin = self.h_eff_b.GetXaxis().GetNbins(); \n eta_bin = self.h_eff_b.GetYaxis().FindBin(eta); \n if eta_bin > self.h_eff_b.GetYaxis().GetNbins():\n eta_bin = self.h_eff_b.GetYaxis().GetNbins();\n\n eff = self.h_eff_b.GetBinContent(pt_bin, eta_bin);\n\n elif flavor == 4:\n pt_bin = self.h_eff_c.GetXaxis().FindBin(pt); \n if pt_bin > self.h_eff_c.GetXaxis().GetNbins():\n pt_bin = self.h_eff_c.GetXaxis().GetNbins(); \n eta_bin = self.h_eff_c.GetYaxis().FindBin(eta); \n if eta_bin > self.h_eff_c.GetYaxis().GetNbins():\n eta_bin = self.h_eff_c.GetYaxis().GetNbins();\n\n eff = self.h_eff_c.GetBinContent(pt_bin, eta_bin);\n\n else:\n pt_bin = self.h_eff_udsg.GetXaxis().FindBin(pt); \n if pt_bin > self.h_eff_udsg.GetXaxis().GetNbins():\n pt_bin = self.h_eff_udsg.GetXaxis().GetNbins(); \n eta_bin = self.h_eff_udsg.GetYaxis().FindBin(eta); \n if eta_bin > self.h_eff_udsg.GetYaxis().GetNbins():\n eta_bin = self.h_eff_udsg.GetYaxis().GetNbins();\n\n eff = self.h_eff_udsg.GetBinContent(pt_bin, eta_bin);\n \n if self.FastSim:\n btagSF = jet.btagSF\n btagSF_FS=jet.btagSF_FS\n btagSF_up_FS = jet.btagSF_FS_up\n btagSF_down_FS = jet.btagSF_FS_down\n btagSF_down = jet.btagSF_down\n btagSF_up = jet.btagSF_up\n else:\n btagSF = jet.btagSF\n btagSF_FS= 1.0\n btagSF_up = jet.btagSF_up\n btagSF_down = jet.btagSF_down\n btagSF_up_FS = 1.0\n btagSF_down_FS = 1.0\n \n if jet.btagDeepB > self.bDiscCut:\n #check if eff is zero\n if eff < 0.001:\n eff = 0.001\n \n BTagWeightN *= btagSF * eff\n BTagWeightN_FS *= btagSF_FS * eff\n BTagWeightN_up *= btagSF_up * eff\n BTagWeightN_down *= btagSF_down * eff\n BTagWeightN_up_FS *= btagSF_up_FS * eff\n BTagWeightN_down_FS *= btagSF_down_FS * eff\n\n if abs(flavor) == 5:\n BTagWeightNHeavy *= btagSF * eff\n BTagWeightNHeavy_FS *= btagSF_FS * eff\n BTagWeightNHeavy_up *= btagSF_up * eff\n BTagWeightNHeavy_down *= btagSF_down * eff\n BTagWeightNHeavy_up_FS *= btagSF_up_FS * eff\n BTagWeightNHeavy_down_FS *= btagSF_down_FS * eff\n BTagWeightDHeavy *= eff\n else:\n BTagWeightNLight *= btagSF * eff\n BTagWeightNLight_FS *= btagSF_FS * eff\n BTagWeightNLight_up *= btagSF_up * eff\n BTagWeightNLight_down *= btagSF_down * eff\n BTagWeightNLight_up_FS *= btagSF_up_FS * eff\n BTagWeightNLight_down_FS *= btagSF_down_FS * eff\n BTagWeightDLight *= eff\n\n BTagWeightD *= eff\n else:\n #check if eff is 1.0\n if eff > 0.999:\n eff = 0.999\n\n BTagWeightN *= 1 - btagSF * eff\n BTagWeightN_FS *= 1 - btagSF_FS * eff\n BTagWeightN_up *= 1 - btagSF_up * eff\n BTagWeightN_down *= 1 - btagSF_down * eff\n BTagWeightN_up_FS *= 1 - btagSF_up_FS * eff\n BTagWeightN_down_FS *= 1 - btagSF_down_FS * eff\n\n if abs(flavor) == 5:\n BTagWeightNHeavy *= 1 - btagSF * eff\n BTagWeightNHeavy_FS *= 1 - btagSF_FS * eff\n BTagWeightNHeavy_up *= 1 - btagSF_up * eff\n BTagWeightNHeavy_down *= 1 - btagSF_down * eff\n BTagWeightNHeavy_up_FS *= 1 - btagSF_up_FS * eff\n BTagWeightNHeavy_down_FS *= 1 - 
btagSF_down_FS * eff\n BTagWeightDHeavy *= 1 - eff\n else:\n BTagWeightNLight *= 1 - btagSF * eff\n BTagWeightNLight_FS *= 1 - btagSF_FS * eff\n BTagWeightNLight_up *= 1 - btagSF_up * eff\n BTagWeightNLight_up_FS *= 1 - btagSF_up_FS * eff\n BTagWeightNLight_down *= 1 - btagSF_down * eff\n BTagWeightNLight_down_FS *= 1 - btagSF_down_FS * eff\n BTagWeightDLight *= 1 - eff\n\n BTagWeightD *= 1 - eff\n \n if self.FastSim:\n self.out.fillBranch(\"BTagWeight_FS\", BTagWeightN_FS / BTagWeightD)\n self.out.fillBranch(\"BTagWeight_Up_FS\", BTagWeightN_up_FS / BTagWeightD)\n self.out.fillBranch(\"BTagWeight_Down_FS\", BTagWeightN_down_FS / BTagWeightD)\n self.out.fillBranch(\"BTagWeightHeavy_FS\", BTagWeightNHeavy_FS / BTagWeightDHeavy)\n self.out.fillBranch(\"BTagWeightHeavy_Up_FS\", BTagWeightNHeavy_up_FS / BTagWeightDHeavy)\n self.out.fillBranch(\"BTagWeightHeavy_Down_FS\", BTagWeightNHeavy_down_FS / BTagWeightDHeavy)\n self.out.fillBranch(\"BTagWeightLight_FS\", BTagWeightNLight_FS / BTagWeightDLight)\n self.out.fillBranch(\"BTagWeightLight_Up_FS\", BTagWeightNLight_up_FS / BTagWeightDLight)\n self.out.fillBranch(\"BTagWeightLight_Down_FS\", BTagWeightNLight_down_FS / BTagWeightDLight)\n self.out.fillBranch(\"BTagWeight\", BTagWeightN / BTagWeightD)\n self.out.fillBranch(\"BTagWeight_Up\", BTagWeightN_up / BTagWeightD)\n self.out.fillBranch(\"BTagWeight_Down\", BTagWeightN_down / BTagWeightD)\n self.out.fillBranch(\"BTagWeightHeavy\", BTagWeightNHeavy / BTagWeightDHeavy)\n self.out.fillBranch(\"BTagWeightHeavy_Up\", BTagWeightNHeavy_up / BTagWeightDHeavy)\n self.out.fillBranch(\"BTagWeightHeavy_Down\", BTagWeightNHeavy_down / BTagWeightDHeavy)\n self.out.fillBranch(\"BTagWeightLight\", BTagWeightNLight / BTagWeightDLight)\n self.out.fillBranch(\"BTagWeightLight_Up\", BTagWeightNLight_up / BTagWeightDLight)\n self.out.fillBranch(\"BTagWeightLight_Down\", BTagWeightNLight_down / BTagWeightDLight)\n return True", "def torsion_helix_strain(x, dof):\n base = np.zeros([6, dof])\n base[0, 0] = 1 # torsion\n base[1, 1] = 1 # y-bending\n base[2, 2] = 1 # z-bending\n return base", "def make_cutout_table(ra_in, dec_in, other=None, othername=None, table=(2,7), compare=False, scale_unit='pixscale', scale=0.25, layer='decals-dr7', layer2=None, savefile=None): \n de_img = []\n wi_img = []\n N = table[0]*table[1]\n \n for i in range(N):\n de_cutout_url = 'http://legacysurvey.org/viewer-dev/jpeg-cutout/?ra=%g&dec=%g&%s=%g&layer=%s&size=180' % (ra_in[i],dec_in[i], scale_unit, scale, layer)\n img = plt.imread(download_file(de_cutout_url,cache=True,show_progress=False,timeout=120))\n de_img.append(img)\n \n if compare:\n wi_cutout_url = 'http://legacysurvey.org/viewer-dev/jpeg-cutout/?ra=%g&dec=%g&%s=%g&layer=%s&size=180' % (ra_in[i],dec_in[i], scale_unit, scale, layer2)\n img = plt.imread(download_file(wi_cutout_url,cache=True,show_progress=False,timeout=120))\n wi_img.append(img)\n \n fig = plt.figure(figsize=(4*table[1],4*table[0]))\n\n for i in range(len(de_img)):\n ax = fig.add_subplot(table[0],table[1],i+1)\n ax.imshow(de_img[i])\n ax.xaxis.set_major_formatter(NullFormatter())\n ax.yaxis.set_major_formatter(NullFormatter())\n if other[i] != None:\n ax.text(0.1,0.9,'%s=%.1f'%(othername,other[i]),transform=ax.transAxes,fontsize=14,color='white')\n\n plt.subplots_adjust(wspace=0.07, hspace=0.07)\n \n if compare:\n fig = plt.figure(figsize=(4*table[1],4*table[0]))\n for i in range(len(wi_img)):\n ax = fig.add_subplot(table[0],table[1],i+1)\n ax.imshow(wi_img[i])\n ax.xaxis.set_major_formatter(NullFormatter())\n 
ax.yaxis.set_major_formatter(NullFormatter())\n ax.text(0.1,0.9,'r=%.1f'%(mag[i]),transform=ax.transAxes,fontsize=14,color='white')\n\n plt.subplots_adjust(wspace=0.07, hspace=0.07)\n \n if savefile != None:\n fig.savefig(savefile +'.png')\n fig.savefig(savefile +'.pdf')", "def assignLabels(self):\n clusters = np.arange(0, len(self.V))[self.V < self.V1] #indexes self.V, volumes_sorted, and oldOrder\n self.clusterV = self.volumes_sorted[clusters]\n clusters = self.oldOrder[clusters] #indexes volumes\n self.clusters = self.nonBI[clusters] #indexes self.vor and self.data\n self.easyLabel = np.zeros(len(self.data))\n self.easyLabel[self.clusters] = 1\n print('Out of ' + str(len(self.data)) + ' particles, ' + str(len(self.clusters)) + ' (' + str(round(len(self.clusters)*100/len(self.data), 3)) +' %) are labelled as cluster particles.')", "def __init__(self, smoothing=0.1):\n super(LabelSmoothingCrossEntropy, self).__init__()\n assert smoothing < 1.0\n self.smoothing = smoothing\n self.confidence = 1. - smoothing", "def __init__(self):\r\n self.label = \"Longest Flow Path\"\r\n self.description = \"this is my Longest Flow Path python tool\"", "def adjust_labels(data_y, label):\n\n if label == 'locomotion': # Labels for locomotion are adjusted\n data_y[data_y == 4] = 3\n data_y[data_y == 5] = 4\n elif label == 'gestures': # Labels for gestures are adjusted\n data_y[data_y == 406516] = 1\n data_y[data_y == 406517] = 2\n data_y[data_y == 404516] = 3\n data_y[data_y == 404517] = 4\n data_y[data_y == 406520] = 5\n data_y[data_y == 404520] = 6\n data_y[data_y == 406505] = 7\n data_y[data_y == 404505] = 8\n data_y[data_y == 406519] = 9\n data_y[data_y == 404519] = 10\n data_y[data_y == 406511] = 11\n data_y[data_y == 404511] = 12\n data_y[data_y == 406508] = 13\n data_y[data_y == 404508] = 14\n data_y[data_y == 408512] = 15\n data_y[data_y == 407521] = 16\n data_y[data_y == 405506] = 17\n return data_y", "def cut_bkg(self):\n c = TCut(self.cut_both)\n c += TCut(self._return_if('_cut_bkg'))\n return c", "def __init__(self):\n Algorithm.__init__(self)\n self.name = \"Otsus Threshold\"\n self.parent = \"Segmentation\"", "def old_ideal_label(I):\n a, c, d = ideal_HNF(I)\n return \"%s.%s.%s\" % (a * d, c, d)", "def calc_Hcp_ij(self):\n\t\n\thp0_delayed = self.hp_wavelet.get_Psi(self.xi[0] + self.Orbit.L/l.Clight)\n\thp0 = self.hp_wavelet.get_Psi(self.xi[0])\n\thc0_delayed = self.hc_wavelet.get_Psi(self.xi[0] + self.Orbit.L/l.Clight)\n\thc0 = self.hc_wavelet.get_Psi(self.xi[0])\n\t\n\thp1_delayed = self.hp_wavelet.get_Psi(self.xi[1] + self.Orbit.L/l.Clight)\n\thp1 = self.hp_wavelet.get_Psi(self.xi[1])\n\thc1_delayed = self.hc_wavelet.get_Psi(self.xi[1] + self.Orbit.L/l.Clight)\n\thc1 = self.hc_wavelet.get_Psi(self.xi[1])\n\t\n\thp2_delayed = self.hp_wavelet.get_Psi(self.xi[2] + self.Orbit.L/l.Clight)\n\thp2 = self.hp_wavelet.get_Psi(self.xi[2])\n\thc2_delayed = self.hc_wavelet.get_Psi(self.xi[2] + self.Orbit.L/l.Clight)\n\thc2 = self.hc_wavelet.get_Psi(self.xi[2])\n\t\n\tself.Hpij[0,1] = hp1_delayed - hp0\n\tself.Hpij[1,0] = hp0_delayed - hp1\n\n\tself.Hpij[0,2] = hp2_delayed - hp0\n\tself.Hpij[2,0] = hp0_delayed - hp2\n\n\tself.Hpij[1,2] = hp2_delayed - hp1\n\tself.Hpij[2,1] = hp1_delayed - hp2\n\t\n\t# cross-polarization\n\tself.Hcij[0,1] = hc1_delayed - hc0\n\tself.Hcij[1,0] = hc0_delayed - hc1\n\n\tself.Hcij[0,2] = hc2_delayed - hc0\n\tself.Hcij[2,0] = hc0_delayed - hc2\n\n\tself.Hcij[1,2] = hc2_delayed - hc1\n\tself.Hcij[2,1] = hc1_delayed - hc2\n\t\n\treturn", "def __init__(self, training_data, 
smoothing_factor=1.0):\n\t\tsuper(HostnameFeature, self).__init__('Hostname', 4, smoothing_factor)\n\t\tself.category_bag_of_hostname = {}\n\t\tfor record in training_data:\n\t\t\tcategory = record[6]\n\t\t\tif self.category_bag_of_hostname.get(category) is None:\n\t\t\t\tself.category_bag_of_hostname[category] = {}\n\t\t\tpublishers = self.category_bag_of_hostname[category]\n\t\t\tpublisher_name = record[4].strip().lower()\n\t\t\tif publisher_name not in publishers:\n\t\t\t\tpublishers[publisher_name] = 0\n\t\t\tpublishers[publisher_name] += 1\n\t\t# for k, bw in self.category_bag_of_hostname.items():\n\t\t# \tprint 'category ', k, ' with number of different hostname', len(bw)", "def instance_label(task, pred, k=15, n_iters=1, dist_thresh=5, watershed=False):\n mask = pred\n\n # noise removal\n if k > 1 and n_iters > 0:\n kernel = np.ones((k, k), np.uint8)\n mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel,\n iterations=n_iters)\n\n if watershed:\n from clab.live import filters\n mask = filters.watershed_filter(mask, dist_thresh=dist_thresh)\n\n mask = mask.astype(np.uint8)\n n_ccs, cc_labels = cv2.connectedComponents(mask, connectivity=4)\n return cc_labels", "def fredkin(ha, hb, hc):\n\n return controlled_U(ha, swap(hb, hc))", "def labels(self, threshold, segment=True, exclude_border=0):\n data = self.unmasked_data\n isfin = numpy.isfinite(data)\n data[~isfin] = numpy.amin(data[isfin])\n regions = (data > threshold)\n if segment:\n local_max = peak_local_max(data, indices=False,\n exclude_border=0,\n footprint=numpy.ones((3, 3)),\n labels=regions)\n markers = measurements.label(local_max)[0]\n labels = watershed(-data, markers, mask=regions)\n if exclude_border > 0:\n # Remove basins originating from edge peaks\n diff = numpy.zeros_like(local_max)\n for i in range(local_max.ndim):\n local_max = local_max.swapaxes(0, i)\n diff = diff.swapaxes(0, i)\n diff[:exclude_border] = local_max[:exclude_border]\n diff[-exclude_border:] = local_max[-exclude_border:]\n diff = diff.swapaxes(0, i)\n local_max = local_max.swapaxes(0, i)\n \n for l in numpy.sort(labels[diff])[::-1]:\n labels[labels == l] = 0\n labels[labels > l] -= 1\n ulabels = numpy.unique(labels)\n n = ulabels[ulabels != 0].size\n else:\n data_thres = numpy.zeros_like(data)\n data_thres[regions] = data[regions]\n labels, n = measurements.label(data_thres)\n return labels, n", "def graphCut(img, center, radius, temp, edge, count, editPoints, padList, theta_width, phi_width):\r\n\r\n\r\n \"\"\"Important note. The labeled image is referred to as temp, or self.temp in the interface.\r\n This stands for template. The previously labled image is fed back into the graphcut\"\"\"\r\n \r\n \"\"\"create polar images and cost arrays\"\"\"\r\n \r\n print \"RUNNING GRAPHCUT!\"\r\n img= padImage(img, padList)\r\n temp= padImage(temp, padList)\r\n edge= padImage(edge, padList)\r\n center= padCenter(center, padList)\r\n \r\n polar_img= img2polar(img, center, radius, theta_width=theta_width, phi_width=phi_width)\r\n\r\n \r\n \r\n polar_grad, y, x = np.gradient(np.array(polar_img, dtype='float'))\r\n \"\"\"Lockett 100416 replacement line below to not use gradient when the image has a surface label\"\"\"\r\n \"\"\"polar_grad = -1 * np.array(polar_img, dtype='float')\"\"\"\r\n \r\n \r\n polar_cost = -1 * np.ones(polar_img.shape)\r\n for r in range(1,radius):\r\n polar_cost[r]= polar_grad[r]-polar_grad[r-1]\r\n\r\n \r\n \r\n \"\"\"\r\n flip the cost image upside down. 
This is so that the base set is at the bottom of the array\r\n since the graphcut cuts from top to bottom, this inversion is necessary.\r\n \"\"\"\r\n polar_cost_inv=polar_cost[::-1,:,:]\r\n\r\n print \"CONSTRUCTING GRAPH EDGES... \"\r\n \r\n \"\"\"construct the graph using PyMaxFlow\"\"\"\r\n g=maxflow.GraphFloat()\r\n nodeids=g.add_grid_nodes(polar_img.shape)\r\n structure=np.zeros((3,3,3))\r\n structure[2]= np.array([[0,10000,0],[10000, 10000, 10000],[0, 10000, 0]])\r\n g.add_grid_edges(nodeids, structure=structure, symmetric=False)\r\n\r\n \r\n \"\"\"convert the previously labeled image (temp) into a polar transform image. Take the labels and\r\n give them high cost edge weights so the segmentation avoids previously labeled objects\"\"\"\r\n polar_lbl_img= img2polar(temp, center, radius, theta_width=theta_width, phi_width=phi_width)\r\n polar_lbl_img_inv= polar_lbl_img[::-1,:]\r\n \r\n lbl_caps= polar_lbl_img_inv>0\r\n self_caps= (polar_lbl_img_inv==count)\r\n lbl_caps-=self_caps\r\n lbl_source_caps= np.zeros(lbl_caps.shape)\r\n lbl_sink_caps= lbl_caps*10000\r\n g.add_grid_tedges(nodeids, lbl_source_caps, lbl_sink_caps)\r\n \r\n structure2= 10000*np.array([[0,0,0],[0,0,1],[0,1,0]])\r\n g.add_grid_edges(nodeids[radius-1], structure=structure2, symmetric=True)\r\n\r\n \"\"\"add terminal edges using two arrays whose elemnts are the costs of the edges from the source and to the\r\n sink\"\"\"\r\n print \"CONSTRUCTING GRAPH TEDGES...\"\r\n sinkcaps= polar_cost_inv * (polar_cost_inv>=0)\r\n sourcecaps = -1 * polar_cost_inv * (polar_cost_inv<0)\r\n g.add_grid_tedges(nodeids, sourcecaps, sinkcaps)\r\n\r\n \r\n\r\n \r\n \"\"\"accounts for edit points. Takes every point in the edit point list, converts it to its spherical coordinate, and adds high cost\r\n edges in the column of that edit point inverts the x and y coordinates of the center\"\"\"\r\n center= np.array((center[0], center[2], center[1]))\r\n if len(editPoints)!=0:\r\n for coords in editPoints:\r\n\r\n \r\n rad= math.sqrt((center[0]-coords[0])**2+ (center[1]-coords[2])**2 + (center[2]-coords[1])**2) \r\n theta= math.atan2(center[2]-coords[1], coords[2]-center[1])\r\n print str((coords[0]-center[0])/(rad+1))\r\n phi=math.acos(float(coords[0]-center[0])/(rad+1))\r\n if theta<0:\r\n theta=2*math.pi+ theta\r\n theta= theta_width- theta_width*theta/(2*math.pi)-1\r\n phi= phi_width*phi/(math.pi)-1\r\n rad= radius- rad\r\n print \"POLAR COORDS: \" + str((rad, theta, phi))\r\n\r\n for r in range(0, radius):\r\n if r<=rad:\r\n g.add_tedge(nodeids[r, theta, phi], 0, 10000)\r\n \r\n else:\r\n g.add_tedge(nodeids[r, theta, phi], 10000, 0) \r\n\r\n\r\n\r\n\r\n print \"CUTTING GRAPH...\"\r\n g.maxflow()\r\n\r\n \"\"\"s-t mincut of graph. This is converted to cartesian coordinates with the function img2cart. 
The\r\n images are also closed to eliminate spotty areas\"\"\"\r\n \r\n print \"STARTING CARTESIAN TRANSFORM...\"\r\n polar_img_seg= np.invert(g.get_grid_segments(nodeids)[::-1,:,:])\r\n\r\n \r\n edge_img= np.zeros(img.shape)\r\n seg_img= ndimage.binary_closing(img2cart(img, polar_img_seg, center, radius, theta_width, phi_width))\r\n \r\n \r\n \"\"\"create an edge image of the segmented object\"\"\"\r\n strel=np.ones((3,3,3))\r\n erode_img=ndimage.binary_erosion(seg_img, strel)\r\n edge_img=np.logical_xor(seg_img, erode_img)\r\n \r\n\r\n \"\"\"shears the segmentation image and edge if padding was applied\"\"\"\r\n \r\n\r\n \"\"\"add the object back on to the template image (and the edge image back on the template edge)\r\n If there was an editpoint involved, remove the previous segmentation of that object and add back\r\n on the edited object\"\"\"\r\n if len(editPoints)!=0:\r\n del_img= (temp==count)*count\r\n temp-=del_img\r\n\r\n del_edge_img= (edge==count)*count\r\n edge-= del_edge_img\r\n\r\n\r\n temp+=seg_img*count\r\n edge+=edge_img*count\r\n\r\n temp= shearImage(temp, padList)\r\n edge= shearImage(edge, padList)\r\n \r\n \r\n\r\n print \"FINISHED!\"\r\n \r\n return temp, edge", "def __init__(self, *args):\n _BRepAlgo.BRepAlgo_Cut_swiginit(self,_BRepAlgo.new_BRepAlgo_Cut(*args))", "def labeling(self, tab, i, j, element):\n label = element\n label.grid(row=i, column=j) # this specifies where in the grid\n tab.grid_columnconfigure(j, weight=1) \n # this last line makes the width of the column responsive to change in width of the window", "def label(self, cfg):\n rep = \"\"\n nl = \"\"\n for node in cfg.nodes:\n rep += nl + \"{}\\tgen={}\\tkill={}\\tout={}\".format(\n node, \n set(self.gen.get(node)),\n set(self.kill.get(node)),\n set(self.out.get(node)))\n nl = \"\\n\"\n return rep", "def _make_dijet_label(chain_parts):\n\n assert len(chain_parts) == 1\n scenario = chain_parts[0]['hypoScenario']\n \n assert scenario.startswith('dijet')\n\n arg_res = [\n re.compile(r'^(?P<lo>\\d*)(?P<key>djmass)(?P<hi>\\d*)$'),\n re.compile(r'^(?P<lo>\\d*)(?P<key>j1et)(?P<hi>\\d*)$'),\n re.compile(r'^(?P<lo>\\d*)(?P<key>j1eta)(?P<hi>\\d*)$'),\n re.compile(r'^(?P<lo>\\d*)(?P<key>j2et)(?P<hi>\\d*)$'),\n re.compile(r'^(?P<lo>\\d*)(?P<key>j2eta)(?P<hi>\\d*)$'),\n ]\n\n defaults = {\n 'j1et': ('100', 'inf'),\n 'j2et': ('100', 'inf'),\n 'j1eta': ('0', '320'),\n 'j2eta': ('0', '320'),\n 'djmass': ('1000', 'inf'),\n }\n\n\n args = _args_from_scenario(scenario)\n argvals = {}\n while args:\n assert len(args) == len(arg_res)\n arg = args.pop()\n for r in arg_res:\n m = r.match(arg)\n if m is not None:\n arg_res.remove(r)\n gd = m.groupdict()\n key = gd['key']\n\n try:\n lo = float(gd['lo'])\n except ValueError:\n lo = defaults[key][0]\n argvals[key+'lo'] = lo \n try:\n hi = float(gd['hi'])\n except ValueError:\n hi = defaults[key][1]\n argvals[key+'hi'] = hi\n\n assert len(args) == len(arg_res)\n assert len(args) == 0\n\n return \"\"\"\n combgen(\n [(2)(%(j1etlo).0fet, %(j1etalo).0feta%(j1etahi).0f)\n (%(j1etlo).0fet, %(j1etalo).0feta%(j1etahi).0f)\n ]\n \n dijet(\n [(%(djmasslo).0fdjmass)])\n simple([(%(j1etlo).0fet, %(j1etalo).0feta%(j1etahi).0f)\n (%(j2etlo).0fet, %(j2etalo).0feta%(j2etahi).0f)])\n )\"\"\" % argvals", "def hilfe(self):\n sZweieck_hilfe(3)", "def gen_hts_lab_full(self):\n self.gen_hts_lab_mono()\n\n hts_lab_time_prn = tuple(' '.join(l[0:2]) for l in self._hts_lab_mono_ttpl)\n\n self.hts_lab_full_prn = tuple(' '.join((l_time, l_gen)) for (l_time, l_gen)\n in zip(hts_lab_time_prn, 
self.hts_lab_gen_prn))", "def x_group_label(\n x_gr: int, cut: int = 20, name_dict: Dict[AnyStr, AnyStr] = names_dict\n) -> AnyStr:\n name = name_dict[str(x_gr)]\n if len(name) > cut:\n return f\"{name[:cut-3]}...\"\n else:\n return name", "def _labels_of_sentence(self, sentence, split):\n labels = torch.ones(1)\n labels[0] = self.category_int_of_label_string(sentence[0][self.name_to_index_dict['label']]) #\n return labels", "def vertical_core(block,cut,laser):\r\n\r\n\tlayers = int(block[\"thickness\"]/laser[\"z_spacing\"])\r\n\tangle = math.radians(laser[\"kerf_angle\"]/2)\r\n\ttaper = math.tan(angle) * laser[\"z_spacing\"]\r\n\r\n\tu = math.tan(2 * angle) * (block[\"thickness\"] + laser[\"z_final_overshoot\"])\r\n\tz_0 = block[\"thickness\"]*math.cos(angle) + math.sin(angle)*((cut[\"final_dimension_y\"])/2 - block[\"origin_y\"] + u)\r\n\tz_1 = block[\"thickness\"]*math.cos(angle) + math.sin(angle)*((cut[\"final_dimension_x\"])/2 + block[\"origin_x\"] + u)\r\n\tz_2 = block[\"thickness\"]*math.cos(angle) + math.sin(angle)*((cut[\"final_dimension_y\"])/2 + block[\"origin_y\"] + u)\r\n\tz_3 = block[\"thickness\"]*math.cos(angle) + math.sin(angle)*((cut[\"final_dimension_x\"])/2 - block[\"origin_x\"] + u)\r\n\t\r\n\tcutlist = []\r\n\tcutlist.append([\"a_abs\", f\"{math.degrees(angle):.6f}\"])\r\n\tcutlist.append([\"c_abs\", str(block[\"physical_rotation\"])])\r\n\tcutlist.append([\"z_abs\", f\"{z_0:.6f}\"])\r\n\r\n\ty_start_wide = ((u + cut[\"final_dimension_x\"]/2)* math.cos(angle) \r\n\t\t\t\t - block[\"thickness\"]*math.sin(angle) \r\n\t\t\t\t - u/math.cos(angle))\r\n\ty_start_length = ((u + cut[\"final_dimension_y\"]/2)* math.cos(angle) \r\n\t\t\t\t - block[\"thickness\"]*math.sin(angle) \r\n\t\t\t\t - u/math.cos(angle))\r\n\r\n\tdepth_cut = (block[\"thickness\"] + laser[\"z_final_overshoot\"]) * math.cos(angle)/math.cos(2*angle)\r\n\r\n\tcut1 = json.loads(line(block[\"width\"]/2 - block[\"origin_x\"],y_start_length - block[\"origin_y\"],-block[\"width\"]/2 - block[\"origin_x\"],y_start_length - block[\"origin_y\"],depth_cut,laser))\r\n\r\n\tcut2 = json.loads(line(block[\"length\"]/2 + block[\"origin_y\"],y_start_wide - block[\"origin_x\"],-block[\"length\"]/2 + block[\"origin_y\"],y_start_wide - block[\"origin_x\"],depth_cut,laser))\r\n\r\n\tcut3 = json.loads(line(block[\"width\"]/2 + block[\"origin_x\"],y_start_length + block[\"origin_y\"],-block[\"width\"]/2 + block[\"origin_x\"],y_start_length + block[\"origin_y\"],depth_cut,laser))\r\n\r\n\tcut4 = json.loads(line(block[\"length\"]/2 - block[\"origin_y\"],y_start_wide + block[\"origin_x\"],-block[\"length\"]/2 - block[\"origin_y\"],y_start_wide + block[\"origin_x\"],depth_cut,laser))\r\n\r\n\t#cut1 = json.loads(line(block[\"width\"]/2,y_start_length,-block[\"width\"]/2,y_start_length,depth_cut,laser))\r\n\r\n\t#cut2 = json.loads(line(block[\"length\"]/2,y_start_wide,-cut[\"final_dimension_y\"]/2,y_start_wide,depth_cut,laser))\r\n\r\n\t#cut3 = json.loads(line(block[\"width\"]/2,y_start_length,-cut[\"final_dimension_x\"]/2,y_start_length,depth_cut,laser))\r\n\r\n\t#cut4 = json.loads(line(cut[\"final_dimension_y\"]/2,y_start_wide,-cut[\"final_dimension_y\"]/2,y_start_wide,depth_cut,laser))\r\n\r\n\tcutlist = (cutlist + cut1\r\n\t + [[\"c_rel\", \"90\"],[\"z_abs\", f\"{z_1:.6f}\"],] \r\n\t + cut2\r\n\t + [[\"c_rel\", \"90\"],[\"z_abs\", f\"{z_2:.6f}\"]] \r\n\t\t\t\t\t + cut3 \r\n\t\t\t\t\t + [[\"z_abs\", f\"{z_3:.6f}\"],[\"c_rel\", \"90\"]] \r\n\t\t\t\t\t + cut4)\r\n\r\n\tcutlist.insert(0, [\"set_trigger4\", \"1\", \"0\", \"7\", \"8\", 
\"45\"])\r\n\tcutlist.append([\"stop_trigger\"])\r\n\r\n\treturn json.dumps(cutlist)", "def SCEC_LOH_1():\n\n #Initialize CrustModel\n model = CrustModel(2)\n\n #Slow layer\n vp=4.000\n vs=2.000\n rho=2.600\n Qa=10000.\n Qb=10000.\n thickness = 1.0\n\n model.add_layer(thickness, vp, vs, rho, Qa, Qb)\n\n #Halfspace\n vp=6.000\n vs=3.464\n rho=2.700\n Qa=10000.\n Qb=10000.\n thickness = 0 #Infinite thickness!\n model.add_layer(thickness, vp, vs, rho, Qa, Qb)\n\n return model", "def sils_cut(T,f,c,d,h, conshdlr):\n Ts = range(1,T+1)\n\n model = sils(T,f,c,d,h)\n y,x,I = model.data\n\n # relax integer variables\n for t in Ts:\n model.chgVarType(y[t], \"C\")\n model.addVar(vtype=\"B\", name=\"fake\") # for making the problem MIP\n\n # compute D[i,j] = sum_{t=i}^j d[t]\n D = {}\n for t in Ts:\n s = 0\n for j in range(t,T+1):\n s += d[j]\n D[t,j] = s\n\n #include the lot sizing constraint handler\n model.includeConshdlr(conshdlr, \"SILS\", \"Constraint handler for single item lot sizing\",\n sepapriority = 0, enfopriority = -1, chckpriority = -1, sepafreq = -1, propfreq = -1,\n eagerfreq = -1, maxprerounds = 0, delaysepa = False, delayprop = False, needscons = False,\n presoltiming = SCIP_PRESOLTIMING.FAST, proptiming = SCIP_PROPTIMING.BEFORELP)\n conshdlr.data = D,Ts\n\n model.data = y,x,I\n return model", "def _subconstituent_name(h):\n if h == 1:\n o = \"1st\"\n elif h == 2:\n o = \"2nd\"\n elif h == 3:\n o = \"3rd\"\n else:\n o = \"%dth\" % h\n return \"%s subconstituent\" % o", "def display_and_label_hulls(self, hulls, src):\n \n labels = []\n\n for hull in hulls:\n\n angle = 0\n MA = 1\n ma = 1\n try:\n _,(MA,ma),angle = cv.fitEllipse(hull)\n except:\n pass\n cosAngle = np.abs(np.cos(angle*np.pi/180))\n\n # Only human-classify hulls if it is reasonably a vertically oriented rectangle\n # This is a hueristic to not have to waste time clasifying hulls clearly not poles\n if (cosAngle < 1.75) and (cosAngle > 0.85) and (MA/ma < 0.28):\n cpy = src.copy()\n hull_img = cv.polylines(cpy, [hull], True, (0,0,255), 3)\n cv.imshow(\"Hull\", hull_img)\n keycode = cv.waitKey(0)\n if keycode == 49:\n labels.append((hull, 0))\n print(\"Not a Pole\")\n elif keycode == 50:\n labels.append((hull, 1))\n print(\"A Pole!\")\n else:\n raise Exception(\"Unexpected Key Pressed\")\n else:\n labels.append((hull, 0))\n cv.destroyAllWindows()\n return labels", "def __init__(self, word_embed_size, vocab):\n super(ModelEmbeddings, self).__init__()\n\n ### YOUR CODE HERE for part 1h\n dropout_rate = 0.3\n n_chars = len(vocab.char2id)\n self.char_embed_size = 50\n self.word_embed_size = word_embed_size\n self.vocab = vocab\n self.char_embed = nn.Embedding(n_chars, self.char_embed_size)\n self.conv = CNN(self.char_embed_size, word_embed_size)\n self.highway = Highway(word_embed_size)\n self.dropout = nn.Dropout(dropout_rate)\n ### END YOUR CODE", "def plot_mass_flow(self,\n watershed, \n output, \n title = 'Subbasin Reach Mass Flow Diagram',\n fontsize = 6, \n theight = 0.2, \n l = 8.5, \n w = 11, \n verbose = True, \n overwrite = True,\n ):\n\n if os.path.exists(output) and not overwrite:\n if verbose: print('file %s exists' % output)\n return\n elif verbose: print('generating a mass linkage plot\\n')\n\n fontheight = fontsize / 72.\n rheight = 3 * fontheight\n rwidth = 12 * fontheight\n xgap = fontheight\n ygap = rheight\n awidth = rheight / 4\n aheight = rheight / 3\n\n # set up a sheet to write the image\n\n fig = pyplot.figure(figsize = (w, l))\n\n ax = fig.add_subplot(111, aspect = 'equal')\n 
ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n t = ax.set_title(title)\n\n # divide the subbasins into rows and put them on the chart\n # start at the bottom to organize the linkages better\n\n rows = [watershed.outlets, ['outlet']]\n\n top = False\n while not top:\n row = []\n for next in rows[0]:\n for subbasin in watershed.updown:\n if watershed.updown[subbasin] == next: row.append(subbasin)\n if len(row) > 0: \n rows.insert(0, row)\n else: \n top = True\n\n # add an inlet box in the row above each inlet\n\n for inlet in watershed.inlets: \n\n i = 0\n while i < len(rows) - 1:\n\n for subbasin in rows[i]:\n\n if subbasin == inlet:\n \n # find the position of the subbasin in the chart\n\n j = rows[i].index(inlet)\n\n if i > 0:\n\n # figure out where the subbasins point\n \n updowns = [watershed.updown[s] for s in rows[i-1]]\n \n # if first or last, add it there in the row above\n\n if j == 0: \n rows[i-1].insert(0, 'inlet')\n elif j == len(rows[i]) - 1: \n rows[i-1].append('inlet')\n else:\n\n # find the place to add in the preceeding row \n\n n = updowns.index(rows[i][j-1]) + 1\n rows[i-1].insert(n, 'inlet')\n\n i += 1\n\n # write the subbasin boxes to the chart\n\n middle = math.ceil(w // (rwidth + xgap)) // 2\n last = 0\n\n # keep track of the bounding box of the plot\n\n xmin, ymin, xmax, ymax = middle, 0, middle, 0\n\n for i in range(len(rows)):\n\n row = rows[i]\n \n y = (ygap + rheight) * i + theight\n\n # figure out which cell to put in the main column\n\n if i == 0:\n main = row[(len(row) - 1) // 2]\n elif i < len(rows) - 1:\n main = watershed.updown[rows[i-1][last]]\n else: main = 'outlet'\n\n start = middle - row.index(main)\n\n if i < len(rows) - 1: next_row = rows[i + 1]\n\n for subbasin in row:\n x = (rwidth + xgap) * (start + row.index(subbasin))\n r = patches.Rectangle((x, y), rwidth, rheight, fill = False)\n\n # adjust the bounding box\n\n if x < xmin: xmin = x\n if x + rwidth > xmax: xmax = x + rwidth\n if y < ymin: ymin = y\n if y + rheight > ymax: ymax = y + rheight\n\n if subbasin != 'outlet': ax.add_patch(r)\n\n b = ax.text(x + rwidth / 2, y + rheight / 2, subbasin,\n horizontalalignment = 'center',\n verticalalignment = 'center')\n\n # draw the arrow\n\n if i < len(rows) - 1:\n\n x1 = x + rwidth / 2\n\n if i < len(rows) - 2 and subbasin != 'inlet':\n next = watershed.updown[subbasin]\n next_start = (middle - \n next_row.index(watershed.updown[main]))\n x2 = ((rwidth + xgap) * \n (next_start + next_row.index(next))\n + rwidth / 2)\n\n elif subbasin == 'inlet':\n next = watershed.inlets[0]\n next_start = (middle - \n next_row.index(watershed.updown[main]))\n\n x2 = ((rwidth + xgap) * \n (next_start + next_row.index(next))\n + rwidth / 2)\n\n else:\n next_start = middle\n x2 = ((rwidth + xgap) * (middle) + rwidth / 2)\n\n a = pyplot.arrow(x1, y + rheight, x2 - x1, ygap, \n head_width = awidth, head_length = aheight,\n fc = 'k', ec = 'k', \n length_includes_head = True)\n ax.add_patch(a)\n\n last = row.index(main)\n i += 1\n \n pad = 0.02\n\n xmin = xmin - (xmax - xmin) * pad\n xmax = xmax + (xmax - xmin) * pad\n ymin = ymin - (ymax - ymin) * pad\n ymax = ymax + (ymax - ymin) * pad\n\n ax.set_xlim(xmin, xmax)\n ax.set_ylim(ymax, ymin)\n pyplot.axis('off')\n pyplot.savefig(output, dpi = 200)\n\n pyplot.clf()\n pyplot.close()", "def _get_labels(self, ind):\n pass", "def plot_labels_bundle_pth(device, model, dataset, dataloader):\n ## Get labels and preds\n labels, preds = get_labels_and_preds(device, model, dataloader)\n \n ## Plot the figure\n 
plot_labels(labels, preds, dataset.id_to_class_dict)", "def write_label(self, contig_name, width, height, font, title_width, upper_left, vertical_label,\n strand, canvas, horizontal_centering=False, center_vertical=False, chop_text=True,\n label_color=(50, 50, 50, 255)):\n upper_left = list(upper_left) # to make it mutable\n shortened = contig_name[-title_width:] # max length 18. Last characters are most unique\n txt = Image.new('RGBA', (width, height))#, color=(0,0,0,50))\n txt_canvas = ImageDraw.Draw(txt)\n text_width = txt_canvas.textsize(shortened, font)[0]\n if not chop_text and text_width > width:\n txt = Image.new('RGBA', (text_width, height)) # TODO performance around txt_canvas\n txt_canvas = ImageDraw.Draw(txt)\n if center_vertical or vertical_label: # Large labels are centered in the column to look nice,\n # rotation indicates strand in big text\n vertically_centered = (height // 2) - multi_line_height(font, shortened, txt)//2\n else: # Place label at the beginning of gene based on strand\n vertically_centered = height - multi_line_height(font, shortened, txt) # bottom\n if strand == \"+\":\n vertically_centered = 0 # top of the box\n txt_canvas.multiline_text((0, max(0, vertically_centered)), shortened, font=font,\n fill=label_color)\n if vertical_label:\n rotation_direction = 90 if strand == '-' else -90\n txt = txt.rotate(rotation_direction, expand=True)\n upper_left[1] += -4 if strand == '-' else 4\n if horizontal_centering:\n margin = width - text_width\n upper_left[0] += margin // 2\n canvas.paste(txt, (upper_left[0], upper_left[1]), txt)", "def hbnb():\n return 'HBNB'", "def TH(self, full=False):\n\t\treturn (arange(self.thbins + 3*self.thbins*(full==True)) + 0.5) * (pi / 2) / self.thbins", "def displayNeedle(self,i):\n #obsolete\n profbox()\n modelNodes = slicer.util.getNodes('vtkMRMLModelNode*')\n for modelNode in modelNodes.values():\n if modelNode.GetAttribute(\"nth\")==str(i) and modelNode.GetAttribute(\"segmented\")=='1' :\n displayNode = modelNode.GetModelDisplayNode()\n nVisibility = displayNode.GetVisibility()\n \n if nVisibility:\n displayNode.SliceIntersectionVisibilityOff()\n displayNode.SetVisibility(0)\n else:\n displayNode.SliceIntersectionVisibilityOn()\n displayNode.SetVisibility(1)", "def label_joints():\n side_dict = {'C': 0,\n 'L': 1,\n 'R': 2}\n for jnt in mc.ls(type='joint'):\n mc.setAttr('{}.side'.format(jnt), side_dict[jnt.split('_')[0]])\n mc.setAttr('{}.type'.format(jnt), 18)\n mc.setAttr('{}.otherType'.format(jnt), jnt.split('_')[1], type=\"string\")", "def __init__(self, h, d_model, dropout=0.1):\n super(MultiHeadedAttention, self).__init__()\n assert d_model % h == 0\n # We assume d_v always equals d_k\n self.d_k = d_model // h\n self.h = h\n self.linears = clones(nn.Linear(d_model, d_model), 4)\n self.attn = None\n self.dropout = nn.Dropout(p=dropout)", "def selection_correction_method1(tree, scale, h_in, h_out):\n #h_in = ROOT.TH1D(\"h_in\", \"neutron spectrum with all cuts: inside onset window; Energy [keV]; counts\", 50, 0, 25)\n #h_out = ROOT.TH1D(\"h_out\", \"neutron spectrum with all cuts: outside onset window; Energy [keV]; counts\", 50, 0, 25)\n for event in tree:\n cut = [0, 0]\n S15_ch = i_channel(0, event)\n bpm_ch = i_channel(4, event)\n RT = event.DD_Rise[S15_ch]\n S15_w2 = event.DD_AmplADU[S15_ch]\n onset = event.DD_Rise10pct[S15_ch]\n if cut[0]==0:\n # first cut: for inside onset window\n # if event passes the first cuts\n if S15_w2>1000 and RT>1.1 and RT<1.51 and onset>39 and onset<47:\n # loop over the pmt channel numbers to 
calculate the time of flight: time bd - time bpm\n for n_channel in range(5, 16):\n pmt_i = i_channel(n_channel, event)\n cfd_pmt = event.cfdPulse_CFDNS[pmt_i]\n cfd_bpm = event.cfdPulse_CFDNS[bpm_ch]\n # calculation of the time of flight\n tof = (cfd_pmt-cfd_bpm)%400\n #cut on tof: time of flight of the neutron\n if tof<335 and tof>295:\n energy2 = S15_w2*scale\n # fill histogram inside onset window\n h_in.Fill(energy2)\n cut[0]=1\n break\n if cut[1]==0:\n if S15_w2>1000 and RT<1.51 and RT>1.1 and ((onset<36 and onset>15) or (onset>50 and onset<=110)):\n for n_channel in range(5, 16):\n pmt_i = i_channel(n_channel, event)\n cfd_pmt = event.cfdPulse_CFDNS[pmt_i]\n cfd_bpm = event.cfdPulse_CFDNS[bpm_ch]\n tof = (cfd_pmt-cfd_bpm)%400\n if tof<335 and tof>295:\n energy2 = S15_w2*scale\n h_out.Fill(energy2)\n cut[1]=1\n break\n return h_in, h_out", "def split(self, frame, y):\n return super(H2OStratifiedKFold, self).split(frame, y)", "def hbnb():\n return \"HBNB\"", "def hbnb():\n return \"HBNB\"", "def label(self):\r\n raise NotImplementedError", "def width_h_invis(self):\n if m_higgs > 2.0 * self.mx:\n coupling = self.gsxx * self.stheta / np.sqrt(1 - self.stheta**2)\n\n val = (\n (coupling**2 * (m_higgs**2 - 4 * self.mx**2) ** 1.5)\n / (8.0 * m_higgs**2 * np.pi)\n ).real\n\n assert val >= 0\n\n return val\n else:\n return 0.0", "def J_J(h):\n\n h = MTS(h)\n hdot = h.dot\n J_𝒥 = 0.5j * 𝔇inverseLaplacianinverse(\n 0.125 * (3 * h * hdot.bar.ethbar - 3 * hdot * h.bar.ethbar + hdot.bar * h.ethbar - h.bar * hdot.ethbar).eth.im\n ).ethbar.ethbar\n\n return J_𝒥", "def _extract_vanHove(g,j,count_cut,wind):\n count = g[fd('step',j)]['y']['disp_count'][:]\n # get better statistics\n count += g[fd('step',j)]['x']['disp_count'][:]\n # count += count[::-1]\n\n edges = g[fd('step',j)]['y']['disp_edges'][:]\n \n edges = np.array([np.mean(edges[j:j+wind]) for j in range(0,len(count)-wind,wind)])\n count = np.array([np.sum(count[j:j+wind]) for j in range(0,len(count)-wind,wind)])\n\n x_lim = (np.min(edges),np.max(edges))\n\n edges = edges[count>count_cut]\n count = count[count>count_cut]\n\n return edges,count,x_lim" ]
[ "0.5253033", "0.5244755", "0.5173087", "0.51096773", "0.5103869", "0.50852627", "0.5053167", "0.4967331", "0.49329308", "0.4927268", "0.4886447", "0.4883699", "0.48704627", "0.48665786", "0.48657504", "0.48568624", "0.484965", "0.48423848", "0.48153552", "0.4808797", "0.47966635", "0.47905484", "0.47897264", "0.4789602", "0.47883236", "0.4778537", "0.47747788", "0.47747788", "0.47688013", "0.47625986", "0.47623906", "0.47590986", "0.47565174", "0.47555858", "0.47543168", "0.47531363", "0.47462907", "0.47368893", "0.47368893", "0.4735266", "0.47266668", "0.47257203", "0.47236973", "0.47236973", "0.4721428", "0.47145838", "0.47108462", "0.46985096", "0.46970034", "0.46970034", "0.46903425", "0.4678745", "0.46785632", "0.46737352", "0.46721482", "0.46716332", "0.4668553", "0.46664682", "0.46631366", "0.4650076", "0.4649602", "0.46484134", "0.46478298", "0.46409085", "0.46390003", "0.4636259", "0.46301612", "0.46296418", "0.46274868", "0.46253723", "0.46242732", "0.4620448", "0.4620207", "0.46172926", "0.4612796", "0.46107337", "0.46043545", "0.45970452", "0.45920217", "0.45911533", "0.45852852", "0.45762974", "0.45722124", "0.45691916", "0.4559683", "0.4556273", "0.455437", "0.45497307", "0.45415783", "0.45405617", "0.45398763", "0.45394397", "0.45376632", "0.45373675", "0.45241743", "0.45241743", "0.45232397", "0.4519799", "0.45189357", "0.45187494" ]
0.5767312
0
make test label for combinations helper with two simple children.
def _make_combinationsTest_label(chain_parts): assert len(chain_parts) == 1 scenario = chain_parts[0]['hypoScenario'] assert scenario == 'combinationsTest' return """ combgen( [(2)(20et, 0eta320)] simple([(40et, 0eta320) (50et, 0eta320)]) simple([(35et, 0eta240) (55et, 0eta240)]) )"""
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_sums():\n assert label_parent(1, 2) == 3\n assert label_parent (1, 4) == 8\n # Should ignore arg order\n assert label_parent(4, 1) == 8", "def _make_partitionsTest_label(chain_parts):\n\n assert len(chain_parts) == 1\n scenario = chain_parts[0]['hypoScenario']\n \n assert scenario == 'partitionsTest'\n\n \n\n return \"\"\"\n partgen(\n [(20et, 0eta320)]\n \n simple([(40et, 0eta320) (50et, 0eta320)])\n simple([(35et, 0eta240) (55et, 0eta240)])\n )\"\"\"", "def test_recipe_nutrition_label_widget(self):\n pass", "def test_select_label(self):\n title = ('Transportation Challenges Limit Education Choices for '\n 'Denver Parents')\n summary = \"\"\"\n Many families in the Denver metro area use public\n transportation instead of a school bus because for them, a\n quality education is worth hours of daily commuting. Colorado's\n school choice program is meant to foster educational equity,\n but the families who benefit most are those who have time and\n money to travel. Low-income families are often left in a lurch.\n \"\"\"\n byline = \"Mile High Connects\"\n story = create_story(title=title, summary=summary, byline=byline)\n layout = SectionLayout.objects.get(sectionlayouttranslation__name=\"Side by Side\")\n section1 = create_section(title=\"Test Section 1\", story=story, layout=layout)\n section2 = create_section(title=\"Test Section 2\", story=story, layout=layout)\n form = SectionRelationAdminForm()\n choices_list = list(form.fields['parent'].widget.choices)\n self.assertIn(story.title, choices_list[1][1])\n self.assertIn(story.title, choices_list[2][1])", "def setUp(self):\n\n singleLabels = linkoCreate.Linkograph(\n [({'A'}, set(), {1,2,3}),\n ({'D'}, {0}, {3,4}),\n ({'A'}, {0}, {4}),\n ({'C'}, {0,1}, {4}),\n ({'A'}, {1,2,3}, set())],\n ['A', 'B', 'C', 'D'])\n\n singleSubLinko0_2 = linkoCreate.Linkograph(\n [({'A'}, set(), {1,2}),\n ({'D'}, {0}, set()),\n ({'A'}, {0}, set())],\n ['A', 'B', 'C', 'D'])\n\n singleSubLinko0_1 = linkoCreate.Linkograph(\n [({'A'}, set(), {1}),\n ({'D'}, {0}, set())],\n ['A', 'B', 'C', 'D'])\n\n singleSubLinko0_0 = linkoCreate.Linkograph(\n [({'A'}, set(), set())],\n ['A', 'B', 'C', 'D'])\n\n singleSubLinko1_2 = linkoCreate.Linkograph(\n [({'D'}, set(), set()),\n ({'A'}, set(), set())],\n ['A', 'B', 'C', 'D'])\n\n singleSubLinko1_1 = linkoCreate.Linkograph(\n [({'D'}, set(), set())],\n ['A', 'B', 'C', 'D'])\n\n trivialLinkograph = linkoCreate.Linkograph(\n [], ['A', 'B', 'C', 'D'])\n\n\n singleSubLinko1_4 = linkoCreate.Linkograph(\n [({'D'}, set(), {2,3}),\n ({'A'}, set(), {3}),\n ({'C'}, {0}, {3}),\n ({'A'}, {0,1,2}, set())],\n ['A', 'B', 'C', 'D'])\n\n singleSubLinko2_4 = linkoCreate.Linkograph(\n [({'A'}, set(), {2}),\n ({'C'}, set(), {2}),\n ({'A'}, {0,1}, set())],\n ['A', 'B', 'C', 'D'])\n\n singleSubLinko3_4 = linkoCreate.Linkograph(\n [({'C'}, set(), {1}),\n ({'A'}, {0}, set())],\n ['A', 'B', 'C', 'D'])\n\n singleSubLinko4_4 = linkoCreate.Linkograph(\n [({'A'}, set(), set())],\n ['A', 'B', 'C', 'D'])\n\n simpleLinko = linkoCreate.Linkograph(\n [({'A', 'B', 'C'}, set(), {1,2,3}),\n ({'D'}, {0}, {3,4}),\n ({'A'}, {0}, {4}),\n ({'B', 'C'}, {0,1}, {4}),\n ({'A'}, {1,2,3}, set())],\n ['A', 'B', 'C', 'D'])\n\n if self.id().split('.')[-1] == 'test_createSubLinkographWithoutCommands':\n self.testParams = [\n {'linko': singleLabels,\n 'lowerBound': None,\n 'upperBound': None,\n 'ExpectedLinkograph': singleLabels},\n\n {'linko': singleLabels,\n 'lowerBound': 0,\n 'upperBound': 4,\n 'ExpectedLinkograph': singleLabels},\n\n {'linko': singleLabels,\n 
'lowerBound': 0,\n 'upperBound': 5,\n 'ExpectedLinkograph': singleLabels},\n\n {'linko': singleLabels,\n 'lowerBound': 0,\n 'upperBound': 2,\n 'ExpectedLinkograph': singleSubLinko0_2},\n\n {'linko': singleLabels,\n 'lowerBound': -1,\n 'upperBound': 2,\n 'ExpectedLinkograph': singleSubLinko0_2},\n\n {'linko': singleLabels,\n 'lowerBound': None,\n 'upperBound': 2,\n 'ExpectedLinkograph': singleSubLinko0_2},\n\n {'linko': singleLabels,\n 'lowerBound': 0,\n 'upperBound': 1,\n 'ExpectedLinkograph': singleSubLinko0_1},\n\n {'linko': singleLabels,\n 'lowerBound': 0,\n 'upperBound': 0,\n 'ExpectedLinkograph': singleSubLinko0_0},\n\n {'linko': singleLabels,\n 'lowerBound': 0,\n 'upperBound': -1,\n 'ExpectedLinkograph': trivialLinkograph},\n\n {'linko': singleLabels,\n 'lowerBound': 1,\n 'upperBound': 2,\n 'ExpectedLinkograph': singleSubLinko1_2},\n\n {'linko': singleLabels,\n 'lowerBound': 1,\n 'upperBound': 1,\n 'ExpectedLinkograph': singleSubLinko1_1},\n\n {'linko': singleLabels,\n 'lowerBound': 1,\n 'upperBound': 0,\n 'ExpectedLinkograph': trivialLinkograph},\n\n {'linko': singleLabels,\n 'lowerBound': -1,\n 'upperBound': -1,\n 'ExpectedLinkograph': trivialLinkograph},\n\n {'linko': singleLabels,\n 'lowerBound': 1,\n 'upperBound': 4,\n 'ExpectedLinkograph': singleSubLinko1_4},\n\n {'linko': singleLabels,\n 'lowerBound': 2,\n 'upperBound': 4,\n 'ExpectedLinkograph': singleSubLinko2_4},\n\n {'linko': singleLabels,\n 'lowerBound': 3,\n 'upperBound': 4,\n 'ExpectedLinkograph': singleSubLinko3_4},\n\n {'linko': singleLabels,\n 'lowerBound': 4,\n 'upperBound': 4,\n 'ExpectedLinkograph': singleSubLinko4_4},\n\n ]", "def test_labels(self):\n self.compliance_tester.test_labels(self.oi)", "def test_parent_label(self):\n l = self.d.label(1)\n l2 = self.d.label(31405)\n\n self.assertTrue(l.parent_label is None)\n self.assertTrue(l2 in l.sublabels)\n self.assertEqual(l2.parent_label, l)", "def test_simple(self):\n exp = [{'type': NewickEvents.OPEN_SUBTREE, 'comments': []},\n {'type': NewickEvents.OPEN_SUBTREE, 'comments': []},\n {'edge_info': None, 'type': NewickEvents.TIP, 'comments': [], 'label': 'h'},\n {'edge_info': None, 'type': NewickEvents.TIP, 'comments': [], 'label': 'p'},\n {'edge_info': '1', 'type': NewickEvents.CLOSE_SUBTREE,\n 'comments': [], 'label': 'hp'},\n {'edge_info': None, 'type': NewickEvents.TIP, 'comments': [], 'label': 'g'},\n {'edge_info': None, 'type': NewickEvents.CLOSE_SUBTREE,\n 'comments': [], 'label': 'hpg'}\n ]\n content = '((h,p)hp:1,g)hpg;'\n self._do_test(content, exp)\n content = '((h,[pretest]p[test][posttest])hp,g)hpg;'\n exp = [{'type': NewickEvents.OPEN_SUBTREE, 'comments': []},\n {'type': NewickEvents.OPEN_SUBTREE, 'comments': []},\n {'edge_info': None, 'type': NewickEvents.TIP, 'comments': [], 'label': 'h'},\n {'edge_info': None, 'type': NewickEvents.TIP,\n 'comments': ['pretest', 'test', 'posttest'], 'label': 'p'},\n {'edge_info': None, 'type': NewickEvents.CLOSE_SUBTREE,\n 'comments': [], 'label': 'hp'},\n {'edge_info': None, 'type': NewickEvents.TIP, 'comments': [], 'label': 'g'},\n {'edge_info': None, 'type': NewickEvents.CLOSE_SUBTREE,\n 'comments': [], 'label': 'hpg'}\n ]\n self._do_test(content, exp)", "def test_label(self):\n xs = t.Label(t.Exactly(\"x\"), 'CustomLabel')\n self.assertEqual(writePython(xs),\n dd(\"\"\"\n def _G_label_1():\n _G_exactly_2, lastError = self.exactly('x')\n self.considerError(lastError, None)\n return (_G_exactly_2, self.currentError)\n _G_label_3, lastError = self.label(_G_label_1, \"CustomLabel\")\n self.considerError(lastError, 
None)\n _G_label_3\n \"\"\"))", "def addLabels(t):\n if not t.label:\n t.label = \"\".join([choice(\"abcdefghijklmnopqrstuvwxyz\") for i in range(4)])\n for r,w in t.children:\n addLabels(r)", "def test_general_subset_level():\n pass", "def _make_simple_comb_label(chain_dict):\n\n cps = chain_dict['chainParts']\n if not (_select_simple_chainparts(cps)):\n raise NotImplementedError(\n 'chain fails substring selection: not \"simple\": %s' % (\n chain_dict['chainName']))\n \n simple_strs = []\n\n for cp in cps:\n print(cp)\n simple_strs.append(_make_simple_label([cp]))\n\n label = 'combgen([(%d)]' % len(cps)\n for s in simple_strs:\n label += ' %s ' % s\n label += ')'\n return label", "def _generateLabelAndName(self, obj, **args):\n result = []\n label = self._generateLabel(obj, **args)\n name = self._generateName(obj, **args)\n result.extend(label)\n if not len(label):\n result.extend(name)\n elif len(name) and name[0].strip() != label[0].strip():\n result.extend(name)\n return result", "def tests_ti_document_add_label(self):\n super().group_add_label()", "def getLabel2(*args):", "def getLabel2(*args):", "def test_checkboxtextgroup(self):\r\n self.check_group('checkboxtextgroup', 'choice', 'checkbox')", "def _generateTableCell2ChildLabel(self, obj, **args):\n result = []\n\n # If this table cell has 2 children and one of them has a\n # 'toggle' action and the other does not, then present this\n # as a checkbox where:\n # 1) we get the checked state from the cell with the 'toggle' action\n # 2) we get the label from the other cell.\n # See Orca bug #376015 for more details.\n #\n if obj.childCount == 2:\n cellOrder = []\n hasToggle = [False, False]\n for i, child in enumerate(obj):\n if self._script.utilities.hasMeaningfulToggleAction(child):\n hasToggle[i] = True\n break\n if hasToggle[0] and not hasToggle[1]:\n cellOrder = [ 1, 0 ]\n elif not hasToggle[0] and hasToggle[1]:\n cellOrder = [ 0, 1 ]\n if cellOrder:\n for i in cellOrder:\n if not hasToggle[i]:\n result.extend(self.generate(obj[i], **args))\n return result", "def test_process_label_in_node(self):\n tree = Node(children=[\n Node(\"Defining secret phrase.\", label=['AB', 'a']),\n Node(\"Has secret phrase. 
Then some other content\", \n label=['AB', 'b'])\n ], label=['AB'])\n t = Terms(tree)\n t.scoped_terms = {\n ('AB',): [Ref(\"secret phrase\", \"AB-a\", (9,22))]\n }\n # Term is defined in the first child\n self.assertEqual([], t.process(tree.children[0]))\n self.assertEqual(1, len(t.process(tree.children[1])))", "def get_extra_label(self, label_name: str, hierarchy: List[str]) -> Any:", "def test_get_scenarios_expanded(self):\n pass", "def test_nested():\n res = conf.status.conditions.choose(lambda c: (c.type, c.reason, c.root.metadata.choose(lambda m: (m[\"name\"], m.uid))))\n assert \"type\" in res # from conditions\n assert \"reason\" in res # from conditions\n assert \"name\" in res # from metadata\n assert \"uid\" in res # from metadata", "def _make_simple_label(chain_parts):\n \n if not _select_simple_chainparts(chain_parts):\n msg = 'Jet Configuration error: '\\\n 'chain fails substring selection: not \"simple\" '\n\n raise NotImplementedError(msg)\n \n label = 'simple(['\n for cp in chain_parts:\n smcstr = str(cp['smc'])\n jvtstr = str(cp['jvt'])\n if smcstr == 'nosmc':\n smcstr = ''\n for i in range(int(cp['multiplicity'])):\n # condition_str = '(%set,%s,%s)' % (str(cp['threshold']),\n # str(cp['etaRange']),\n # smcstr,)\n condition_str = '(%set,%s' % (str(cp['threshold']),\n str(cp['etaRange']),)\n if smcstr: # Run 2 chains have \"INF\" in the SMC substring\n condition_str += ',%s)' % smcstr.replace('INF','')\n elif jvtstr:\n condition_str += ',%s)' % jvtstr\n else:\n condition_str += ')'\n label += condition_str\n label += '])'\n return label", "def test_nested_sequence(self):\n\n self.taxon_tester('Apis mellifera')\n self.taxon_tester('Apis')\n self.taxon_tester('Apini')\n self.taxon_tester('Apinae')\n # Apidae at 5680 species is a struggle\n self.taxon_tester('Apidae')\n if False:\n # Apoidea: 19566 takes 223 seconds\n self.taxon_tester('Apoidea')\n # Aculeata fails after 339 seconds\n self.taxon_tester('Aculeata')\n self.taxon_tester('Apocrita')\n self.taxon_tester('Hymenoptera')\n self.taxon_tester('Endopterygota')\n self.taxon_tester('Neoptera')\n self.taxon_tester('Pterygota')\n self.taxon_tester('Dicondylia')\n self.taxon_tester('Insecta')\n self.taxon_tester('Hexapoda')\n self.taxon_tester('Pancrustacea')\n self.taxon_tester('Mandibulata')\n self.taxon_tester('Arthropoda')\n self.taxon_tester('Panarthropoda')\n self.taxon_tester('Ecdysozoa')\n self.taxon_tester('Protostomia')\n self.taxon_tester('Bilateria')\n self.taxon_tester('Eumetazoa')\n self.taxon_tester('Metazoa')\n self.taxon_tester('Holozoa')\n self.taxon_tester('Opisthokonta')\n self.taxon_tester('Eukaryota')", "def test_bootstrap_support_labeled(self):\r\n master_tree = parse_newick('((a:2,b:3)ab:2,(c:1,d:2)cd:7)rt;')\r\n \"\"\"\r\n /-------.5 /-a\r\n ---1| \\-b\r\n \\------.5 /-c\r\n \\-d\r\n \"\"\"\r\n t1 = parse_newick('((a:6,b:8.2)hi:2,(c:1,d:2):7);') # same structure\r\n t2 = parse_newick('((a:2,b:3,c:33)ho:2,d:7);') # abc are siblings\r\n new_master, bootstraps = tc.bootstrap_support(master_tree, [t1, t2])\r\n expected = dict([('ab', .5), ('cd', .5), ('rt', 1.0)])\r\n self.assertDictEqual(bootstraps, expected)", "def test_title(names):", "def test_verbose_name_group(self): \n field_verboses = {\n \"title\": \"Название группы\",\n \"slug\": \"Слаг\",\n \"description\": \"Описание группы\",\n }\n for value, expected in field_verboses.items():\n with self.subTest(value=value):\n self.assertEqual(self.group._meta.get_field(value).verbose_name, expected)", "def plugin_second_label():\n return \"second\"", 
"def test_product_labels(self):\n\n prd = Product.objects.get(id=1)\n # label name\n label_name = prd._meta.get_field('name').verbose_name\n self.assertEqual(label_name, 'name')\n # label description\n label_name = prd._meta.get_field('description').verbose_name\n self.assertEqual(label_name, 'description')\n # label nutrition_grade\n label_name = prd._meta.get_field('nutrition_grade').name\n self.assertEqual(label_name, 'nutrition_grade')\n # label barcode\n label_name = prd._meta.get_field('barcode').verbose_name\n self.assertEqual(label_name, 'barcode')\n # label url\n label_name = prd._meta.get_field('url').verbose_name\n self.assertEqual(label_name, 'url')\n # label url_pic\n label_name = prd._meta.get_field('url_pic').name\n self.assertEqual(label_name, 'url_pic')\n # label store\n label_name = prd._meta.get_field('store').verbose_name\n self.assertEqual(label_name, 'store')\n # label prd_cat\n label_name = prd._meta.get_field('prd_cat').name\n self.assertEqual(label_name, 'prd_cat')\n # label fat\n label_name = prd._meta.get_field('fat').verbose_name\n self.assertEqual(label_name, 'fat')\n # label saturated_fat\n label_name = prd._meta.get_field('saturated_fat').name\n self.assertEqual(label_name, 'saturated_fat')\n # label sugar\n label_name = prd._meta.get_field('sugar').verbose_name\n self.assertEqual(label_name, 'sugar')\n # label salt\n label_name = prd._meta.get_field('salt').verbose_name\n self.assertEqual(label_name, 'salt')", "def is_test(self):\r\n return self.has_label('tests')", "def setUp(self):\n\n\n # InverseLabeling\n invLabeling0 = {'L0': [0, 1, 2]}\n\n invLabeling1 = {'L0' : [0, 2],\n 'L1' : [1]}\n\n invLabeling2 = {\n 'L0' : [0],\n 'L1' : [1],\n 'L2' : [2]\n }\n\n invLabeling3 = {\n 'L1' : [0, 1],\n 'L2' : [2]\n }\n\n invLabeling4 = {\n 'L0' : [0,1],\n 'L1' : [0],\n 'L2' : [2]\n }\n\n invLabeling5 = {\n 'L0': [0, 1, 2],\n 'L1': []\n }\n \n # Create some ontologies\n ontology0 = {'L0': ['L0']}\n\n ontology1 = {}\n\n ontology2 = {'L0': ['L1']}\n\n ontology3 = {'L0': ['L1', 'L2'],\n 'L1': ['L2'],\n 'L2': ['L0']}\n\n if self.id().split('.')[-1] == 'test_createLinkograph':\n self.testParams = [\n {'inverseLabeling': invLabeling0,\n 'ontology': ontology0,\n 'ExpectedLinkograph':\n linkoCreate.Linkograph(\n [({'L0'}, set(), {1, 2}),\n ({'L0'}, {0}, {2}),\n ({'L0'}, {0,1}, set())] \n )},\n\n {'inverseLabeling': invLabeling0,\n 'ontology': ontology1,\n 'ExpectedLinkograph':\n linkoCreate.Linkograph(\n [({'L0'}, set(), set()),\n ({'L0'}, set(), set()),\n ({'L0'}, set(), set())]\n )},\n\n {'inverseLabeling': invLabeling0,\n 'ontology': ontology2,\n 'ExpectedLinkograph':\n linkoCreate.Linkograph(\n [({'L0'}, set(), set()),\n ({'L0'}, set(), set()),\n ({'L0'}, set(), set())]\n )},\n\n\n {'inverseLabeling': invLabeling1,\n 'ontology': ontology0,\n 'ExpectedLinkograph':\n linkoCreate.Linkograph(\n [({'L0'}, set(), {2}),\n ({'L1'}, set(), set()),\n ({'L0'}, {0}, set())]\n )},\n\n {'inverseLabeling': invLabeling1,\n 'ontology': ontology1,\n 'ExpectedLinkograph':\n linkoCreate.Linkograph(\n [({'L0'}, set(), set()),\n ({'L1'}, set(), set()),\n ({'L0'}, set(), set())]\n )},\n\n {'inverseLabeling': invLabeling1,\n 'ontology': ontology2,\n 'ExpectedLinkograph':\n linkoCreate.Linkograph(\n [({'L0'}, set(), {1}),\n ({'L1'}, {0}, set()),\n ({'L0'}, set(), set())]\n )},\n\n {'inverseLabeling': invLabeling0,\n 'ontology': ontology3,\n 'ExpectedLinkograph':\n linkoCreate.Linkograph(\n [({'L0'}, set(), set()),\n ({'L0'}, set(), set()),\n ({'L0'}, set(), set())]\n )},\n\n {'inverseLabeling': 
invLabeling1,\n 'ontology': ontology3,\n 'ExpectedLinkograph':\n linkoCreate.Linkograph(\n [({'L0'}, set(), {1}),\n ({'L1'}, {0}, set()),\n ({'L0'}, set(), set())]\n )},\n\n {'inverseLabeling': invLabeling2,\n 'ontology': ontology3,\n 'ExpectedLinkograph':\n linkoCreate.Linkograph(\n [({'L0'}, set(), {1,2}),\n ({'L1'}, {0}, {2}),\n ({'L2'}, {0, 1}, set())]\n )},\n\n {'inverseLabeling': invLabeling3,\n 'ontology': ontology3,\n 'ExpectedLinkograph':\n linkoCreate.Linkograph(\n [({'L1'}, set(), {2}),\n ({'L1'}, set(), {2}),\n ({'L2'}, {0, 1}, set())]\n )},\n\n {'inverseLabeling': invLabeling4,\n 'ontology': ontology3,\n 'ExpectedLinkograph':\n linkoCreate.Linkograph(\n [({'L0', 'L1'}, set(), {2}),\n ({'L0'}, set(), {2}),\n ({'L2'}, {0, 1}, set())]\n )},\n\n {'inverseLabeling': invLabeling5,\n 'ontology': ontology3,\n 'ExpectedLinkograph':\n linkoCreate.Linkograph(\n [({'L0'}, set(), set()),\n ({'L0'}, set(), set()),\n ({'L0'}, set(), set())]\n )},\n\n ]", "def test_mutual_exclusivity_of_labels(self, tmpdir, docker_tasker, labels, expected_fail):\n runner = mock_env(tmpdir, docker_tasker, labels=labels)\n if expected_fail:\n with pytest.raises(PluginFailedException) as e:\n runner.run()\n assert 'only one of labels' in str(e.value)\n else:\n runner.run()", "def helper_label(helper):\n return helper.info", "def test_Tree():", "def test_col_data_label_with_attrs(self):\n help_tag = 'span'\n help_text_br = False\n names = ('first', 'billing_address_1')\n attrs = {'style': 'width: 10rem; display: inline-block'}\n label_attrs = {name: attrs for name in names}\n txt = '{}=\"{}\"'.format(*list(attrs.items())[0])\n expected = ['<label for=\"id_first\" {}>First:</label>'.format(txt)]\n expected.append('<label for=\"id_billing_address_1\" {}>street address (line 1):</label>'.format(txt))\n actual = []\n for name in names:\n field = self.form.fields[name]\n response = self.form.collect_col_data(name, field, help_tag, help_text_br, label_attrs)\n actual.append(response.get('label'))\n\n for expect, got in zip(expected, actual):\n self.assertEqual(expect, got)", "def test_help_text_group(self): \n field_help_text = {\n \"title\": \"Дайте назание группе\",\n \"slug\": ('Укажите адрес для группы. 
Используйте '\n 'только латиницу, цифры, дефисы и знаки '\n 'подчёркивания'),\n } \n for value, expected in field_help_text.items():\n with self.subTest(value=value):\n self.assertEqual(self.group._meta.get_field(value).help_text, expected)", "def test_label_choices(self):\n test_classes = (\n (0, 'No'),\n (1, 'Yes')\n )\n\n form = SingleLabelClassifierForm(classes=test_classes)\n self.assertEqual(tuple(form.fields['label'].choices), test_classes)\n\n form = MultiLabelClassifierForm(classes=test_classes)\n self.assertEqual(tuple(form.fields['label'].choices), test_classes)", "def test_filter_tree(self):\r\n actual = [e.Name for e in filter_tree(\r\n self.tree1, ['bbb', 'ccc']).tips()]\r\n #(a_a:10,(b_b:2,c_c:4):5);\r\n expected = [\r\n e.Name for e in DndParser(\r\n \"((bbb:2,ccc:4));\",\r\n PhyloNode).tips(\r\n )]\r\n self.assertEqual(actual, expected)\r\n\r\n actual = [e.Name for e in filter_tree(\r\n self.tree2, ['bbb', 'ccc']).tips()]\r\n #(a_a:10,(b_b:2,c_c:4):5);\r\n expected = [\r\n e.Name for e in DndParser(\r\n \"(('bbb':2,ccc:4));\",\r\n PhyloNode).tips(\r\n )]\r\n self.assertEqual(actual, expected)", "def test_radiotextgroup(self):\r\n self.check_group('radiotextgroup', 'choice', 'radio')", "def gen_test_case_combination(self):\n\n cases = '\\n'\n\n binary_ops = list(self.BINARY_OPS)\n binary_ops.reverse()\n for op1 in self.BINARY_OPS:\n for op2 in binary_ops:\n\n result = []\n ret = IntOp.binary_op(op2, '0', '1', self.lane_width)\n ret = IntOp.binary_op(op1, ret, '2', self.lane_width)\n result.append(ret)\n\n cases += '\\n' + str(AssertReturn('{lane_type}.{op1}-{lane_type}.{op2}'.format(lane_type=self.LANE_TYPE, op1=op1, op2=op2),\n [SIMD.v128_const('0', self.LANE_TYPE),\n SIMD.v128_const('1', self.LANE_TYPE),\n SIMD.v128_const('2', self.LANE_TYPE)],\n SIMD.v128_const(result, self.LANE_TYPE)))\n cases += '\\n'\n return cases", "def test_multiple_task_groups_dag(\n self, test_multiple_taskgroups_dag, multiple_taskgroups_dag_expected_edges\n ):\n (\n dag,\n group1,\n group2,\n group3,\n (\n group1_emp1,\n group1_emp2,\n group1_emp3,\n group2_emp1,\n group2_emp2,\n group2_emp3,\n group2_op1,\n group2_op2,\n group3_emp1,\n group3_emp2,\n group3_emp3,\n emp_in1,\n emp_in2,\n emp_in3,\n emp_in4,\n emp_out1,\n emp_out2,\n emp_out3,\n emp_out4,\n op_in1,\n op_out1,\n ),\n ) = test_multiple_taskgroups_dag\n\n group1_emp1 >> Label(\"label group1.group1_emp1 <=> group1.group1_emp2\") >> group1_emp3\n\n emp_in1 >> group1\n emp_in2 >> Label(\"label emp_in2 <=> group1\") >> group1\n [emp_in3, emp_in4] >> Label(\"label emp_in3/emp_in4 <=> group1\") >> group1\n XComArg(op_in1, \"test_key\") >> Label(\"label op_in1 <=> group1\") >> group1\n\n (\n [group2_emp1, group2_emp2]\n >> Label(\"label group2.group2_emp1/group2.group2_emp2 <=> group2.group2_emp3\")\n >> group2_emp3\n )\n (\n group2_emp1\n >> Label(\"label group2.group2_emp1 <=> group2.group2_emp2/group2.group2_emp3\")\n >> [group2_emp2, group2_emp3]\n )\n group2_emp3 >> Label(\"label group2.group2_emp3 <=> group3\") >> group3\n\n (\n XComArg(group2_op1, \"test_key\")\n >> Label(\"label group2.group2_op1 <=> group2.group2_op2\")\n >> XComArg(group2_op2, \"test_key\")\n )\n XComArg(group2_op2, \"test_key\") >> Label(\"label group2.group2_op2 <=> group3\") >> group3\n\n group3 >> emp_out1\n group3 >> Label(\"label group3 <=> emp_out2\") >> emp_out2\n group3 >> Label(\"label group3 <=> emp_out3/emp_out4\") >> [emp_out3, emp_out4]\n group3 >> Label(\"label group3 <=> op_out1\") >> XComArg(op_out1, \"test_key\")\n\n group1 >> Label(\"label 
group1 <=> group2\") >> group2\n\n compare_dag_edges(dag_edges(dag), multiple_taskgroups_dag_expected_edges)", "def test_issue_create_label(self):\n pass", "def test__create_label_w_no_ent_id(ruler: SpaczzRuler) -> None:\n assert ruler._create_label(\"TEST\", None) == \"TEST\"", "def test_grouping(self):\n s = self.create(ComponentItem, UML.Component)\n uc1 = self.create(UseCaseItem, UML.UseCase)\n uc2 = self.create(UseCaseItem, UML.UseCase)\n\n self.group(s, uc1)\n assert 1 == len(uc1.subject.subject)\n self.group(s, uc2)\n assert 1 == len(uc2.subject.subject)\n\n # Classifier.useCase is not navigable to UseCase\n # self.assertEqual(2, len(s.subject.useCase))", "def setLabel2(*args):", "def tests_ti_document_get_label(self):\n super().group_get_label()", "def test_render_label(self):\n label = self.block.meta.label\n self.assertEqual(label, 'Google Calendar', 'The labels are not the same')", "def check_elm(orphan_root, obj, orphan_labels, orphan_widgets):\n\n oid = obj.attrib.get('id')\n klass = obj.attrib.get('class')\n\n # \"Don't care\" special case\n if klass in widgets_ignored:\n return\n for suffix in widgets_suffixignored:\n if klass[-len(suffix):] == suffix:\n return\n\n # Widgets usual do not strictly require a label, i.e. a labelled parent\n # is enough for context, but some do always need one.\n requires_label = klass in widgets_needlabel\n\n if oid is not None:\n # Check that ids are unique\n if oid in ids_dup:\n if ids[oid] == obj:\n # We are the first, warn\n duplicates = tree.findall(\".//object[@id='\" + oid + \"']\")\n err(filename, tree, obj, \"duplicate-id\", \"has the same id as other elements \" + elms_names_lines(duplicates))\n\n # Check label-for and their dual labelled-by\n label_for = check_rels(filename, tree, root, obj, \"label-for\", \"labelled-by\")\n\n # Check labelled-by and its dual label-for\n labelled_by = check_rels(filename, tree, root, obj, \"labelled-by\", \"label-for\")\n\n # Check label-for and their dual labelled-by\n description_for = check_rels(filename, tree, root, obj, \"description-for\", \"described-by\")\n\n # Check labelled-by and its dual label-for\n described_by = check_rels(filename, tree, root, obj, \"described-by\", \"description-for\")\n\n # Should have only one label\n if len(labelled_by) >= 1:\n if oid in mnemonic_for_elm:\n warn(filename, tree, obj, \"labelled-by-and-mnemonic\",\n \"has both a mnemonic \" + elm_name_line(mnemonic_for_elm[oid][0]) + \"and labelled-by relation\")\n if len(labelled_by) > 1:\n warn(filename, tree, obj, \"multiple-labelled-by\", \"has multiple labelled-by relations\")\n if oid in label_for_elm:\n if len(label_for_elm[oid]) > 1:\n warn(filename, tree, obj, \"duplicate-label-for\", \"is referenced by multiple label-for \" + elms_names_lines(label_for_elm[oid]))\n if oid in mnemonic_for_elm:\n if len(mnemonic_for_elm[oid]) > 1:\n warn(filename, tree, obj, \"duplicate-mnemonic\", \"is referenced by multiple mnemonic_widget \" + elms_names_lines(mnemonic_for_elm[oid]))\n\n # Check member-of\n member_of = check_rels(filename, tree, root, obj, \"member-of\")\n\n # Labels special case\n if klass in widgets_labels:\n properties = check_props(filename, tree, root, obj, \"mnemonic_widget\") + \\\n check_props(filename, tree, root, obj, \"mnemonic-widget\")\n if len(properties) > 1:\n err(filename, tree, obj, \"multiple-mnemonic\", \"has multiple mnemonic_widgets properties\"\n \"%s\" % elms_lines(properties))\n\n # Emit orphaning warnings\n if warn_orphan_labels or orphan_widgets:\n is_orphan_label(filename, tree, 
root, obj, orphan_root, True)\n\n # We are done with the label\n return\n\n # Not a label, will perhaps need one\n\n # Emit orphaning warnings\n is_orphan_widget(filename, tree, root, obj, orphan_labels, orphan_root, True)", "def get_label(self, hierarchy: List[str]) -> Any:", "def test_first_level_from_bids_no_duplicate_sub_labels(bids_dataset):\n models, *_ = first_level_from_bids(\n dataset_path=bids_dataset,\n task_label='main',\n sub_labels=[\"01\", \"01\"],\n space_label='MNI',\n img_filters=[('desc', 'preproc')],\n slice_time_ref=None,\n )\n\n assert len(models) == 1", "def test_add_empty_nodes_with_label_when_splitting(self):\n print \"----- test_add_empty_nodes_with_label_when_splitting -----\"\n sel_axis = (lambda axis: axis)\n \n #create tree, first node splits in x direction\n tree = kdtree.createNewTree([[0.5, 0.5]],axis = 0, sel_axis= sel_axis)\n kdtree.visualize(tree)\n \n point_left = [0.4, 0.5]\n tree.split2(point_left, axis = 0)\n kdtree.visualize(tree)\n \n point1 = [0.3, 0.5]\n found_node = tree.get_path_to_leaf(point1)[-1]\n correct_node1 = 3\n self.assertEqual(found_node.label, correct_node1, \"Not correct node found\")\n \n point_right = [0.6, 0.5]\n tree.split2(point_right, axis = 1)\n kdtree.visualize(tree)\n \n point2 = [0.6, 0.7]\n found_node = tree.get_path_to_leaf(point2)[-1]\n correct_node2 = 6\n self.assertEqual(found_node.label, correct_node2, \"Not correct node found\")\n \n print \"----- end: test_add_empty_nodes_with_label_when_splitting -----\"", "def test_render_value_label(self):\n self.check_html(\n self.widget(choices=self.beatles),\n \"beatles\",\n [\"John\"],\n html=(\n \"\"\"<select multiple name=\"beatles\">\n <option value=\"J\">John</option>\n <option value=\"P\">Paul</option>\n <option value=\"G\">George</option>\n <option value=\"R\">Ringo</option>\n </select>\"\"\"\n ),\n )", "def test_issue_add_label(self):\n pass", "def test_create_metering_label(self):\r\n resource = 'metering_label'\r\n cmd = metering.CreateMeteringLabel(\r\n test_cli20.MyApp(sys.stdout), None)\r\n name = 'my label'\r\n myid = 'myid'\r\n description = 'my description'\r\n args = [name, '--description', description, '--shared']\r\n position_names = ['name', 'description', 'shared']\r\n position_values = [name, description, True]\r\n self._test_create_resource(resource, cmd, name, myid, args,\r\n position_names, position_values)", "def getLabel(*args):", "def getLabel(*args):", "def getLabel(*args):", "def _make_vbenf_label(chain_parts):\n\n # toy label for development: run simple and dijet independently.\n # simple makes Et cuts on two jets. 
Independently (sharing possible)\n # of jets choosean by simple, the dijet\n # scenario requires a dijet of mass > 900, and opening angle in phi > 2.6\n\n assert len(chain_parts) == 1\n scenario = chain_parts[0]['hypoScenario']\n assert scenario.startswith('vbenf')\n args = _args_from_scenario(scenario)\n if not args:\n return 'and([]simple([(50et)(70et)])combgen([(2)] dijet([(900djmass, 26djdphi)])))' \n arg_res = [\n re.compile(r'(?P<lo>\\d*)(?P<key>fbet)(?P<hi>\\d*)'),\n re.compile(r'(?P<lo>\\d*)(?P<key>mass)(?P<hi>\\d*)'),\n re.compile(r'(?P<lo>\\d*)(?P<key>et)(?P<hi>\\d*)'),\n ]\n\n defaults = {\n 'et': ('101', 'inf'),\n 'mass': ('800', 'inf'),\n 'fbet': ('501', 'inf'),\n }\n\n argvals = {}\n while args:\n assert len(args) == len(arg_res)\n arg = args.pop()\n for r in arg_res:\n m = r.match(arg)\n if m is not None:\n arg_res.remove(r)\n gd = m.groupdict()\n key = gd['key']\n try:\n lo = float(gd['lo'])\n except ValueError:\n lo = defaults[key][0]\n argvals[key+'lo'] = lo \n try:\n hi = float(gd['hi'])\n except ValueError:\n hi = defaults[key][1]\n argvals[key+'hi'] = hi\n\n assert len(args) == len(arg_res)\n assert len(args) == 0\n\n return \"\"\"\n and\n (\n []\n simple\n (\n [(%(etlo).0fet, 500neta)(%(etlo).0fet, peta500)]\n )\n combgen\n (\n [(10et, 0eta320)]\n dijet\n (\n [(%(masslo).0fdjmass, 26djdphi)]\n ) \n simple\n (\n [(10et, 0eta320)(20et, 0eta320)]\n )\n )\n )\"\"\" % argvals", "def testFormatLabelAndValue(self):\n\n self.assertEqual('Abc: xyz', self.inv._FormatLabelAndValue('abc', 'xyz', 1))\n self.assertEqual('ABc: xyz', self.inv._FormatLabelAndValue('abc', 'xyz', 2))\n self.assertEqual('ABC: xyz', self.inv._FormatLabelAndValue('abc', 'xyz', 4))", "def test_recipe_nutrition_label_image(self):\n pass", "def test_label_10_targets_with_a_b_c_(self):\r\n user_input = '[{\"a\":\"target1\"}, \\\r\n {\"b\":\"target2\"},{\"c\":\"target3\"},{\"a\":\"target4\"},{\"b\":\"target5\"}, \\\r\n {\"c\":\"target6\"}, {\"a\":\"target7\"},{\"b\":\"target8\"},{\"c\":\"target9\"}, \\\r\n {\"a\":\"target10\"}]'\r\n correct_answer = [\r\n {\r\n 'draggables': ['a'],\r\n 'targets': ['target1', 'target4', 'target7', 'target10'],\r\n 'rule': 'unordered_equal'\r\n },\r\n {\r\n 'draggables': ['b'],\r\n 'targets': ['target2', 'target5', 'target8'],\r\n 'rule': 'unordered_equal'\r\n },\r\n {\r\n 'draggables': ['c'],\r\n 'targets': ['target3', 'target6', 'target9'],\r\n 'rule': 'unordered_equal'\r\n }\r\n ]\r\n self.assertTrue(draganddrop.grade(user_input, correct_answer))", "def test_label_10_targets_with_a_b_c_multiple(self):\r\n user_input = '[{\"a\":\"target1\"}, \\\r\n {\"b\":\"target2\"},{\"c\":\"target3\"},{\"b\":\"target5\"}, \\\r\n {\"c\":\"target6\"}, {\"a\":\"target7\"},{\"b\":\"target8\"},{\"c\":\"target9\"}, \\\r\n {\"a\":\"target1\"}]'\r\n correct_answer = [\r\n {\r\n 'draggables': ['a', 'a', 'a'],\r\n 'targets': ['target1', 'target4', 'target7', 'target10'],\r\n 'rule': 'anyof+number'\r\n },\r\n {\r\n 'draggables': ['b', 'b', 'b'],\r\n 'targets': ['target2', 'target5', 'target8'],\r\n 'rule': 'anyof+number'\r\n },\r\n {\r\n 'draggables': ['c', 'c', 'c'],\r\n 'targets': ['target3', 'target6', 'target9'],\r\n 'rule': 'anyof+number'\r\n }\r\n ]\r\n self.assertTrue(draganddrop.grade(user_input, correct_answer))", "def test_subsystems(self):\n pass", "def test_multiply_some_type_links():", "def test_build_match_tree_with_pairs():\n abbreviation_list = [[\"ELIF\", \"ELI.\"], [\"ELSE\", \"E.\"]]\n expected_tree = {\"E\": {\"L\": {\"I\": {\"F\": \"ELI.\"}, \"S\": {\"E\": \"E.\"}}}}\n tree = 
build_match_tree(abbreviation_list)\n assert repr(tree) == repr(expected_tree)", "def check_a11y_relation(filename, tree):\n global widgets_ignored, ids, label_for_elm, labelled_by_elm, description_for_elm, described_by_elm, mnemonic_for_elm\n\n def check_elm(orphan_root, obj, orphan_labels, orphan_widgets):\n \"\"\"\n Check one element, knowing that orphan_labels/widgets tell whether\n there are orphan labels and widgets within orphan_root\n \"\"\"\n\n oid = obj.attrib.get('id')\n klass = obj.attrib.get('class')\n\n # \"Don't care\" special case\n if klass in widgets_ignored:\n return\n for suffix in widgets_suffixignored:\n if klass[-len(suffix):] == suffix:\n return\n\n # Widgets usual do not strictly require a label, i.e. a labelled parent\n # is enough for context, but some do always need one.\n requires_label = klass in widgets_needlabel\n\n if oid is not None:\n # Check that ids are unique\n if oid in ids_dup:\n if ids[oid] == obj:\n # We are the first, warn\n duplicates = tree.findall(\".//object[@id='\" + oid + \"']\")\n err(filename, tree, obj, \"duplicate-id\", \"has the same id as other elements \" + elms_names_lines(duplicates))\n\n # Check label-for and their dual labelled-by\n label_for = check_rels(filename, tree, root, obj, \"label-for\", \"labelled-by\")\n\n # Check labelled-by and its dual label-for\n labelled_by = check_rels(filename, tree, root, obj, \"labelled-by\", \"label-for\")\n\n # Check label-for and their dual labelled-by\n description_for = check_rels(filename, tree, root, obj, \"description-for\", \"described-by\")\n\n # Check labelled-by and its dual label-for\n described_by = check_rels(filename, tree, root, obj, \"described-by\", \"description-for\")\n\n # Should have only one label\n if len(labelled_by) >= 1:\n if oid in mnemonic_for_elm:\n warn(filename, tree, obj, \"labelled-by-and-mnemonic\",\n \"has both a mnemonic \" + elm_name_line(mnemonic_for_elm[oid][0]) + \"and labelled-by relation\")\n if len(labelled_by) > 1:\n warn(filename, tree, obj, \"multiple-labelled-by\", \"has multiple labelled-by relations\")\n if oid in label_for_elm:\n if len(label_for_elm[oid]) > 1:\n warn(filename, tree, obj, \"duplicate-label-for\", \"is referenced by multiple label-for \" + elms_names_lines(label_for_elm[oid]))\n if oid in mnemonic_for_elm:\n if len(mnemonic_for_elm[oid]) > 1:\n warn(filename, tree, obj, \"duplicate-mnemonic\", \"is referenced by multiple mnemonic_widget \" + elms_names_lines(mnemonic_for_elm[oid]))\n\n # Check member-of\n member_of = check_rels(filename, tree, root, obj, \"member-of\")\n\n # Labels special case\n if klass in widgets_labels:\n properties = check_props(filename, tree, root, obj, \"mnemonic_widget\") + \\\n check_props(filename, tree, root, obj, \"mnemonic-widget\")\n if len(properties) > 1:\n err(filename, tree, obj, \"multiple-mnemonic\", \"has multiple mnemonic_widgets properties\"\n \"%s\" % elms_lines(properties))\n\n # Emit orphaning warnings\n if warn_orphan_labels or orphan_widgets:\n is_orphan_label(filename, tree, root, obj, orphan_root, True)\n\n # We are done with the label\n return\n\n # Not a label, will perhaps need one\n\n # Emit orphaning warnings\n is_orphan_widget(filename, tree, root, obj, orphan_labels, orphan_root, True)\n\n root = tree.getroot()\n\n # Flush ids and relations from previous files\n ids = {}\n ids_dup = {}\n labelled_by_elm = {}\n label_for_elm = {}\n described_by_elm = {}\n description_for_elm = {}\n mnemonic_for_elm = {}\n\n # First pass to get links into hash tables, no warning, just record 
duplicates\n for obj in root.iter('object'):\n oid = obj.attrib.get('id')\n if oid is not None:\n if oid not in ids:\n ids[oid] = obj\n else:\n ids_dup[oid] = True\n\n labelled_by = obj.findall(\"accessibility/relation[@type='labelled-by']\")\n labelled_by += obj.findall(\"accessibility/relation[@name='labelled-by']\")\n for rel in labelled_by:\n target = rel.attrib.get('target')\n if target is not None:\n if target not in labelled_by_elm:\n labelled_by_elm[target] = [ obj ]\n else:\n labelled_by_elm[target].append(obj)\n\n label_for = obj.findall(\"accessibility/relation[@type='label-for']\")\n label_for += obj.findall(\"accessibility/relation[@name='label-for']\")\n for rel in label_for:\n target = rel.attrib.get('target')\n if target is not None:\n if target not in label_for_elm:\n label_for_elm[target] = [ obj ]\n else:\n label_for_elm[target].append(obj)\n\n described_by = obj.findall(\"accessibility/relation[@type='described-by']\")\n described_by += obj.findall(\"accessibility/relation[@name='described-by']\")\n for rel in described_by:\n target = rel.attrib.get('target')\n if target is not None:\n if target not in described_by_elm:\n described_by_elm[target] = [ obj ]\n else:\n described_by_elm[target].append(obj)\n\n description_for = obj.findall(\"accessibility/relation[@type='description-for']\")\n description_for += obj.findall(\"accessibility/relation[@name='description-for']\")\n for rel in description_for:\n target = rel.attrib.get('target')\n if target is not None:\n if target not in description_for_elm:\n description_for_elm[target] = [ obj ]\n else:\n description_for_elm[target].append(obj)\n\n mnemonic_for = obj.findall(\"property[@name='mnemonic_widget']\") + \\\n obj.findall(\"property[@name='mnemonic-widget']\")\n for rel in mnemonic_for:\n target = rel.text\n if target is not None:\n if target not in mnemonic_for_elm:\n mnemonic_for_elm[target] = [ obj ]\n else:\n mnemonic_for_elm[target].append(obj)\n\n # Second pass, recursive depth-first, to be able to efficiently know whether\n # there are orphan labels within a part of the tree.\n def recurse(orphan_root, obj, orphan_labels, orphan_widgets):\n if obj == root or is_labelled_parent(obj):\n orphan_root = obj\n orphan_labels, orphan_widgets = orphan_items(filename, tree, root, obj)\n\n if obj.tag == 'object':\n check_elm(orphan_root, obj, orphan_labels, orphan_widgets)\n\n for o in obj:\n recurse(orphan_root, o, orphan_labels, orphan_widgets)\n\n recurse(root, root, False, False)", "def test_stratis_list_default(self):\n for subcommand in [[\"pool\"], [\"filesystem\"], [\"blockdev\"]]:\n for prefix in [[], [\"--propagate\"]]:\n self.assertEqual(RUNNER(prefix + subcommand), 0)", "def pytest_can_run_together(item1, item2):", "def test_series_in_features(self):\n assert parse_command({'test{{A,B}}': {'depends_on': 'name{{A,B}}'}}) == [\n ('testA', {'depends_on': 'nameA'}), ('testB', {'depends_on': 'nameB'})]", "def test_first_level_from_bids_with_subject_labels(bids_dataset):\n warning_message = ('Subject label foo is not present in'\n ' the dataset and cannot be processed')\n with pytest.warns(UserWarning, match=warning_message):\n models, *_ = first_level_from_bids(\n dataset_path=bids_dataset,\n task_label='main',\n sub_labels=[\"foo\", \"01\"],\n space_label='MNI',\n img_filters=[('desc', 'preproc')],\n slice_time_ref=None,\n )\n\n assert models[0].subject_label == '01'", "def test_pathop12(self):\n xpb = XPathBuilder()\n # braces not needed\n xp = xpb.foo & (xpb.bar.foo).parenthesize() | xpb.foobar\n exp = '/foo 
and (/bar/foo) or /foobar'\n self.assertEqual(xp.tostring(), exp)", "def test_dummy6(self):\n xpb = XPathBuilder()\n xp = xpb.dummy()\n xp = xpb.bar | xp\n exp = '/bar'\n self.assertEqual(xp.tostring(), exp)", "def test_create_labels(self):\n test_labels = {\"app\": \"my_app\", \"host\": \"examplehost\"}\n labels = pmp.utils.create_labels(test_labels)\n self.assertIsInstance(labels, list)\n self.assertTrue(all([isinstance(x, pmp.LabelPair) for x in labels]))", "def test_xdist_and_select_test_by_bdd_label(xdist_runner: AllurePytestRunner):\n\n output = xdist_runner.run_docstring(\"-v\", \"--allure-features=boo\", \"-n1\")\n\n assert_that(output, has_only_testcases(\n has_entry(\n \"fullName\",\n ends_with(\"test_with_feature_boo\")\n )\n ))", "def test_nested_condition() -> None:\n\n @argcomb(Or(And(\"a\", \"b\"), And(\"c\", \"d\")))\n def f(a: Any = None, b: Any = None, c: Any = None, d: Any = None) -> None:\n ...\n\n # valid\n f(a=1, b=1)\n f(c=1, d=1)\n f(a=1, b=1, c=1, d=1)\n\n # invalid\n with pytest.raises(InvalidArgumentCombination):\n f(a=1)\n with pytest.raises(InvalidArgumentCombination):\n f(a=1, c=1)\n with pytest.raises(InvalidArgumentCombination):\n f()", "def test_abbreviation(self):\n self.assertEqual(self.compound.abbreviation, \"Cool\")", "def test_general_subset_all():\n pass", "def test_check_tree_exact_match(self):\r\n\r\n fasta_labels = ['seq1_1', 'seq1_2', 'seq2_3', 'seq3_4']\r\n\r\n actual_subset_results = check_tree_exact_match(fasta_labels,\r\n self.sample_tree_3tips_fp)\r\n\r\n # Should find all and give True, True result\r\n\r\n self.assertEqual(actual_subset_results, [True, True])\r\n\r\n # Should get tips not found in fasta labels with 5 tip tree\r\n\r\n fasta_labels = ['seq1_1', 'seq1_2', 'seq2_3', 'seq3_4']\r\n\r\n actual_subset_results = check_tree_exact_match(fasta_labels,\r\n self.sample_tree_5tips_fp)\r\n\r\n # Should find all and give True result\r\n\r\n self.assertEqual(actual_subset_results, [True, ['seq5', 'seq4']])\r\n\r\n # Change two of the fasta labels to not match tree tips\r\n\r\n fasta_labels = ['seq1_1', 'seqX_2', 'seq2_3', 'seqY_4']\r\n\r\n actual_subset_results = check_tree_exact_match(fasta_labels,\r\n self.sample_tree_5tips_fp)\r\n\r\n # Should find seqX and seqY as not being a subset\r\n\r\n self.assertEqual(actual_subset_results, [['seqX', 'seqY'],\r\n ['seq3', 'seq5', 'seq4']])", "def test_03_visit_special(self):", "def test_labels(ruler: SpaczzRuler) -> None:\n assert all(\n [label in ruler.labels for label in [\"GPE\", \"STREET\", \"DRUG\", \"NAME\", \"BAND\"]]\n )\n assert len(ruler.labels) == 5", "def test_createSubLinkographWithoutCommands(self):\n self.performTestForParams()", "def test_first_level_from_bids_several_labels_per_entity(tmp_path, entity):\n n_sub = 1\n n_ses = 1\n tasks = [\"main\"]\n n_runs = [1]\n\n bids_path = create_fake_bids_dataset(\n base_dir=tmp_path,\n n_sub=n_sub,\n n_ses=n_ses,\n tasks=tasks,\n n_runs=n_runs,\n entities={entity: [\"A\", \"B\"]},\n )\n\n models, m_imgs, m_events, m_confounds = first_level_from_bids(\n dataset_path=bids_path,\n task_label=\"main\",\n space_label=\"MNI\",\n img_filters=[(\"desc\", \"preproc\"), (entity, \"A\")],\n slice_time_ref=None,\n )\n\n _check_output_first_level_from_bids(n_sub,\n models,\n m_imgs,\n m_events,\n m_confounds)\n n_imgs_expected = n_ses * n_runs[0]\n assert len(m_imgs[0]) == n_imgs_expected", "def test_setter_child_list_tuple(self):\n root = netapp_api.NaElement('root')\n root['l'] = ['l1', 'l2']\n root['t'] = ('t1', 't2')\n l = 
root.get_child_by_name('l')\n self.assertIsInstance(l, netapp_api.NaElement)\n t = root.get_child_by_name('t')\n self.assertIsInstance(t, netapp_api.NaElement)\n for le in l.get_children():\n self.assertIn(le.get_name(), ['l1', 'l2'])\n for te in t.get_children():\n self.assertIn(te.get_name(), ['t1', 't2'])", "def test_tree_support(self):\r\n master_tree = parse_newick('((a:2,b:3)ab:2,(c:1,d:2)cd:7)rt;')\r\n \"\"\"\r\n /-------.5 /-a\r\n ---1| \\-b\r\n \\------.5 /-c\r\n \\-d\r\n \"\"\"\r\n t2 = parse_newick('((a:2,b:3,c:33)ho:2,d:7);') # abc are siblings\r\n\r\n tc.tree_support(master_tree, t2)\r\n assert_almost_equal(\r\n master_tree.getNodeMatchingName('rt').bootstrap_support, 1.0)", "def test_labels(self):\n return self._test_labels", "def test_issue_edit_label(self):\n pass", "def test_dbpa002_radio_items(dash_duo):\n app = Dash()\n\n options = {\n \"OptionA\": \"Option 1\",\n \"OptionB\": \"Option 2\",\n \"OptionC\": \"Option 3\",\n }\n\n value = \"OptionB\"\n\n with_keywords = RadioItems(\n options=options,\n value=value,\n id=\"with-keywords\",\n )\n without_keywords = RadioItems(options, value, id=\"without-keywords\")\n\n app.layout = html.Div([with_keywords, without_keywords])\n\n dash_duo.start_server(app)\n\n # Check values\n assert [\n a.get_attribute(\"value\")\n for a in dash_duo.wait_for_element(\n \"#with-keywords\"\n ).find_elements_by_tag_name(\"input\")\n ] == [\n a.get_attribute(\"value\")\n for a in dash_duo.wait_for_element(\n \"#without-keywords\"\n ).find_elements_by_tag_name(\"input\")\n ]\n\n # Check labels\n assert [\n a.text\n for a in dash_duo.wait_for_element(\n \"#with-keywords\"\n ).find_elements_by_tag_name(\"label\")\n ] == [\n a.text\n for a in dash_duo.wait_for_element(\n \"#without-keywords\"\n ).find_elements_by_tag_name(\"label\")\n ]", "def test(self):\n for doc, label in zip(self.test_docs(), self.test_labels()):\n yield doc, label", "def test_get_parent_type_name(self):\n pass", "def test_pathop8(self):\n xpb = XPathBuilder()\n xp = (xpb.foo.bar | xpb.foobar).parenthesize() & xpb.action.source\n exp = '(/foo/bar or /foobar) and /action/source'\n self.assertEqual(xp.tostring(), exp)", "def test_setter_child_list_tuple(self):\n root = netapp_api.NaElement('root')\n root['l'] = ['l1', 'l2']\n root['t'] = ('t1', 't2')\n l_element = root.get_child_by_name('l')\n self.assertIsInstance(l_element, netapp_api.NaElement)\n t = root.get_child_by_name('t')\n self.assertIsInstance(t, netapp_api.NaElement)\n for le in l_element.get_children():\n self.assertIn(le.get_name(), ['l1', 'l2'])\n for te in t.get_children():\n self.assertIn(te.get_name(), ['t1', 't2'])", "def __init__(self, root_node, label1, label2):\n super().__init__(self.PROBLEM_NAME)\n self.root_node = root_node\n self.label1 = label1\n self.label2 = label2", "def test_radioselect_field():", "def test_label_callback():\n release_numbers = dict(a='123')\n data = dict(revision='a', attributes=dict(b='c'))\n data2 = dict(revision='b', attributes=dict(d='e'))\n\n assert _label_callback(data, release_numbers) == u'a\\n- Release: 123\\n- b: c'\n assert _label_callback(data2) == u'b\\n- Release: Unknown\\n- d: e'", "def _make_simple_partition_label(chain_dict):\n\n cps = chain_dict['chainParts']\n if not (_select_simple_chainparts(cps)):\n raise NotImplementedError(\n 'chain fails substring selection: not \"simple\": %s' % (\n chain_dict['chainName']))\n \n label = 'simplepartition(['\n for cp in cps:\n smcstr = str(cp['smc'])\n if smcstr == 'nosmc':\n smcstr = ''\n for i in 
range(int(cp['multiplicity'])):\n # condition_str = '(%set,%s,%s)' % (str(cp['threshold']),\n # str(cp['etaRange']),\n # smcstr,)\n condition_str = '(%set,%s' % (str(cp['threshold']),\n str(cp['etaRange']),)\n if smcstr:\n condition_str += ',%s)'\n else:\n condition_str += ')'\n label += condition_str\n label += '])'\n return label", "def test_case1(self):\n\n graph = BipartiteGraph()\n\n graph.addEdge(\"supervisor1\",\"student1\")\n\n val1 = graph.getStudents(\"supervisor1\")\n val2 = graph.getSupervisors(\"student1\")\n\n expected1 = [\"student1\"]\n expected2 = [\"supervisor1\"]\n\n self.assertEqual((val1,val2),(expected1,expected2))", "def test_with_multiple_descriptions():\n soup = generate_case(\"with_descriptions\")\n\n tests.html_schema_doc_asserts.assert_descriptions(\n soup,\n [\n \"Exact address\",\n \"Exact address\",\n \"Delivery info depending on the delivery type\",\n \"The delivery is a gift, no prices displayed\",\n ],\n )", "def test_BuildModel2(self):\n print(\"\\nTest 6: Building a Model with Concat\")\n builder = StaticBuilder(\"Concat\")\n in1 = builder.addInput(10)\n in2 = builder.addInput(20)\n enc1 = builder.addInner(3, num_islots=2)\n out1 = builder.addOutput()\n\n builder.addDirectedLink(in1, enc1, islot=0)\n builder.addDirectedLink(in2, enc1, islot=1)\n builder.addDirectedLink(enc1, out1)\n \n builder.build()", "def test_multiple_label_traversals(self):\r\n TestEdge.create(self.v1, self.v2)\r\n OtherTestEdge.create(self.v1, self.v3)\r\n YetAnotherTestEdge.create(self.v1, self.v4)\r\n\r\n assert len(self.v1.outV()) == 3\r\n\r\n assert len(self.v1.outV(TestEdge)) == 1\r\n assert len(self.v1.outV(OtherTestEdge)) == 1\r\n assert len(self.v1.outV(YetAnotherTestEdge)) == 1\r\n\r\n out = self.v1.outV(TestEdge, OtherTestEdge)\r\n assert len(out) == 2\r\n assert self.v2.vid in [v.vid for v in out]\r\n assert self.v3.vid in [v.vid for v in out]\r\n\r\n out = self.v1.outV(OtherTestEdge, YetAnotherTestEdge)\r\n assert len(out) == 2\r\n assert self.v3.vid in [v.vid for v in out]\r\n assert self.v4.vid in [v.vid for v in out]", "def test_label_10_targets_with_a_b_c_false(self):\r\n user_input = '[{\"a\":\"target1\"}, \\\r\n {\"b\":\"target2\"},{\"c\":\"target3\"},{\"a\":\"target4\"},{\"b\":\"target5\"}, \\\r\n {\"c\":\"target6\"}, {\"a\":\"target7\"},{\"b\":\"target8\"},{\"c\":\"target9\"}, \\\r\n {\"a\":\"target1\"}]'\r\n correct_answer = [\r\n {\r\n 'draggables': ['a'],\r\n 'targets': ['target1', 'target4', 'target7', 'target10'],\r\n 'rule': 'unordered_equal'\r\n },\r\n {\r\n 'draggables': ['b'],\r\n 'targets': ['target2', 'target5', 'target8'],\r\n 'rule': 'unordered_equal'\r\n },\r\n {\r\n 'draggables': ['c'],\r\n 'targets': ['target3', 'target6', 'target9'],\r\n 'rule': 'unordered_equal'\r\n }\r\n ]\r\n self.assertFalse(draganddrop.grade(user_input, correct_answer))" ]
[ "0.6137433", "0.6108757", "0.60195917", "0.5986544", "0.5936767", "0.58589", "0.5759686", "0.56950617", "0.5624841", "0.55902916", "0.55750483", "0.5560212", "0.5554626", "0.55542696", "0.5553175", "0.5553175", "0.55467266", "0.5503819", "0.5457172", "0.5451371", "0.54313546", "0.53999746", "0.5376586", "0.5375577", "0.5364844", "0.5359766", "0.535864", "0.5338235", "0.53258616", "0.53186446", "0.5317656", "0.5315998", "0.5315065", "0.528178", "0.5272056", "0.5271867", "0.5262255", "0.5256516", "0.5254785", "0.5251119", "0.5245671", "0.52348095", "0.5232146", "0.5209617", "0.520278", "0.52023655", "0.51976585", "0.51973414", "0.5193984", "0.5191381", "0.51910853", "0.5168662", "0.51612574", "0.5156716", "0.5155904", "0.5155904", "0.5155904", "0.515586", "0.51550406", "0.51405126", "0.51366323", "0.5126447", "0.51249856", "0.5119241", "0.5116798", "0.5115688", "0.5107617", "0.510387", "0.5094173", "0.5092521", "0.5089139", "0.50838256", "0.5078707", "0.5075308", "0.50716203", "0.5061532", "0.5061452", "0.50607544", "0.50605214", "0.50554216", "0.50476885", "0.5047116", "0.50363714", "0.5028754", "0.5026291", "0.5024467", "0.50243545", "0.5023076", "0.50198036", "0.5014891", "0.5011064", "0.5007032", "0.50008494", "0.49937844", "0.49911672", "0.49879867", "0.49861643", "0.49833554", "0.49800858", "0.49767923" ]
0.6972838
0
make test label for combinations helper with two simple children.
def _make_partitionsTest_label(chain_parts):

    assert len(chain_parts) == 1
    scenario = chain_parts[0]['hypoScenario']
    assert scenario == 'partitionsTest'

    return """
    partgen(
        [(20et, 0eta320)]
        simple([(40et, 0eta320) (50et, 0eta320)])
        simple([(35et, 0eta240) (55et, 0eta240)])
    )"""
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _make_combinationsTest_label(chain_parts):\n\n assert len(chain_parts) == 1\n scenario = chain_parts[0]['hypoScenario']\n \n assert scenario == 'combinationsTest'\n\n \n\n return \"\"\"\n combgen(\n [(2)(20et, 0eta320)]\n \n simple([(40et, 0eta320) (50et, 0eta320)])\n simple([(35et, 0eta240) (55et, 0eta240)])\n )\"\"\"", "def test_sums():\n assert label_parent(1, 2) == 3\n assert label_parent (1, 4) == 8\n # Should ignore arg order\n assert label_parent(4, 1) == 8", "def test_recipe_nutrition_label_widget(self):\n pass", "def test_select_label(self):\n title = ('Transportation Challenges Limit Education Choices for '\n 'Denver Parents')\n summary = \"\"\"\n Many families in the Denver metro area use public\n transportation instead of a school bus because for them, a\n quality education is worth hours of daily commuting. Colorado's\n school choice program is meant to foster educational equity,\n but the families who benefit most are those who have time and\n money to travel. Low-income families are often left in a lurch.\n \"\"\"\n byline = \"Mile High Connects\"\n story = create_story(title=title, summary=summary, byline=byline)\n layout = SectionLayout.objects.get(sectionlayouttranslation__name=\"Side by Side\")\n section1 = create_section(title=\"Test Section 1\", story=story, layout=layout)\n section2 = create_section(title=\"Test Section 2\", story=story, layout=layout)\n form = SectionRelationAdminForm()\n choices_list = list(form.fields['parent'].widget.choices)\n self.assertIn(story.title, choices_list[1][1])\n self.assertIn(story.title, choices_list[2][1])", "def setUp(self):\n\n singleLabels = linkoCreate.Linkograph(\n [({'A'}, set(), {1,2,3}),\n ({'D'}, {0}, {3,4}),\n ({'A'}, {0}, {4}),\n ({'C'}, {0,1}, {4}),\n ({'A'}, {1,2,3}, set())],\n ['A', 'B', 'C', 'D'])\n\n singleSubLinko0_2 = linkoCreate.Linkograph(\n [({'A'}, set(), {1,2}),\n ({'D'}, {0}, set()),\n ({'A'}, {0}, set())],\n ['A', 'B', 'C', 'D'])\n\n singleSubLinko0_1 = linkoCreate.Linkograph(\n [({'A'}, set(), {1}),\n ({'D'}, {0}, set())],\n ['A', 'B', 'C', 'D'])\n\n singleSubLinko0_0 = linkoCreate.Linkograph(\n [({'A'}, set(), set())],\n ['A', 'B', 'C', 'D'])\n\n singleSubLinko1_2 = linkoCreate.Linkograph(\n [({'D'}, set(), set()),\n ({'A'}, set(), set())],\n ['A', 'B', 'C', 'D'])\n\n singleSubLinko1_1 = linkoCreate.Linkograph(\n [({'D'}, set(), set())],\n ['A', 'B', 'C', 'D'])\n\n trivialLinkograph = linkoCreate.Linkograph(\n [], ['A', 'B', 'C', 'D'])\n\n\n singleSubLinko1_4 = linkoCreate.Linkograph(\n [({'D'}, set(), {2,3}),\n ({'A'}, set(), {3}),\n ({'C'}, {0}, {3}),\n ({'A'}, {0,1,2}, set())],\n ['A', 'B', 'C', 'D'])\n\n singleSubLinko2_4 = linkoCreate.Linkograph(\n [({'A'}, set(), {2}),\n ({'C'}, set(), {2}),\n ({'A'}, {0,1}, set())],\n ['A', 'B', 'C', 'D'])\n\n singleSubLinko3_4 = linkoCreate.Linkograph(\n [({'C'}, set(), {1}),\n ({'A'}, {0}, set())],\n ['A', 'B', 'C', 'D'])\n\n singleSubLinko4_4 = linkoCreate.Linkograph(\n [({'A'}, set(), set())],\n ['A', 'B', 'C', 'D'])\n\n simpleLinko = linkoCreate.Linkograph(\n [({'A', 'B', 'C'}, set(), {1,2,3}),\n ({'D'}, {0}, {3,4}),\n ({'A'}, {0}, {4}),\n ({'B', 'C'}, {0,1}, {4}),\n ({'A'}, {1,2,3}, set())],\n ['A', 'B', 'C', 'D'])\n\n if self.id().split('.')[-1] == 'test_createSubLinkographWithoutCommands':\n self.testParams = [\n {'linko': singleLabels,\n 'lowerBound': None,\n 'upperBound': None,\n 'ExpectedLinkograph': singleLabels},\n\n {'linko': singleLabels,\n 'lowerBound': 0,\n 'upperBound': 4,\n 'ExpectedLinkograph': singleLabels},\n\n {'linko': singleLabels,\n 
'lowerBound': 0,\n 'upperBound': 5,\n 'ExpectedLinkograph': singleLabels},\n\n {'linko': singleLabels,\n 'lowerBound': 0,\n 'upperBound': 2,\n 'ExpectedLinkograph': singleSubLinko0_2},\n\n {'linko': singleLabels,\n 'lowerBound': -1,\n 'upperBound': 2,\n 'ExpectedLinkograph': singleSubLinko0_2},\n\n {'linko': singleLabels,\n 'lowerBound': None,\n 'upperBound': 2,\n 'ExpectedLinkograph': singleSubLinko0_2},\n\n {'linko': singleLabels,\n 'lowerBound': 0,\n 'upperBound': 1,\n 'ExpectedLinkograph': singleSubLinko0_1},\n\n {'linko': singleLabels,\n 'lowerBound': 0,\n 'upperBound': 0,\n 'ExpectedLinkograph': singleSubLinko0_0},\n\n {'linko': singleLabels,\n 'lowerBound': 0,\n 'upperBound': -1,\n 'ExpectedLinkograph': trivialLinkograph},\n\n {'linko': singleLabels,\n 'lowerBound': 1,\n 'upperBound': 2,\n 'ExpectedLinkograph': singleSubLinko1_2},\n\n {'linko': singleLabels,\n 'lowerBound': 1,\n 'upperBound': 1,\n 'ExpectedLinkograph': singleSubLinko1_1},\n\n {'linko': singleLabels,\n 'lowerBound': 1,\n 'upperBound': 0,\n 'ExpectedLinkograph': trivialLinkograph},\n\n {'linko': singleLabels,\n 'lowerBound': -1,\n 'upperBound': -1,\n 'ExpectedLinkograph': trivialLinkograph},\n\n {'linko': singleLabels,\n 'lowerBound': 1,\n 'upperBound': 4,\n 'ExpectedLinkograph': singleSubLinko1_4},\n\n {'linko': singleLabels,\n 'lowerBound': 2,\n 'upperBound': 4,\n 'ExpectedLinkograph': singleSubLinko2_4},\n\n {'linko': singleLabels,\n 'lowerBound': 3,\n 'upperBound': 4,\n 'ExpectedLinkograph': singleSubLinko3_4},\n\n {'linko': singleLabels,\n 'lowerBound': 4,\n 'upperBound': 4,\n 'ExpectedLinkograph': singleSubLinko4_4},\n\n ]", "def test_labels(self):\n self.compliance_tester.test_labels(self.oi)", "def test_parent_label(self):\n l = self.d.label(1)\n l2 = self.d.label(31405)\n\n self.assertTrue(l.parent_label is None)\n self.assertTrue(l2 in l.sublabels)\n self.assertEqual(l2.parent_label, l)", "def test_simple(self):\n exp = [{'type': NewickEvents.OPEN_SUBTREE, 'comments': []},\n {'type': NewickEvents.OPEN_SUBTREE, 'comments': []},\n {'edge_info': None, 'type': NewickEvents.TIP, 'comments': [], 'label': 'h'},\n {'edge_info': None, 'type': NewickEvents.TIP, 'comments': [], 'label': 'p'},\n {'edge_info': '1', 'type': NewickEvents.CLOSE_SUBTREE,\n 'comments': [], 'label': 'hp'},\n {'edge_info': None, 'type': NewickEvents.TIP, 'comments': [], 'label': 'g'},\n {'edge_info': None, 'type': NewickEvents.CLOSE_SUBTREE,\n 'comments': [], 'label': 'hpg'}\n ]\n content = '((h,p)hp:1,g)hpg;'\n self._do_test(content, exp)\n content = '((h,[pretest]p[test][posttest])hp,g)hpg;'\n exp = [{'type': NewickEvents.OPEN_SUBTREE, 'comments': []},\n {'type': NewickEvents.OPEN_SUBTREE, 'comments': []},\n {'edge_info': None, 'type': NewickEvents.TIP, 'comments': [], 'label': 'h'},\n {'edge_info': None, 'type': NewickEvents.TIP,\n 'comments': ['pretest', 'test', 'posttest'], 'label': 'p'},\n {'edge_info': None, 'type': NewickEvents.CLOSE_SUBTREE,\n 'comments': [], 'label': 'hp'},\n {'edge_info': None, 'type': NewickEvents.TIP, 'comments': [], 'label': 'g'},\n {'edge_info': None, 'type': NewickEvents.CLOSE_SUBTREE,\n 'comments': [], 'label': 'hpg'}\n ]\n self._do_test(content, exp)", "def test_label(self):\n xs = t.Label(t.Exactly(\"x\"), 'CustomLabel')\n self.assertEqual(writePython(xs),\n dd(\"\"\"\n def _G_label_1():\n _G_exactly_2, lastError = self.exactly('x')\n self.considerError(lastError, None)\n return (_G_exactly_2, self.currentError)\n _G_label_3, lastError = self.label(_G_label_1, \"CustomLabel\")\n self.considerError(lastError, 
None)\n _G_label_3\n \"\"\"))", "def addLabels(t):\n if not t.label:\n t.label = \"\".join([choice(\"abcdefghijklmnopqrstuvwxyz\") for i in range(4)])\n for r,w in t.children:\n addLabels(r)", "def test_general_subset_level():\n pass", "def _make_simple_comb_label(chain_dict):\n\n cps = chain_dict['chainParts']\n if not (_select_simple_chainparts(cps)):\n raise NotImplementedError(\n 'chain fails substring selection: not \"simple\": %s' % (\n chain_dict['chainName']))\n \n simple_strs = []\n\n for cp in cps:\n print(cp)\n simple_strs.append(_make_simple_label([cp]))\n\n label = 'combgen([(%d)]' % len(cps)\n for s in simple_strs:\n label += ' %s ' % s\n label += ')'\n return label", "def _generateLabelAndName(self, obj, **args):\n result = []\n label = self._generateLabel(obj, **args)\n name = self._generateName(obj, **args)\n result.extend(label)\n if not len(label):\n result.extend(name)\n elif len(name) and name[0].strip() != label[0].strip():\n result.extend(name)\n return result", "def tests_ti_document_add_label(self):\n super().group_add_label()", "def getLabel2(*args):", "def getLabel2(*args):", "def test_checkboxtextgroup(self):\r\n self.check_group('checkboxtextgroup', 'choice', 'checkbox')", "def _generateTableCell2ChildLabel(self, obj, **args):\n result = []\n\n # If this table cell has 2 children and one of them has a\n # 'toggle' action and the other does not, then present this\n # as a checkbox where:\n # 1) we get the checked state from the cell with the 'toggle' action\n # 2) we get the label from the other cell.\n # See Orca bug #376015 for more details.\n #\n if obj.childCount == 2:\n cellOrder = []\n hasToggle = [False, False]\n for i, child in enumerate(obj):\n if self._script.utilities.hasMeaningfulToggleAction(child):\n hasToggle[i] = True\n break\n if hasToggle[0] and not hasToggle[1]:\n cellOrder = [ 1, 0 ]\n elif not hasToggle[0] and hasToggle[1]:\n cellOrder = [ 0, 1 ]\n if cellOrder:\n for i in cellOrder:\n if not hasToggle[i]:\n result.extend(self.generate(obj[i], **args))\n return result", "def test_process_label_in_node(self):\n tree = Node(children=[\n Node(\"Defining secret phrase.\", label=['AB', 'a']),\n Node(\"Has secret phrase. 
Then some other content\", \n label=['AB', 'b'])\n ], label=['AB'])\n t = Terms(tree)\n t.scoped_terms = {\n ('AB',): [Ref(\"secret phrase\", \"AB-a\", (9,22))]\n }\n # Term is defined in the first child\n self.assertEqual([], t.process(tree.children[0]))\n self.assertEqual(1, len(t.process(tree.children[1])))", "def get_extra_label(self, label_name: str, hierarchy: List[str]) -> Any:", "def test_get_scenarios_expanded(self):\n pass", "def test_nested():\n res = conf.status.conditions.choose(lambda c: (c.type, c.reason, c.root.metadata.choose(lambda m: (m[\"name\"], m.uid))))\n assert \"type\" in res # from conditions\n assert \"reason\" in res # from conditions\n assert \"name\" in res # from metadata\n assert \"uid\" in res # from metadata", "def _make_simple_label(chain_parts):\n \n if not _select_simple_chainparts(chain_parts):\n msg = 'Jet Configuration error: '\\\n 'chain fails substring selection: not \"simple\" '\n\n raise NotImplementedError(msg)\n \n label = 'simple(['\n for cp in chain_parts:\n smcstr = str(cp['smc'])\n jvtstr = str(cp['jvt'])\n if smcstr == 'nosmc':\n smcstr = ''\n for i in range(int(cp['multiplicity'])):\n # condition_str = '(%set,%s,%s)' % (str(cp['threshold']),\n # str(cp['etaRange']),\n # smcstr,)\n condition_str = '(%set,%s' % (str(cp['threshold']),\n str(cp['etaRange']),)\n if smcstr: # Run 2 chains have \"INF\" in the SMC substring\n condition_str += ',%s)' % smcstr.replace('INF','')\n elif jvtstr:\n condition_str += ',%s)' % jvtstr\n else:\n condition_str += ')'\n label += condition_str\n label += '])'\n return label", "def test_nested_sequence(self):\n\n self.taxon_tester('Apis mellifera')\n self.taxon_tester('Apis')\n self.taxon_tester('Apini')\n self.taxon_tester('Apinae')\n # Apidae at 5680 species is a struggle\n self.taxon_tester('Apidae')\n if False:\n # Apoidea: 19566 takes 223 seconds\n self.taxon_tester('Apoidea')\n # Aculeata fails after 339 seconds\n self.taxon_tester('Aculeata')\n self.taxon_tester('Apocrita')\n self.taxon_tester('Hymenoptera')\n self.taxon_tester('Endopterygota')\n self.taxon_tester('Neoptera')\n self.taxon_tester('Pterygota')\n self.taxon_tester('Dicondylia')\n self.taxon_tester('Insecta')\n self.taxon_tester('Hexapoda')\n self.taxon_tester('Pancrustacea')\n self.taxon_tester('Mandibulata')\n self.taxon_tester('Arthropoda')\n self.taxon_tester('Panarthropoda')\n self.taxon_tester('Ecdysozoa')\n self.taxon_tester('Protostomia')\n self.taxon_tester('Bilateria')\n self.taxon_tester('Eumetazoa')\n self.taxon_tester('Metazoa')\n self.taxon_tester('Holozoa')\n self.taxon_tester('Opisthokonta')\n self.taxon_tester('Eukaryota')", "def test_bootstrap_support_labeled(self):\r\n master_tree = parse_newick('((a:2,b:3)ab:2,(c:1,d:2)cd:7)rt;')\r\n \"\"\"\r\n /-------.5 /-a\r\n ---1| \\-b\r\n \\------.5 /-c\r\n \\-d\r\n \"\"\"\r\n t1 = parse_newick('((a:6,b:8.2)hi:2,(c:1,d:2):7);') # same structure\r\n t2 = parse_newick('((a:2,b:3,c:33)ho:2,d:7);') # abc are siblings\r\n new_master, bootstraps = tc.bootstrap_support(master_tree, [t1, t2])\r\n expected = dict([('ab', .5), ('cd', .5), ('rt', 1.0)])\r\n self.assertDictEqual(bootstraps, expected)", "def test_title(names):", "def test_verbose_name_group(self): \n field_verboses = {\n \"title\": \"Название группы\",\n \"slug\": \"Слаг\",\n \"description\": \"Описание группы\",\n }\n for value, expected in field_verboses.items():\n with self.subTest(value=value):\n self.assertEqual(self.group._meta.get_field(value).verbose_name, expected)", "def plugin_second_label():\n return \"second\"", 
"def test_product_labels(self):\n\n prd = Product.objects.get(id=1)\n # label name\n label_name = prd._meta.get_field('name').verbose_name\n self.assertEqual(label_name, 'name')\n # label description\n label_name = prd._meta.get_field('description').verbose_name\n self.assertEqual(label_name, 'description')\n # label nutrition_grade\n label_name = prd._meta.get_field('nutrition_grade').name\n self.assertEqual(label_name, 'nutrition_grade')\n # label barcode\n label_name = prd._meta.get_field('barcode').verbose_name\n self.assertEqual(label_name, 'barcode')\n # label url\n label_name = prd._meta.get_field('url').verbose_name\n self.assertEqual(label_name, 'url')\n # label url_pic\n label_name = prd._meta.get_field('url_pic').name\n self.assertEqual(label_name, 'url_pic')\n # label store\n label_name = prd._meta.get_field('store').verbose_name\n self.assertEqual(label_name, 'store')\n # label prd_cat\n label_name = prd._meta.get_field('prd_cat').name\n self.assertEqual(label_name, 'prd_cat')\n # label fat\n label_name = prd._meta.get_field('fat').verbose_name\n self.assertEqual(label_name, 'fat')\n # label saturated_fat\n label_name = prd._meta.get_field('saturated_fat').name\n self.assertEqual(label_name, 'saturated_fat')\n # label sugar\n label_name = prd._meta.get_field('sugar').verbose_name\n self.assertEqual(label_name, 'sugar')\n # label salt\n label_name = prd._meta.get_field('salt').verbose_name\n self.assertEqual(label_name, 'salt')", "def is_test(self):\r\n return self.has_label('tests')", "def setUp(self):\n\n\n # InverseLabeling\n invLabeling0 = {'L0': [0, 1, 2]}\n\n invLabeling1 = {'L0' : [0, 2],\n 'L1' : [1]}\n\n invLabeling2 = {\n 'L0' : [0],\n 'L1' : [1],\n 'L2' : [2]\n }\n\n invLabeling3 = {\n 'L1' : [0, 1],\n 'L2' : [2]\n }\n\n invLabeling4 = {\n 'L0' : [0,1],\n 'L1' : [0],\n 'L2' : [2]\n }\n\n invLabeling5 = {\n 'L0': [0, 1, 2],\n 'L1': []\n }\n \n # Create some ontologies\n ontology0 = {'L0': ['L0']}\n\n ontology1 = {}\n\n ontology2 = {'L0': ['L1']}\n\n ontology3 = {'L0': ['L1', 'L2'],\n 'L1': ['L2'],\n 'L2': ['L0']}\n\n if self.id().split('.')[-1] == 'test_createLinkograph':\n self.testParams = [\n {'inverseLabeling': invLabeling0,\n 'ontology': ontology0,\n 'ExpectedLinkograph':\n linkoCreate.Linkograph(\n [({'L0'}, set(), {1, 2}),\n ({'L0'}, {0}, {2}),\n ({'L0'}, {0,1}, set())] \n )},\n\n {'inverseLabeling': invLabeling0,\n 'ontology': ontology1,\n 'ExpectedLinkograph':\n linkoCreate.Linkograph(\n [({'L0'}, set(), set()),\n ({'L0'}, set(), set()),\n ({'L0'}, set(), set())]\n )},\n\n {'inverseLabeling': invLabeling0,\n 'ontology': ontology2,\n 'ExpectedLinkograph':\n linkoCreate.Linkograph(\n [({'L0'}, set(), set()),\n ({'L0'}, set(), set()),\n ({'L0'}, set(), set())]\n )},\n\n\n {'inverseLabeling': invLabeling1,\n 'ontology': ontology0,\n 'ExpectedLinkograph':\n linkoCreate.Linkograph(\n [({'L0'}, set(), {2}),\n ({'L1'}, set(), set()),\n ({'L0'}, {0}, set())]\n )},\n\n {'inverseLabeling': invLabeling1,\n 'ontology': ontology1,\n 'ExpectedLinkograph':\n linkoCreate.Linkograph(\n [({'L0'}, set(), set()),\n ({'L1'}, set(), set()),\n ({'L0'}, set(), set())]\n )},\n\n {'inverseLabeling': invLabeling1,\n 'ontology': ontology2,\n 'ExpectedLinkograph':\n linkoCreate.Linkograph(\n [({'L0'}, set(), {1}),\n ({'L1'}, {0}, set()),\n ({'L0'}, set(), set())]\n )},\n\n {'inverseLabeling': invLabeling0,\n 'ontology': ontology3,\n 'ExpectedLinkograph':\n linkoCreate.Linkograph(\n [({'L0'}, set(), set()),\n ({'L0'}, set(), set()),\n ({'L0'}, set(), set())]\n )},\n\n {'inverseLabeling': 
invLabeling1,\n 'ontology': ontology3,\n 'ExpectedLinkograph':\n linkoCreate.Linkograph(\n [({'L0'}, set(), {1}),\n ({'L1'}, {0}, set()),\n ({'L0'}, set(), set())]\n )},\n\n {'inverseLabeling': invLabeling2,\n 'ontology': ontology3,\n 'ExpectedLinkograph':\n linkoCreate.Linkograph(\n [({'L0'}, set(), {1,2}),\n ({'L1'}, {0}, {2}),\n ({'L2'}, {0, 1}, set())]\n )},\n\n {'inverseLabeling': invLabeling3,\n 'ontology': ontology3,\n 'ExpectedLinkograph':\n linkoCreate.Linkograph(\n [({'L1'}, set(), {2}),\n ({'L1'}, set(), {2}),\n ({'L2'}, {0, 1}, set())]\n )},\n\n {'inverseLabeling': invLabeling4,\n 'ontology': ontology3,\n 'ExpectedLinkograph':\n linkoCreate.Linkograph(\n [({'L0', 'L1'}, set(), {2}),\n ({'L0'}, set(), {2}),\n ({'L2'}, {0, 1}, set())]\n )},\n\n {'inverseLabeling': invLabeling5,\n 'ontology': ontology3,\n 'ExpectedLinkograph':\n linkoCreate.Linkograph(\n [({'L0'}, set(), set()),\n ({'L0'}, set(), set()),\n ({'L0'}, set(), set())]\n )},\n\n ]", "def test_mutual_exclusivity_of_labels(self, tmpdir, docker_tasker, labels, expected_fail):\n runner = mock_env(tmpdir, docker_tasker, labels=labels)\n if expected_fail:\n with pytest.raises(PluginFailedException) as e:\n runner.run()\n assert 'only one of labels' in str(e.value)\n else:\n runner.run()", "def helper_label(helper):\n return helper.info", "def test_Tree():", "def test_col_data_label_with_attrs(self):\n help_tag = 'span'\n help_text_br = False\n names = ('first', 'billing_address_1')\n attrs = {'style': 'width: 10rem; display: inline-block'}\n label_attrs = {name: attrs for name in names}\n txt = '{}=\"{}\"'.format(*list(attrs.items())[0])\n expected = ['<label for=\"id_first\" {}>First:</label>'.format(txt)]\n expected.append('<label for=\"id_billing_address_1\" {}>street address (line 1):</label>'.format(txt))\n actual = []\n for name in names:\n field = self.form.fields[name]\n response = self.form.collect_col_data(name, field, help_tag, help_text_br, label_attrs)\n actual.append(response.get('label'))\n\n for expect, got in zip(expected, actual):\n self.assertEqual(expect, got)", "def test_help_text_group(self): \n field_help_text = {\n \"title\": \"Дайте назание группе\",\n \"slug\": ('Укажите адрес для группы. 
Используйте '\n 'только латиницу, цифры, дефисы и знаки '\n 'подчёркивания'),\n } \n for value, expected in field_help_text.items():\n with self.subTest(value=value):\n self.assertEqual(self.group._meta.get_field(value).help_text, expected)", "def test_label_choices(self):\n test_classes = (\n (0, 'No'),\n (1, 'Yes')\n )\n\n form = SingleLabelClassifierForm(classes=test_classes)\n self.assertEqual(tuple(form.fields['label'].choices), test_classes)\n\n form = MultiLabelClassifierForm(classes=test_classes)\n self.assertEqual(tuple(form.fields['label'].choices), test_classes)", "def test_filter_tree(self):\r\n actual = [e.Name for e in filter_tree(\r\n self.tree1, ['bbb', 'ccc']).tips()]\r\n #(a_a:10,(b_b:2,c_c:4):5);\r\n expected = [\r\n e.Name for e in DndParser(\r\n \"((bbb:2,ccc:4));\",\r\n PhyloNode).tips(\r\n )]\r\n self.assertEqual(actual, expected)\r\n\r\n actual = [e.Name for e in filter_tree(\r\n self.tree2, ['bbb', 'ccc']).tips()]\r\n #(a_a:10,(b_b:2,c_c:4):5);\r\n expected = [\r\n e.Name for e in DndParser(\r\n \"(('bbb':2,ccc:4));\",\r\n PhyloNode).tips(\r\n )]\r\n self.assertEqual(actual, expected)", "def test_radiotextgroup(self):\r\n self.check_group('radiotextgroup', 'choice', 'radio')", "def gen_test_case_combination(self):\n\n cases = '\\n'\n\n binary_ops = list(self.BINARY_OPS)\n binary_ops.reverse()\n for op1 in self.BINARY_OPS:\n for op2 in binary_ops:\n\n result = []\n ret = IntOp.binary_op(op2, '0', '1', self.lane_width)\n ret = IntOp.binary_op(op1, ret, '2', self.lane_width)\n result.append(ret)\n\n cases += '\\n' + str(AssertReturn('{lane_type}.{op1}-{lane_type}.{op2}'.format(lane_type=self.LANE_TYPE, op1=op1, op2=op2),\n [SIMD.v128_const('0', self.LANE_TYPE),\n SIMD.v128_const('1', self.LANE_TYPE),\n SIMD.v128_const('2', self.LANE_TYPE)],\n SIMD.v128_const(result, self.LANE_TYPE)))\n cases += '\\n'\n return cases", "def test_multiple_task_groups_dag(\n self, test_multiple_taskgroups_dag, multiple_taskgroups_dag_expected_edges\n ):\n (\n dag,\n group1,\n group2,\n group3,\n (\n group1_emp1,\n group1_emp2,\n group1_emp3,\n group2_emp1,\n group2_emp2,\n group2_emp3,\n group2_op1,\n group2_op2,\n group3_emp1,\n group3_emp2,\n group3_emp3,\n emp_in1,\n emp_in2,\n emp_in3,\n emp_in4,\n emp_out1,\n emp_out2,\n emp_out3,\n emp_out4,\n op_in1,\n op_out1,\n ),\n ) = test_multiple_taskgroups_dag\n\n group1_emp1 >> Label(\"label group1.group1_emp1 <=> group1.group1_emp2\") >> group1_emp3\n\n emp_in1 >> group1\n emp_in2 >> Label(\"label emp_in2 <=> group1\") >> group1\n [emp_in3, emp_in4] >> Label(\"label emp_in3/emp_in4 <=> group1\") >> group1\n XComArg(op_in1, \"test_key\") >> Label(\"label op_in1 <=> group1\") >> group1\n\n (\n [group2_emp1, group2_emp2]\n >> Label(\"label group2.group2_emp1/group2.group2_emp2 <=> group2.group2_emp3\")\n >> group2_emp3\n )\n (\n group2_emp1\n >> Label(\"label group2.group2_emp1 <=> group2.group2_emp2/group2.group2_emp3\")\n >> [group2_emp2, group2_emp3]\n )\n group2_emp3 >> Label(\"label group2.group2_emp3 <=> group3\") >> group3\n\n (\n XComArg(group2_op1, \"test_key\")\n >> Label(\"label group2.group2_op1 <=> group2.group2_op2\")\n >> XComArg(group2_op2, \"test_key\")\n )\n XComArg(group2_op2, \"test_key\") >> Label(\"label group2.group2_op2 <=> group3\") >> group3\n\n group3 >> emp_out1\n group3 >> Label(\"label group3 <=> emp_out2\") >> emp_out2\n group3 >> Label(\"label group3 <=> emp_out3/emp_out4\") >> [emp_out3, emp_out4]\n group3 >> Label(\"label group3 <=> op_out1\") >> XComArg(op_out1, \"test_key\")\n\n group1 >> Label(\"label 
group1 <=> group2\") >> group2\n\n compare_dag_edges(dag_edges(dag), multiple_taskgroups_dag_expected_edges)", "def test_issue_create_label(self):\n pass", "def test__create_label_w_no_ent_id(ruler: SpaczzRuler) -> None:\n assert ruler._create_label(\"TEST\", None) == \"TEST\"", "def test_grouping(self):\n s = self.create(ComponentItem, UML.Component)\n uc1 = self.create(UseCaseItem, UML.UseCase)\n uc2 = self.create(UseCaseItem, UML.UseCase)\n\n self.group(s, uc1)\n assert 1 == len(uc1.subject.subject)\n self.group(s, uc2)\n assert 1 == len(uc2.subject.subject)\n\n # Classifier.useCase is not navigable to UseCase\n # self.assertEqual(2, len(s.subject.useCase))", "def setLabel2(*args):", "def tests_ti_document_get_label(self):\n super().group_get_label()", "def test_render_label(self):\n label = self.block.meta.label\n self.assertEqual(label, 'Google Calendar', 'The labels are not the same')", "def check_elm(orphan_root, obj, orphan_labels, orphan_widgets):\n\n oid = obj.attrib.get('id')\n klass = obj.attrib.get('class')\n\n # \"Don't care\" special case\n if klass in widgets_ignored:\n return\n for suffix in widgets_suffixignored:\n if klass[-len(suffix):] == suffix:\n return\n\n # Widgets usual do not strictly require a label, i.e. a labelled parent\n # is enough for context, but some do always need one.\n requires_label = klass in widgets_needlabel\n\n if oid is not None:\n # Check that ids are unique\n if oid in ids_dup:\n if ids[oid] == obj:\n # We are the first, warn\n duplicates = tree.findall(\".//object[@id='\" + oid + \"']\")\n err(filename, tree, obj, \"duplicate-id\", \"has the same id as other elements \" + elms_names_lines(duplicates))\n\n # Check label-for and their dual labelled-by\n label_for = check_rels(filename, tree, root, obj, \"label-for\", \"labelled-by\")\n\n # Check labelled-by and its dual label-for\n labelled_by = check_rels(filename, tree, root, obj, \"labelled-by\", \"label-for\")\n\n # Check label-for and their dual labelled-by\n description_for = check_rels(filename, tree, root, obj, \"description-for\", \"described-by\")\n\n # Check labelled-by and its dual label-for\n described_by = check_rels(filename, tree, root, obj, \"described-by\", \"description-for\")\n\n # Should have only one label\n if len(labelled_by) >= 1:\n if oid in mnemonic_for_elm:\n warn(filename, tree, obj, \"labelled-by-and-mnemonic\",\n \"has both a mnemonic \" + elm_name_line(mnemonic_for_elm[oid][0]) + \"and labelled-by relation\")\n if len(labelled_by) > 1:\n warn(filename, tree, obj, \"multiple-labelled-by\", \"has multiple labelled-by relations\")\n if oid in label_for_elm:\n if len(label_for_elm[oid]) > 1:\n warn(filename, tree, obj, \"duplicate-label-for\", \"is referenced by multiple label-for \" + elms_names_lines(label_for_elm[oid]))\n if oid in mnemonic_for_elm:\n if len(mnemonic_for_elm[oid]) > 1:\n warn(filename, tree, obj, \"duplicate-mnemonic\", \"is referenced by multiple mnemonic_widget \" + elms_names_lines(mnemonic_for_elm[oid]))\n\n # Check member-of\n member_of = check_rels(filename, tree, root, obj, \"member-of\")\n\n # Labels special case\n if klass in widgets_labels:\n properties = check_props(filename, tree, root, obj, \"mnemonic_widget\") + \\\n check_props(filename, tree, root, obj, \"mnemonic-widget\")\n if len(properties) > 1:\n err(filename, tree, obj, \"multiple-mnemonic\", \"has multiple mnemonic_widgets properties\"\n \"%s\" % elms_lines(properties))\n\n # Emit orphaning warnings\n if warn_orphan_labels or orphan_widgets:\n is_orphan_label(filename, tree, 
root, obj, orphan_root, True)\n\n # We are done with the label\n return\n\n # Not a label, will perhaps need one\n\n # Emit orphaning warnings\n is_orphan_widget(filename, tree, root, obj, orphan_labels, orphan_root, True)", "def get_label(self, hierarchy: List[str]) -> Any:", "def test_first_level_from_bids_no_duplicate_sub_labels(bids_dataset):\n models, *_ = first_level_from_bids(\n dataset_path=bids_dataset,\n task_label='main',\n sub_labels=[\"01\", \"01\"],\n space_label='MNI',\n img_filters=[('desc', 'preproc')],\n slice_time_ref=None,\n )\n\n assert len(models) == 1", "def test_add_empty_nodes_with_label_when_splitting(self):\n print \"----- test_add_empty_nodes_with_label_when_splitting -----\"\n sel_axis = (lambda axis: axis)\n \n #create tree, first node splits in x direction\n tree = kdtree.createNewTree([[0.5, 0.5]],axis = 0, sel_axis= sel_axis)\n kdtree.visualize(tree)\n \n point_left = [0.4, 0.5]\n tree.split2(point_left, axis = 0)\n kdtree.visualize(tree)\n \n point1 = [0.3, 0.5]\n found_node = tree.get_path_to_leaf(point1)[-1]\n correct_node1 = 3\n self.assertEqual(found_node.label, correct_node1, \"Not correct node found\")\n \n point_right = [0.6, 0.5]\n tree.split2(point_right, axis = 1)\n kdtree.visualize(tree)\n \n point2 = [0.6, 0.7]\n found_node = tree.get_path_to_leaf(point2)[-1]\n correct_node2 = 6\n self.assertEqual(found_node.label, correct_node2, \"Not correct node found\")\n \n print \"----- end: test_add_empty_nodes_with_label_when_splitting -----\"", "def test_render_value_label(self):\n self.check_html(\n self.widget(choices=self.beatles),\n \"beatles\",\n [\"John\"],\n html=(\n \"\"\"<select multiple name=\"beatles\">\n <option value=\"J\">John</option>\n <option value=\"P\">Paul</option>\n <option value=\"G\">George</option>\n <option value=\"R\">Ringo</option>\n </select>\"\"\"\n ),\n )", "def test_issue_add_label(self):\n pass", "def test_create_metering_label(self):\r\n resource = 'metering_label'\r\n cmd = metering.CreateMeteringLabel(\r\n test_cli20.MyApp(sys.stdout), None)\r\n name = 'my label'\r\n myid = 'myid'\r\n description = 'my description'\r\n args = [name, '--description', description, '--shared']\r\n position_names = ['name', 'description', 'shared']\r\n position_values = [name, description, True]\r\n self._test_create_resource(resource, cmd, name, myid, args,\r\n position_names, position_values)", "def getLabel(*args):", "def getLabel(*args):", "def getLabel(*args):", "def _make_vbenf_label(chain_parts):\n\n # toy label for development: run simple and dijet independently.\n # simple makes Et cuts on two jets. 
Independently (sharing possible)\n # of jets choosean by simple, the dijet\n # scenario requires a dijet of mass > 900, and opening angle in phi > 2.6\n\n assert len(chain_parts) == 1\n scenario = chain_parts[0]['hypoScenario']\n assert scenario.startswith('vbenf')\n args = _args_from_scenario(scenario)\n if not args:\n return 'and([]simple([(50et)(70et)])combgen([(2)] dijet([(900djmass, 26djdphi)])))' \n arg_res = [\n re.compile(r'(?P<lo>\\d*)(?P<key>fbet)(?P<hi>\\d*)'),\n re.compile(r'(?P<lo>\\d*)(?P<key>mass)(?P<hi>\\d*)'),\n re.compile(r'(?P<lo>\\d*)(?P<key>et)(?P<hi>\\d*)'),\n ]\n\n defaults = {\n 'et': ('101', 'inf'),\n 'mass': ('800', 'inf'),\n 'fbet': ('501', 'inf'),\n }\n\n argvals = {}\n while args:\n assert len(args) == len(arg_res)\n arg = args.pop()\n for r in arg_res:\n m = r.match(arg)\n if m is not None:\n arg_res.remove(r)\n gd = m.groupdict()\n key = gd['key']\n try:\n lo = float(gd['lo'])\n except ValueError:\n lo = defaults[key][0]\n argvals[key+'lo'] = lo \n try:\n hi = float(gd['hi'])\n except ValueError:\n hi = defaults[key][1]\n argvals[key+'hi'] = hi\n\n assert len(args) == len(arg_res)\n assert len(args) == 0\n\n return \"\"\"\n and\n (\n []\n simple\n (\n [(%(etlo).0fet, 500neta)(%(etlo).0fet, peta500)]\n )\n combgen\n (\n [(10et, 0eta320)]\n dijet\n (\n [(%(masslo).0fdjmass, 26djdphi)]\n ) \n simple\n (\n [(10et, 0eta320)(20et, 0eta320)]\n )\n )\n )\"\"\" % argvals", "def testFormatLabelAndValue(self):\n\n self.assertEqual('Abc: xyz', self.inv._FormatLabelAndValue('abc', 'xyz', 1))\n self.assertEqual('ABc: xyz', self.inv._FormatLabelAndValue('abc', 'xyz', 2))\n self.assertEqual('ABC: xyz', self.inv._FormatLabelAndValue('abc', 'xyz', 4))", "def test_recipe_nutrition_label_image(self):\n pass", "def test_label_10_targets_with_a_b_c_(self):\r\n user_input = '[{\"a\":\"target1\"}, \\\r\n {\"b\":\"target2\"},{\"c\":\"target3\"},{\"a\":\"target4\"},{\"b\":\"target5\"}, \\\r\n {\"c\":\"target6\"}, {\"a\":\"target7\"},{\"b\":\"target8\"},{\"c\":\"target9\"}, \\\r\n {\"a\":\"target10\"}]'\r\n correct_answer = [\r\n {\r\n 'draggables': ['a'],\r\n 'targets': ['target1', 'target4', 'target7', 'target10'],\r\n 'rule': 'unordered_equal'\r\n },\r\n {\r\n 'draggables': ['b'],\r\n 'targets': ['target2', 'target5', 'target8'],\r\n 'rule': 'unordered_equal'\r\n },\r\n {\r\n 'draggables': ['c'],\r\n 'targets': ['target3', 'target6', 'target9'],\r\n 'rule': 'unordered_equal'\r\n }\r\n ]\r\n self.assertTrue(draganddrop.grade(user_input, correct_answer))", "def test_label_10_targets_with_a_b_c_multiple(self):\r\n user_input = '[{\"a\":\"target1\"}, \\\r\n {\"b\":\"target2\"},{\"c\":\"target3\"},{\"b\":\"target5\"}, \\\r\n {\"c\":\"target6\"}, {\"a\":\"target7\"},{\"b\":\"target8\"},{\"c\":\"target9\"}, \\\r\n {\"a\":\"target1\"}]'\r\n correct_answer = [\r\n {\r\n 'draggables': ['a', 'a', 'a'],\r\n 'targets': ['target1', 'target4', 'target7', 'target10'],\r\n 'rule': 'anyof+number'\r\n },\r\n {\r\n 'draggables': ['b', 'b', 'b'],\r\n 'targets': ['target2', 'target5', 'target8'],\r\n 'rule': 'anyof+number'\r\n },\r\n {\r\n 'draggables': ['c', 'c', 'c'],\r\n 'targets': ['target3', 'target6', 'target9'],\r\n 'rule': 'anyof+number'\r\n }\r\n ]\r\n self.assertTrue(draganddrop.grade(user_input, correct_answer))", "def test_subsystems(self):\n pass", "def test_multiply_some_type_links():", "def test_build_match_tree_with_pairs():\n abbreviation_list = [[\"ELIF\", \"ELI.\"], [\"ELSE\", \"E.\"]]\n expected_tree = {\"E\": {\"L\": {\"I\": {\"F\": \"ELI.\"}, \"S\": {\"E\": \"E.\"}}}}\n tree = 
build_match_tree(abbreviation_list)\n assert repr(tree) == repr(expected_tree)", "def check_a11y_relation(filename, tree):\n global widgets_ignored, ids, label_for_elm, labelled_by_elm, description_for_elm, described_by_elm, mnemonic_for_elm\n\n def check_elm(orphan_root, obj, orphan_labels, orphan_widgets):\n \"\"\"\n Check one element, knowing that orphan_labels/widgets tell whether\n there are orphan labels and widgets within orphan_root\n \"\"\"\n\n oid = obj.attrib.get('id')\n klass = obj.attrib.get('class')\n\n # \"Don't care\" special case\n if klass in widgets_ignored:\n return\n for suffix in widgets_suffixignored:\n if klass[-len(suffix):] == suffix:\n return\n\n # Widgets usual do not strictly require a label, i.e. a labelled parent\n # is enough for context, but some do always need one.\n requires_label = klass in widgets_needlabel\n\n if oid is not None:\n # Check that ids are unique\n if oid in ids_dup:\n if ids[oid] == obj:\n # We are the first, warn\n duplicates = tree.findall(\".//object[@id='\" + oid + \"']\")\n err(filename, tree, obj, \"duplicate-id\", \"has the same id as other elements \" + elms_names_lines(duplicates))\n\n # Check label-for and their dual labelled-by\n label_for = check_rels(filename, tree, root, obj, \"label-for\", \"labelled-by\")\n\n # Check labelled-by and its dual label-for\n labelled_by = check_rels(filename, tree, root, obj, \"labelled-by\", \"label-for\")\n\n # Check label-for and their dual labelled-by\n description_for = check_rels(filename, tree, root, obj, \"description-for\", \"described-by\")\n\n # Check labelled-by and its dual label-for\n described_by = check_rels(filename, tree, root, obj, \"described-by\", \"description-for\")\n\n # Should have only one label\n if len(labelled_by) >= 1:\n if oid in mnemonic_for_elm:\n warn(filename, tree, obj, \"labelled-by-and-mnemonic\",\n \"has both a mnemonic \" + elm_name_line(mnemonic_for_elm[oid][0]) + \"and labelled-by relation\")\n if len(labelled_by) > 1:\n warn(filename, tree, obj, \"multiple-labelled-by\", \"has multiple labelled-by relations\")\n if oid in label_for_elm:\n if len(label_for_elm[oid]) > 1:\n warn(filename, tree, obj, \"duplicate-label-for\", \"is referenced by multiple label-for \" + elms_names_lines(label_for_elm[oid]))\n if oid in mnemonic_for_elm:\n if len(mnemonic_for_elm[oid]) > 1:\n warn(filename, tree, obj, \"duplicate-mnemonic\", \"is referenced by multiple mnemonic_widget \" + elms_names_lines(mnemonic_for_elm[oid]))\n\n # Check member-of\n member_of = check_rels(filename, tree, root, obj, \"member-of\")\n\n # Labels special case\n if klass in widgets_labels:\n properties = check_props(filename, tree, root, obj, \"mnemonic_widget\") + \\\n check_props(filename, tree, root, obj, \"mnemonic-widget\")\n if len(properties) > 1:\n err(filename, tree, obj, \"multiple-mnemonic\", \"has multiple mnemonic_widgets properties\"\n \"%s\" % elms_lines(properties))\n\n # Emit orphaning warnings\n if warn_orphan_labels or orphan_widgets:\n is_orphan_label(filename, tree, root, obj, orphan_root, True)\n\n # We are done with the label\n return\n\n # Not a label, will perhaps need one\n\n # Emit orphaning warnings\n is_orphan_widget(filename, tree, root, obj, orphan_labels, orphan_root, True)\n\n root = tree.getroot()\n\n # Flush ids and relations from previous files\n ids = {}\n ids_dup = {}\n labelled_by_elm = {}\n label_for_elm = {}\n described_by_elm = {}\n description_for_elm = {}\n mnemonic_for_elm = {}\n\n # First pass to get links into hash tables, no warning, just record 
duplicates\n for obj in root.iter('object'):\n oid = obj.attrib.get('id')\n if oid is not None:\n if oid not in ids:\n ids[oid] = obj\n else:\n ids_dup[oid] = True\n\n labelled_by = obj.findall(\"accessibility/relation[@type='labelled-by']\")\n labelled_by += obj.findall(\"accessibility/relation[@name='labelled-by']\")\n for rel in labelled_by:\n target = rel.attrib.get('target')\n if target is not None:\n if target not in labelled_by_elm:\n labelled_by_elm[target] = [ obj ]\n else:\n labelled_by_elm[target].append(obj)\n\n label_for = obj.findall(\"accessibility/relation[@type='label-for']\")\n label_for += obj.findall(\"accessibility/relation[@name='label-for']\")\n for rel in label_for:\n target = rel.attrib.get('target')\n if target is not None:\n if target not in label_for_elm:\n label_for_elm[target] = [ obj ]\n else:\n label_for_elm[target].append(obj)\n\n described_by = obj.findall(\"accessibility/relation[@type='described-by']\")\n described_by += obj.findall(\"accessibility/relation[@name='described-by']\")\n for rel in described_by:\n target = rel.attrib.get('target')\n if target is not None:\n if target not in described_by_elm:\n described_by_elm[target] = [ obj ]\n else:\n described_by_elm[target].append(obj)\n\n description_for = obj.findall(\"accessibility/relation[@type='description-for']\")\n description_for += obj.findall(\"accessibility/relation[@name='description-for']\")\n for rel in description_for:\n target = rel.attrib.get('target')\n if target is not None:\n if target not in description_for_elm:\n description_for_elm[target] = [ obj ]\n else:\n description_for_elm[target].append(obj)\n\n mnemonic_for = obj.findall(\"property[@name='mnemonic_widget']\") + \\\n obj.findall(\"property[@name='mnemonic-widget']\")\n for rel in mnemonic_for:\n target = rel.text\n if target is not None:\n if target not in mnemonic_for_elm:\n mnemonic_for_elm[target] = [ obj ]\n else:\n mnemonic_for_elm[target].append(obj)\n\n # Second pass, recursive depth-first, to be able to efficiently know whether\n # there are orphan labels within a part of the tree.\n def recurse(orphan_root, obj, orphan_labels, orphan_widgets):\n if obj == root or is_labelled_parent(obj):\n orphan_root = obj\n orphan_labels, orphan_widgets = orphan_items(filename, tree, root, obj)\n\n if obj.tag == 'object':\n check_elm(orphan_root, obj, orphan_labels, orphan_widgets)\n\n for o in obj:\n recurse(orphan_root, o, orphan_labels, orphan_widgets)\n\n recurse(root, root, False, False)", "def test_stratis_list_default(self):\n for subcommand in [[\"pool\"], [\"filesystem\"], [\"blockdev\"]]:\n for prefix in [[], [\"--propagate\"]]:\n self.assertEqual(RUNNER(prefix + subcommand), 0)", "def pytest_can_run_together(item1, item2):", "def test_series_in_features(self):\n assert parse_command({'test{{A,B}}': {'depends_on': 'name{{A,B}}'}}) == [\n ('testA', {'depends_on': 'nameA'}), ('testB', {'depends_on': 'nameB'})]", "def test_first_level_from_bids_with_subject_labels(bids_dataset):\n warning_message = ('Subject label foo is not present in'\n ' the dataset and cannot be processed')\n with pytest.warns(UserWarning, match=warning_message):\n models, *_ = first_level_from_bids(\n dataset_path=bids_dataset,\n task_label='main',\n sub_labels=[\"foo\", \"01\"],\n space_label='MNI',\n img_filters=[('desc', 'preproc')],\n slice_time_ref=None,\n )\n\n assert models[0].subject_label == '01'", "def test_pathop12(self):\n xpb = XPathBuilder()\n # braces not needed\n xp = xpb.foo & (xpb.bar.foo).parenthesize() | xpb.foobar\n exp = '/foo 
and (/bar/foo) or /foobar'\n self.assertEqual(xp.tostring(), exp)", "def test_dummy6(self):\n xpb = XPathBuilder()\n xp = xpb.dummy()\n xp = xpb.bar | xp\n exp = '/bar'\n self.assertEqual(xp.tostring(), exp)", "def test_create_labels(self):\n test_labels = {\"app\": \"my_app\", \"host\": \"examplehost\"}\n labels = pmp.utils.create_labels(test_labels)\n self.assertIsInstance(labels, list)\n self.assertTrue(all([isinstance(x, pmp.LabelPair) for x in labels]))", "def test_xdist_and_select_test_by_bdd_label(xdist_runner: AllurePytestRunner):\n\n output = xdist_runner.run_docstring(\"-v\", \"--allure-features=boo\", \"-n1\")\n\n assert_that(output, has_only_testcases(\n has_entry(\n \"fullName\",\n ends_with(\"test_with_feature_boo\")\n )\n ))", "def test_nested_condition() -> None:\n\n @argcomb(Or(And(\"a\", \"b\"), And(\"c\", \"d\")))\n def f(a: Any = None, b: Any = None, c: Any = None, d: Any = None) -> None:\n ...\n\n # valid\n f(a=1, b=1)\n f(c=1, d=1)\n f(a=1, b=1, c=1, d=1)\n\n # invalid\n with pytest.raises(InvalidArgumentCombination):\n f(a=1)\n with pytest.raises(InvalidArgumentCombination):\n f(a=1, c=1)\n with pytest.raises(InvalidArgumentCombination):\n f()", "def test_abbreviation(self):\n self.assertEqual(self.compound.abbreviation, \"Cool\")", "def test_general_subset_all():\n pass", "def test_check_tree_exact_match(self):\r\n\r\n fasta_labels = ['seq1_1', 'seq1_2', 'seq2_3', 'seq3_4']\r\n\r\n actual_subset_results = check_tree_exact_match(fasta_labels,\r\n self.sample_tree_3tips_fp)\r\n\r\n # Should find all and give True, True result\r\n\r\n self.assertEqual(actual_subset_results, [True, True])\r\n\r\n # Should get tips not found in fasta labels with 5 tip tree\r\n\r\n fasta_labels = ['seq1_1', 'seq1_2', 'seq2_3', 'seq3_4']\r\n\r\n actual_subset_results = check_tree_exact_match(fasta_labels,\r\n self.sample_tree_5tips_fp)\r\n\r\n # Should find all and give True result\r\n\r\n self.assertEqual(actual_subset_results, [True, ['seq5', 'seq4']])\r\n\r\n # Change two of the fasta labels to not match tree tips\r\n\r\n fasta_labels = ['seq1_1', 'seqX_2', 'seq2_3', 'seqY_4']\r\n\r\n actual_subset_results = check_tree_exact_match(fasta_labels,\r\n self.sample_tree_5tips_fp)\r\n\r\n # Should find seqX and seqY as not being a subset\r\n\r\n self.assertEqual(actual_subset_results, [['seqX', 'seqY'],\r\n ['seq3', 'seq5', 'seq4']])", "def test_03_visit_special(self):", "def test_labels(ruler: SpaczzRuler) -> None:\n assert all(\n [label in ruler.labels for label in [\"GPE\", \"STREET\", \"DRUG\", \"NAME\", \"BAND\"]]\n )\n assert len(ruler.labels) == 5", "def test_createSubLinkographWithoutCommands(self):\n self.performTestForParams()", "def test_first_level_from_bids_several_labels_per_entity(tmp_path, entity):\n n_sub = 1\n n_ses = 1\n tasks = [\"main\"]\n n_runs = [1]\n\n bids_path = create_fake_bids_dataset(\n base_dir=tmp_path,\n n_sub=n_sub,\n n_ses=n_ses,\n tasks=tasks,\n n_runs=n_runs,\n entities={entity: [\"A\", \"B\"]},\n )\n\n models, m_imgs, m_events, m_confounds = first_level_from_bids(\n dataset_path=bids_path,\n task_label=\"main\",\n space_label=\"MNI\",\n img_filters=[(\"desc\", \"preproc\"), (entity, \"A\")],\n slice_time_ref=None,\n )\n\n _check_output_first_level_from_bids(n_sub,\n models,\n m_imgs,\n m_events,\n m_confounds)\n n_imgs_expected = n_ses * n_runs[0]\n assert len(m_imgs[0]) == n_imgs_expected", "def test_setter_child_list_tuple(self):\n root = netapp_api.NaElement('root')\n root['l'] = ['l1', 'l2']\n root['t'] = ('t1', 't2')\n l = 
root.get_child_by_name('l')\n self.assertIsInstance(l, netapp_api.NaElement)\n t = root.get_child_by_name('t')\n self.assertIsInstance(t, netapp_api.NaElement)\n for le in l.get_children():\n self.assertIn(le.get_name(), ['l1', 'l2'])\n for te in t.get_children():\n self.assertIn(te.get_name(), ['t1', 't2'])", "def test_tree_support(self):\r\n master_tree = parse_newick('((a:2,b:3)ab:2,(c:1,d:2)cd:7)rt;')\r\n \"\"\"\r\n /-------.5 /-a\r\n ---1| \\-b\r\n \\------.5 /-c\r\n \\-d\r\n \"\"\"\r\n t2 = parse_newick('((a:2,b:3,c:33)ho:2,d:7);') # abc are siblings\r\n\r\n tc.tree_support(master_tree, t2)\r\n assert_almost_equal(\r\n master_tree.getNodeMatchingName('rt').bootstrap_support, 1.0)", "def test_labels(self):\n return self._test_labels", "def test_issue_edit_label(self):\n pass", "def test_dbpa002_radio_items(dash_duo):\n app = Dash()\n\n options = {\n \"OptionA\": \"Option 1\",\n \"OptionB\": \"Option 2\",\n \"OptionC\": \"Option 3\",\n }\n\n value = \"OptionB\"\n\n with_keywords = RadioItems(\n options=options,\n value=value,\n id=\"with-keywords\",\n )\n without_keywords = RadioItems(options, value, id=\"without-keywords\")\n\n app.layout = html.Div([with_keywords, without_keywords])\n\n dash_duo.start_server(app)\n\n # Check values\n assert [\n a.get_attribute(\"value\")\n for a in dash_duo.wait_for_element(\n \"#with-keywords\"\n ).find_elements_by_tag_name(\"input\")\n ] == [\n a.get_attribute(\"value\")\n for a in dash_duo.wait_for_element(\n \"#without-keywords\"\n ).find_elements_by_tag_name(\"input\")\n ]\n\n # Check labels\n assert [\n a.text\n for a in dash_duo.wait_for_element(\n \"#with-keywords\"\n ).find_elements_by_tag_name(\"label\")\n ] == [\n a.text\n for a in dash_duo.wait_for_element(\n \"#without-keywords\"\n ).find_elements_by_tag_name(\"label\")\n ]", "def test(self):\n for doc, label in zip(self.test_docs(), self.test_labels()):\n yield doc, label", "def test_get_parent_type_name(self):\n pass", "def test_pathop8(self):\n xpb = XPathBuilder()\n xp = (xpb.foo.bar | xpb.foobar).parenthesize() & xpb.action.source\n exp = '(/foo/bar or /foobar) and /action/source'\n self.assertEqual(xp.tostring(), exp)", "def test_setter_child_list_tuple(self):\n root = netapp_api.NaElement('root')\n root['l'] = ['l1', 'l2']\n root['t'] = ('t1', 't2')\n l_element = root.get_child_by_name('l')\n self.assertIsInstance(l_element, netapp_api.NaElement)\n t = root.get_child_by_name('t')\n self.assertIsInstance(t, netapp_api.NaElement)\n for le in l_element.get_children():\n self.assertIn(le.get_name(), ['l1', 'l2'])\n for te in t.get_children():\n self.assertIn(te.get_name(), ['t1', 't2'])", "def __init__(self, root_node, label1, label2):\n super().__init__(self.PROBLEM_NAME)\n self.root_node = root_node\n self.label1 = label1\n self.label2 = label2", "def test_radioselect_field():", "def test_label_callback():\n release_numbers = dict(a='123')\n data = dict(revision='a', attributes=dict(b='c'))\n data2 = dict(revision='b', attributes=dict(d='e'))\n\n assert _label_callback(data, release_numbers) == u'a\\n- Release: 123\\n- b: c'\n assert _label_callback(data2) == u'b\\n- Release: Unknown\\n- d: e'", "def _make_simple_partition_label(chain_dict):\n\n cps = chain_dict['chainParts']\n if not (_select_simple_chainparts(cps)):\n raise NotImplementedError(\n 'chain fails substring selection: not \"simple\": %s' % (\n chain_dict['chainName']))\n \n label = 'simplepartition(['\n for cp in cps:\n smcstr = str(cp['smc'])\n if smcstr == 'nosmc':\n smcstr = ''\n for i in 
range(int(cp['multiplicity'])):\n # condition_str = '(%set,%s,%s)' % (str(cp['threshold']),\n # str(cp['etaRange']),\n # smcstr,)\n condition_str = '(%set,%s' % (str(cp['threshold']),\n str(cp['etaRange']),)\n if smcstr:\n condition_str += ',%s)'\n else:\n condition_str += ')'\n label += condition_str\n label += '])'\n return label", "def test_case1(self):\n\n graph = BipartiteGraph()\n\n graph.addEdge(\"supervisor1\",\"student1\")\n\n val1 = graph.getStudents(\"supervisor1\")\n val2 = graph.getSupervisors(\"student1\")\n\n expected1 = [\"student1\"]\n expected2 = [\"supervisor1\"]\n\n self.assertEqual((val1,val2),(expected1,expected2))", "def test_with_multiple_descriptions():\n soup = generate_case(\"with_descriptions\")\n\n tests.html_schema_doc_asserts.assert_descriptions(\n soup,\n [\n \"Exact address\",\n \"Exact address\",\n \"Delivery info depending on the delivery type\",\n \"The delivery is a gift, no prices displayed\",\n ],\n )", "def test_BuildModel2(self):\n print(\"\\nTest 6: Building a Model with Concat\")\n builder = StaticBuilder(\"Concat\")\n in1 = builder.addInput(10)\n in2 = builder.addInput(20)\n enc1 = builder.addInner(3, num_islots=2)\n out1 = builder.addOutput()\n\n builder.addDirectedLink(in1, enc1, islot=0)\n builder.addDirectedLink(in2, enc1, islot=1)\n builder.addDirectedLink(enc1, out1)\n \n builder.build()", "def test_multiple_label_traversals(self):\r\n TestEdge.create(self.v1, self.v2)\r\n OtherTestEdge.create(self.v1, self.v3)\r\n YetAnotherTestEdge.create(self.v1, self.v4)\r\n\r\n assert len(self.v1.outV()) == 3\r\n\r\n assert len(self.v1.outV(TestEdge)) == 1\r\n assert len(self.v1.outV(OtherTestEdge)) == 1\r\n assert len(self.v1.outV(YetAnotherTestEdge)) == 1\r\n\r\n out = self.v1.outV(TestEdge, OtherTestEdge)\r\n assert len(out) == 2\r\n assert self.v2.vid in [v.vid for v in out]\r\n assert self.v3.vid in [v.vid for v in out]\r\n\r\n out = self.v1.outV(OtherTestEdge, YetAnotherTestEdge)\r\n assert len(out) == 2\r\n assert self.v3.vid in [v.vid for v in out]\r\n assert self.v4.vid in [v.vid for v in out]", "def test_label_10_targets_with_a_b_c_false(self):\r\n user_input = '[{\"a\":\"target1\"}, \\\r\n {\"b\":\"target2\"},{\"c\":\"target3\"},{\"a\":\"target4\"},{\"b\":\"target5\"}, \\\r\n {\"c\":\"target6\"}, {\"a\":\"target7\"},{\"b\":\"target8\"},{\"c\":\"target9\"}, \\\r\n {\"a\":\"target1\"}]'\r\n correct_answer = [\r\n {\r\n 'draggables': ['a'],\r\n 'targets': ['target1', 'target4', 'target7', 'target10'],\r\n 'rule': 'unordered_equal'\r\n },\r\n {\r\n 'draggables': ['b'],\r\n 'targets': ['target2', 'target5', 'target8'],\r\n 'rule': 'unordered_equal'\r\n },\r\n {\r\n 'draggables': ['c'],\r\n 'targets': ['target3', 'target6', 'target9'],\r\n 'rule': 'unordered_equal'\r\n }\r\n ]\r\n self.assertFalse(draganddrop.grade(user_input, correct_answer))" ]
[ "0.6972838", "0.6137433", "0.60195917", "0.5986544", "0.5936767", "0.58589", "0.5759686", "0.56950617", "0.5624841", "0.55902916", "0.55750483", "0.5560212", "0.5554626", "0.55542696", "0.5553175", "0.5553175", "0.55467266", "0.5503819", "0.5457172", "0.5451371", "0.54313546", "0.53999746", "0.5376586", "0.5375577", "0.5364844", "0.5359766", "0.535864", "0.5338235", "0.53258616", "0.53186446", "0.5317656", "0.5315998", "0.5315065", "0.528178", "0.5272056", "0.5271867", "0.5262255", "0.5256516", "0.5254785", "0.5251119", "0.5245671", "0.52348095", "0.5232146", "0.5209617", "0.520278", "0.52023655", "0.51976585", "0.51973414", "0.5193984", "0.5191381", "0.51910853", "0.5168662", "0.51612574", "0.5156716", "0.5155904", "0.5155904", "0.5155904", "0.515586", "0.51550406", "0.51405126", "0.51366323", "0.5126447", "0.51249856", "0.5119241", "0.5116798", "0.5115688", "0.5107617", "0.510387", "0.5094173", "0.5092521", "0.5089139", "0.50838256", "0.5078707", "0.5075308", "0.50716203", "0.5061532", "0.5061452", "0.50607544", "0.50605214", "0.50554216", "0.50476885", "0.5047116", "0.50363714", "0.5028754", "0.5026291", "0.5024467", "0.50243545", "0.5023076", "0.50198036", "0.5014891", "0.5011064", "0.5007032", "0.50008494", "0.49937844", "0.49911672", "0.49879867", "0.49861643", "0.49833554", "0.49800858", "0.49767923" ]
0.6108757
2
Entry point to this Module. Return a chain label according to the value of cp['hypoScenario'], where cp is an element of list/ chainDict['chainPart']
def chainDict2jetLabel(chain_dict):

    # suported scenarios
    router = {
        'simple': _make_simple_label,
        'HT': _make_ht_label,
        'vbenf': _make_vbenf_label,
        'dijet': _make_dijet_label,
        'combinationsTest': _make_combinationsTest_label,
        'partitionsTest': _make_partitionsTest_label,
    }

    # chain_part - scenario association
    cp_sorter = {}
    for k in router:
        cp_sorter[k] = []

    for cp in chain_dict['chainParts']:
        if cp['signature'] != 'Jet' and cp['signature'] != 'Bjet':
            continue
        for k in cp_sorter:
            if cp['hypoScenario'].startswith(k):
                cp_sorter[k].append(cp)
                break

    # obtain labels by scenario.
    labels = []
    for k, chain_parts in cp_sorter.items():
        if chain_parts:
            labels.append(router[k](chain_parts))

    assert labels

    nlabels = len(labels)

    if nlabels == 1:
        return labels[0]

    if nlabels == 2:
        alabel = """\
and([]
    %s
    %s)""" % (tuple(labels))
        return alabel

    # more than 2 labels is not expected
    assert False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _make_ht_label(chain_parts):\n\n assert len(chain_parts) == 1, '_make_ht_label, no. of chain parts != 1'\n scenario = chain_parts[0]['hypoScenario']\n \n assert scenario.startswith('HT'), '_make_ht_label(): scenario does not start with HT'\n\n arg_res = [\n re.compile(r'^(?P<lo>\\d*)(?P<key>ht)(?P<hi>\\d*)$'),\n re.compile(r'^(?P<lo>\\d*)(?P<key>et)(?P<hi>\\d*)$'),\n re.compile(r'^(?P<lo>\\d*)(?P<key>eta)(?P<hi>\\d*)$'),\n ]\n\n defaults = {\n 'ht': ('0', 'inf'),\n 'et': ('0', 'inf'),\n 'eta': ('0', 'inf'),\n }\n\n\n args = _args_from_scenario(scenario)\n argvals = {}\n nargs = len(args)\n assert len(args) <= len(arg_res), 'bad num of args %d, expected < %d' % (len(args),\n len(arg_res))\n\n # obtain argument values frrom scenario\n while args:\n arg = args.pop()\n for r in arg_res:\n m = r.match(arg)\n if m is not None:\n arg_res.remove(r)\n gd = m.groupdict()\n key = gd['key']\n\n try:\n lo = float(gd['lo'])\n except ValueError:\n lo = float(defaults[key][0])\n argvals[key+'lo'] = lo \n try:\n hi = float(gd['hi'])\n except ValueError:\n hi = float(defaults[key][1])\n argvals[key+'hi'] = hi\n\n print (argvals)\n assert len(argvals) == 2*nargs, 'no of args: %d, expected %d' % (len(argvals), 2*nargs)\n\n print ('sent 100')\n result = \"\"\"\n ht([(%(htlo).0fht) \n (%(etlo).0fet)\n (%(etalo).0feta%(etahi).0f)\n ])\"\"\" % argvals\n print (result)\n return result", "def _make_combinationsTest_label(chain_parts):\n\n assert len(chain_parts) == 1\n scenario = chain_parts[0]['hypoScenario']\n \n assert scenario == 'combinationsTest'\n\n \n\n return \"\"\"\n combgen(\n [(2)(20et, 0eta320)]\n \n simple([(40et, 0eta320) (50et, 0eta320)])\n simple([(35et, 0eta240) (55et, 0eta240)])\n )\"\"\"", "def _make_simple_comb_label(chain_dict):\n\n cps = chain_dict['chainParts']\n if not (_select_simple_chainparts(cps)):\n raise NotImplementedError(\n 'chain fails substring selection: not \"simple\": %s' % (\n chain_dict['chainName']))\n \n simple_strs = []\n\n for cp in cps:\n print(cp)\n simple_strs.append(_make_simple_label([cp]))\n\n label = 'combgen([(%d)]' % len(cps)\n for s in simple_strs:\n label += ' %s ' % s\n label += ')'\n return label", "def _make_simple_label(chain_parts):\n \n if not _select_simple_chainparts(chain_parts):\n msg = 'Jet Configuration error: '\\\n 'chain fails substring selection: not \"simple\" '\n\n raise NotImplementedError(msg)\n \n label = 'simple(['\n for cp in chain_parts:\n smcstr = str(cp['smc'])\n jvtstr = str(cp['jvt'])\n if smcstr == 'nosmc':\n smcstr = ''\n for i in range(int(cp['multiplicity'])):\n # condition_str = '(%set,%s,%s)' % (str(cp['threshold']),\n # str(cp['etaRange']),\n # smcstr,)\n condition_str = '(%set,%s' % (str(cp['threshold']),\n str(cp['etaRange']),)\n if smcstr: # Run 2 chains have \"INF\" in the SMC substring\n condition_str += ',%s)' % smcstr.replace('INF','')\n elif jvtstr:\n condition_str += ',%s)' % jvtstr\n else:\n condition_str += ')'\n label += condition_str\n label += '])'\n return label", "def _make_simple_partition_label(chain_dict):\n\n cps = chain_dict['chainParts']\n if not (_select_simple_chainparts(cps)):\n raise NotImplementedError(\n 'chain fails substring selection: not \"simple\": %s' % (\n chain_dict['chainName']))\n \n label = 'simplepartition(['\n for cp in cps:\n smcstr = str(cp['smc'])\n if smcstr == 'nosmc':\n smcstr = ''\n for i in range(int(cp['multiplicity'])):\n # condition_str = '(%set,%s,%s)' % (str(cp['threshold']),\n # str(cp['etaRange']),\n # smcstr,)\n condition_str = '(%set,%s' % 
(str(cp['threshold']),\n str(cp['etaRange']),)\n if smcstr:\n condition_str += ',%s)'\n else:\n condition_str += ')'\n label += condition_str\n label += '])'\n return label", "def _make_partitionsTest_label(chain_parts):\n\n assert len(chain_parts) == 1\n scenario = chain_parts[0]['hypoScenario']\n \n assert scenario == 'partitionsTest'\n\n \n\n return \"\"\"\n partgen(\n [(20et, 0eta320)]\n \n simple([(40et, 0eta320) (50et, 0eta320)])\n simple([(35et, 0eta240) (55et, 0eta240)])\n )\"\"\"", "def _make_dijet_label(chain_parts):\n\n assert len(chain_parts) == 1\n scenario = chain_parts[0]['hypoScenario']\n \n assert scenario.startswith('dijet')\n\n arg_res = [\n re.compile(r'^(?P<lo>\\d*)(?P<key>djmass)(?P<hi>\\d*)$'),\n re.compile(r'^(?P<lo>\\d*)(?P<key>j1et)(?P<hi>\\d*)$'),\n re.compile(r'^(?P<lo>\\d*)(?P<key>j1eta)(?P<hi>\\d*)$'),\n re.compile(r'^(?P<lo>\\d*)(?P<key>j2et)(?P<hi>\\d*)$'),\n re.compile(r'^(?P<lo>\\d*)(?P<key>j2eta)(?P<hi>\\d*)$'),\n ]\n\n defaults = {\n 'j1et': ('100', 'inf'),\n 'j2et': ('100', 'inf'),\n 'j1eta': ('0', '320'),\n 'j2eta': ('0', '320'),\n 'djmass': ('1000', 'inf'),\n }\n\n\n args = _args_from_scenario(scenario)\n argvals = {}\n while args:\n assert len(args) == len(arg_res)\n arg = args.pop()\n for r in arg_res:\n m = r.match(arg)\n if m is not None:\n arg_res.remove(r)\n gd = m.groupdict()\n key = gd['key']\n\n try:\n lo = float(gd['lo'])\n except ValueError:\n lo = defaults[key][0]\n argvals[key+'lo'] = lo \n try:\n hi = float(gd['hi'])\n except ValueError:\n hi = defaults[key][1]\n argvals[key+'hi'] = hi\n\n assert len(args) == len(arg_res)\n assert len(args) == 0\n\n return \"\"\"\n combgen(\n [(2)(%(j1etlo).0fet, %(j1etalo).0feta%(j1etahi).0f)\n (%(j1etlo).0fet, %(j1etalo).0feta%(j1etahi).0f)\n ]\n \n dijet(\n [(%(djmasslo).0fdjmass)])\n simple([(%(j1etlo).0fet, %(j1etalo).0feta%(j1etahi).0f)\n (%(j2etlo).0fet, %(j2etalo).0feta%(j2etahi).0f)])\n )\"\"\" % argvals", "def get_label(self, hierarchy: List[str]) -> Any:", "def _make_vbenf_label(chain_parts):\n\n # toy label for development: run simple and dijet independently.\n # simple makes Et cuts on two jets. 
Independently (sharing possible)\n # of jets choosean by simple, the dijet\n # scenario requires a dijet of mass > 900, and opening angle in phi > 2.6\n\n assert len(chain_parts) == 1\n scenario = chain_parts[0]['hypoScenario']\n assert scenario.startswith('vbenf')\n args = _args_from_scenario(scenario)\n if not args:\n return 'and([]simple([(50et)(70et)])combgen([(2)] dijet([(900djmass, 26djdphi)])))' \n arg_res = [\n re.compile(r'(?P<lo>\\d*)(?P<key>fbet)(?P<hi>\\d*)'),\n re.compile(r'(?P<lo>\\d*)(?P<key>mass)(?P<hi>\\d*)'),\n re.compile(r'(?P<lo>\\d*)(?P<key>et)(?P<hi>\\d*)'),\n ]\n\n defaults = {\n 'et': ('101', 'inf'),\n 'mass': ('800', 'inf'),\n 'fbet': ('501', 'inf'),\n }\n\n argvals = {}\n while args:\n assert len(args) == len(arg_res)\n arg = args.pop()\n for r in arg_res:\n m = r.match(arg)\n if m is not None:\n arg_res.remove(r)\n gd = m.groupdict()\n key = gd['key']\n try:\n lo = float(gd['lo'])\n except ValueError:\n lo = defaults[key][0]\n argvals[key+'lo'] = lo \n try:\n hi = float(gd['hi'])\n except ValueError:\n hi = defaults[key][1]\n argvals[key+'hi'] = hi\n\n assert len(args) == len(arg_res)\n assert len(args) == 0\n\n return \"\"\"\n and\n (\n []\n simple\n (\n [(%(etlo).0fet, 500neta)(%(etlo).0fet, peta500)]\n )\n combgen\n (\n [(10et, 0eta320)]\n dijet\n (\n [(%(masslo).0fdjmass, 26djdphi)]\n ) \n simple\n (\n [(10et, 0eta320)(20et, 0eta320)]\n )\n )\n )\"\"\" % argvals", "def chain():\n chain_identifier, url = get_vars(request, [\"id\", \"data\"])\n info('chain=%s' % chain_identifier)\n chain = LAPPS_SERVICE_CHAINS.get_chain(chain_identifier)\n info('source-url=%s' % url)\n data = requests.get(url).text\n result = chain.run({\n \"discriminator\": \"http://vocab.lappsgrid.org/ns/media/text\", \n \"payload\": data})\n info(\"discriminator=%s\" % result.get('discriminator'))\n return render_template(\"chain.html\",\n chain=chain,\n fname=url,\n result=result,\n builder=HtmlBuilder())", "def chain_name(self) -> str:\n return pulumi.get(self, \"chain_name\")", "def _get_label(self):\n if self.model.name == '':\n return \"KPI\"\n return \"KPI: {} ({})\".format(self.model.name, self.model.objective)", "def get_extra_label(self, label_name: str, hierarchy: List[str]) -> Any:", "def chainLabel(self,i):\n assert(i >= 0 and i < self.nAtoms())\n assert(self._c_structure is not NULL)\n cdef char label[2]\n label[0] = freesasa_structure_atom_chain(self._c_structure,i)\n label[1] = '\\0'\n return label", "def __init__(self, **kwargs):\n\n polymer_type = \"PE\"\n helice = Helice()\n num_monomers = 30\n tacticity = \"\"\n chiriality = \"\"\n head_tail_defect_ratio = 0\n configs = 30\n infinite = False\n\n for key in kwargs:\n if key == \"polymer_type\":\n polymer_type = kwargs[\"polymer_type\"]\n elif key == \"helice\":\n helice = kwargs[\"helice\"]\n elif key == \"num_monomers\":\n num_monomers = kwargs[\"num_monomers\"]\n if is_integer_num(num_monomers):\n if num_monomers < 1:\n raise ValueError(\n \"Number of monomers should be equal or larger than 1\"\n )\n else:\n raise ValueError(\"Number of monomers should be an integer\")\n elif key == \"tacticity\":\n tacticity = kwargs[\"tacticity\"]\n elif key == \"chiriality\":\n chiriality = kwargs[\"chiriality\"]\n elif key == \"head_tail_defect_ratio\":\n head_tail_defect_ratio = kwargs[\"head_tail_defect_ratio\"]\n elif key == \"configs\":\n configs = kwargs[\"configs\"]\n elif key == \"infinite\":\n infinite = kwargs[\"infinite\"]\n else:\n raise KeyError(\n \"Unknown input %s for Chain class\\n Please see help for more 
information\"\n % key\n )\n\n if polymer_type not in polymer_types:\n raise ValueError(\n polymer_type\n + \" do not exist in our library, please consider using custom feature\"\n )\n self.polymer_type = polymer_types[polymer_type]\n\n if self.polymer_type.helicity:\n self.custom = 0\n else:\n self.custom = 1\n\n if self.custom:\n print(\"Warning: Custom type, only read helice motifs and turns info\")\n self.helice = helice\n\n if not 0 <= (head_tail_defect_ratio) <= 1:\n raise ValueError(\n \"Defect ratio of head to head and tail to tail connections is\",\n head_tail_defect_ratio,\n \"and should be in the range of [0,1]\",\n )\n self.head_tail_defect_ratio = head_tail_defect_ratio\n\n self.unit_num_monomers = 1\n if \"num_monomers\" not in kwargs:\n if infinite:\n num_monomers = 2\n else:\n num_monomers = 1\n\n self.num_monomers = num_monomers\n\n self.tacticity = tacticity\n if self.tacticity:\n if self.tacticity == \"N/A\":\n self.tacticity = \"\"\n else:\n print(\"Warning: Custom type does not have tacticity\")\n self.tacticity = \"\"\n\n self.chiriality = chiriality\n if self.chiriality:\n if self.chiriality == \"N/A\":\n self.chiriality = \"\"\n else:\n print(\"Warning: Custom type does not have chiriality\")\n self.chiriality = \"\"\n\n self.infinite = infinite\n\n else:\n monomer_backbone_atoms = len(self.polymer_type.backbone_atoms)\n\n if helice.atoms % monomer_backbone_atoms:\n raise Exception(\n \"Number of backbone atoms in a motif must be multiple of number of monomer backbone atoms %d\\n\"\n % monomer_backbone_atoms\n )\n if tacticity == \"syndiotactic\":\n multiple = int(monomer_backbone_atoms * 2 / helice.atoms)\n if (multiple * helice.atoms) % (monomer_backbone_atoms * 2):\n raise Exception(\n \"Number of backbone atoms in a motif for syndiotactic configuration must be multiple of twice of \\\n the number of monomer backbone atoms %d\\n\"\n % monomer_backbone_atoms\n * 2\n )\n elif multiple != 1:\n print(\n \"Number of backbone atoms in a motif for syndiotactic configuration should be multiple of twice \\\n of the number of monomer backbone atoms %d\\n\"\n % (monomer_backbone_atoms * 2)\n )\n print(\n \"Trying Helice_%d_%d_%d...\"\n % (\n helice.atoms * multiple,\n helice.motifs,\n helice.turns * multiple,\n )\n )\n helice = Helice(\n helice.atoms * multiple, helice.motifs, helice.turns * multiple\n )\n # else:\n # if monomer_backbone_atoms != helice.atoms:\n # raise ValueError(\"Number of backbone atoms in a motif must be %d\" % helice.atoms)\n helice_backbone_atoms = helice.atoms * helice.motifs\n self.helice = helice\n\n if not 0 <= (head_tail_defect_ratio) <= 1:\n raise ValueError(\n \"Defect ratio of head to head and tail to tail connections is\",\n head_tail_defect_ratio,\n \"and should be in the range of [0,1]\",\n )\n self.head_tail_defect_ratio = head_tail_defect_ratio\n\n self.unit_num_monomers = int(helice_backbone_atoms / monomer_backbone_atoms)\n if \"num_monomers\" not in kwargs:\n if infinite:\n if tacticity == \"atactic\" or head_tail_defect_ratio:\n num_monomers = 10 * self.unit_num_monomers\n elif helice_backbone_atoms > 2:\n num_monomers = self.unit_num_monomers\n else:\n num_monomers = 2\n\n if num_monomers < self.unit_num_monomers:\n raise ValueError(\n \"Number of monomers should be equal or larger than %d in order to generate Helice_%s chain.\\nCurrent \\\n number of monomers is %d\"\n % (self.unit_num_monomers, helice, num_monomers)\n )\n\n if infinite:\n if num_monomers % self.unit_num_monomers:\n raise ValueError(\n \"Number of monomers should be 
multiple of %d in order to generate infinite periodic Helice_%s \\\n chain.\\nCurrent number of monomers is %d\"\n % (self.unit_num_monomers, helice, num_monomers)\n )\n elif num_monomers * monomer_backbone_atoms < 3:\n raise ValueError(\n \"Number of backbone atoms should be more than 2 in order to create infinite periodic \\\n chain.\\nCurrent number of backbone atoms along the periodic chain is %d\\nPlease increate \\\n number of monomers.\"\n % (num_monomers * monomer_backbone_atoms)\n )\n self.num_monomers = num_monomers + 2 if infinite else num_monomers\n\n self.tacticity = tacticity\n if self.tacticity:\n if self.tacticity == \"N/A\":\n self.tacticity = \"\"\n elif self.tacticity not in [\"isotactic\", \"atactic\", \"syndiotactic\"]:\n raise TypeError(\n \"Unknown tacticity, please specify one of the following: isotactic, atactic and syndiotactic\"\n )\n elif not self.polymer_type.side_atom:\n raise ValueError(\"Please specify side_atom\")\n\n self.chiriality = chiriality\n if str(self.helice) in [\"2_1_1\", \"4_1_2\"]:\n self.torsion_seq = [180, 180, 180, 180]\n if self.chiriality:\n self.chiriality = \"\"\n print(\"Zig-zag conformation does not have chiriality\")\n elif str(self.helice) in [\"2_2_1\", \"4_2_2\"]:\n if self.chiriality == \"left\":\n self.torsion_seq = [300, 300, 300, 300]\n elif self.chiriality == \"right\":\n self.torsion_seq = [60, 60, 60, 60]\n else:\n raise ValueError(\"Please specify chiriality: left or right\")\n elif str(self.helice) in [\"2_3_1\", \"4_3_2\"]:\n if self.chiriality == \"left\":\n self.torsion_seq = [180, 300, 180, 300]\n elif self.chiriality == \"right\":\n self.torsion_seq = [60, 180, 60, 180]\n else:\n raise ValueError(\"Please specify chiriality: left or right\")\n elif str(self.helice) == \"4_1_1\":\n self.torsion_seq = [60, 180, 300, 180]\n if self.chiriality:\n self.chiriality = \"\"\n print(\"Helice_4_1_1 conformation does not have chiriality\")\n elif str(self.helice) == \"4_2_1\":\n if self.chiriality == \"left\":\n self.torsion_seq = [180, 180, 300, 300]\n elif self.chiriality == \"right\":\n self.torsion_seq = [60, 60, 180, 180]\n else:\n raise ValueError(\"Please specify chiriality: left or right\")\n elif str(self.helice) == \"4_3_1\":\n if self.chiriality == \"left\":\n if self.helice.sub_type:\n self.torsion_seq = [180, 300, 300, 300]\n else:\n self.torsion_seq = [180, 180, 180, 300]\n elif self.chiriality == \"right\":\n if self.helice.sub_type:\n self.torsion_seq = [60, 60, 60, 180]\n else:\n self.torsion_seq = [60, 180, 180, 180]\n else:\n raise ValueError(\"Please specify chiriality: left or right\")\n else:\n raise Exception(\"Helice_%s is currently not supported\" % self.helice)\n\n self.configs = configs\n self.infinite = infinite\n # self.pattern = 0\n self.monomers = []\n self.weights = {}", "def describe(self):\n branch = randint(0,62)\n \n if 0 <= branch <= 29: \n if self.casteOrder[0] == 'soldiers':\n if self.genesis == 'escape persecution': \n self.description = '{2}: A full service {3} for retired {1}'.format(choice(self.__class__.badjectives), choice(self.__class__.soldierSynonyms), self.name, choice(self.__class__.settlements[self.tech])) \n elif self.genesis == 'maintain control': \n self.description = 'The penal mining colony for {0} {1} on {2}'.format(choice(self.__class__.badjectives), choice(self.__class__.casteSynonyms[self.casteOrder[5]]), self.name, choice(self.__class__.settlements[self.tech])) \n elif self.genesis == 'explore the unknown':\n self.description = 'The frontier garrison and {2} {0} 
recently conquered {1}'.format(choice(self.__class__.techPreps[self.tech]), self.name, choice(self.__class__.settlements[self.tech]))\n elif self.genesis == 'seed the galaxy with life':\n self.description = 'The militarized {3} and bonsai garden for the mandatory contemplation of {0} {1} {2}'.format(choice(self.__class__.seedIdeals), choice(self.__class__.techPreps[self.tech]), self.name, choice(self.__class__.settlements[self.tech]))\n elif self.genesis == 'spread the gospel':\n self.description = 'An outpost for crusaders who took up arms to defend {2} {3} on {0} {1}'.format(choice(self.__class__.techAdjectives[self.tech]), self.name, choice(self.__class__.badjectives), choice(self.__class__.priestSynonyms)) \n elif self.casteOrder[0] == 'scientists':\n if self.genesis == 'escape persecution': \n self.description = 'A sanctuary for {0} who challenged the dogma of powerful {1} {2} {3}'.format(choice(self.__class__.scientistSynonyms), choice(self.__class__.priestSynonyms), choice(self.__class__.techPreps[self.tech]), self.name) \n elif self.genesis == 'maintain control': \n self.description = '{0}, home to a coalition of {1} eugenicists and their {2} servants'.format(self.name, choice(self.__class__.scientistAdjectives), choice(self.__class__.casteAdjectives[self.casteOrder[5]]))\n elif self.genesis == 'explore the unknown':\n self.description = 'An Extremely Large Hadron Collider {0} {1} {2}'.format(choice(self.__class__.techPreps[self.tech]), choice(self.__class__.techAdjectives[self.tech]), self.name)\n elif self.genesis == 'seed the galaxy with life':\n self.description = 'The research institute and {2} for abiogenesis on {0} {1}'.format(choice(self.__class__.techAdjectives[self.tech]), self.name, choice(self.__class__.settlements[self.tech]))\n elif self.genesis == 'spread the gospel':\n self.description = 'The Galactic Academy of Sciences founded by {0} {1} {2} {3}'.format(choice(self.__class__.scientistAdjectives), choice(self.__class__.scientistSynonyms), choice(self.__class__.techPreps[self.tech]), self.name)\n elif self.casteOrder[0] == 'laborers':\n if self.genesis == 'escape persecution':\n self.description = 'The {0} for unionized {1} {2} {3}'.format(choice(self.__class__.settlements[self.tech]), choice(self.__class__.laborerSynonyms), choice(self.__class__.techPreps[self.tech]), self.name)\n elif self.genesis == 'maintain control':\n self.description = '{0} {1}: divisional headquarters for the communist party in this region of the galaxy (where evidence of {2} has been redacted)'.format(choice(self.__class__.techAdjectives[self.tech]).capitalize(), self.name, choice(self.__class__.controlIdeals))\n elif self.genesis == 'explore the unknown':\n self.description = 'A team of robot {0} sent to survey {1} {2}'.format(choice(self.__class__.laborerSynonyms), choice(self.__class__.techAdjectives[self.tech]), self.name)\n elif self.genesis == 'seed the galaxy with life':\n self.description = 'Drones sent to terraform {0} {1} and build {2}'.format(choice(self.__class__.techAdjectives[self.tech]), self.name, choice(self.__class__.seedPlaces))\n elif self.genesis == 'spread the gospel':\n self.description = 'The {0} {1} {2} where a hard day\\'s work is highly valued'.format(choice(self.__class__.settlements[self.tech]), choice(self.__class__.techPreps[self.tech]), self.name)\n elif self.casteOrder[0] == 'merchants':\n if self.genesis == 'escape persecution':\n self.description = '{0} {1}: a refuge for {2} who fled a communist revolution on their home 
planet'.format(choice(self.__class__.techAdjectives[self.tech]).capitalize(), self.name, choice(self.__class__.merchantSynonyms))\n elif self.genesis == 'maintain control':\n self.description = 'The monopolistic conglomerate of {0} {1} {2} {3}'.format(choice(self.__class__.merchantAdjectives), choice(self.__class__.merchantSynonyms), choice(self.__class__.techPreps[self.tech]), self.name)\n elif self.genesis == 'explore the unknown':\n self.description = '{3} {0}, home to a consortium of {1} seeking to monetize {2}'.format(self.name, choice(self.__class__.merchantSynonyms), choice(self.__class__.genesisIdeals[choice(self.__class__.genesisReasons)]), choice(self.__class__.techAdjectives[self.tech]).capitalize())\n elif self.genesis == 'seed the galaxy with life':\n self.description = 'An oligarchy of wealthy {0} who recently opened a for-profit {1} on {2}'.format(choice(self.__class__.merchantSynonyms), choice(self.__class__.settlements[self.tech]), self.name)\n elif self.genesis == 'spread the gospel':\n self.description = 'An orbital printing press that rains down copies of {0} onto {1} {2}'.format(choice(self.__class__.merchantBooks), choice(self.__class__.techAdjectives[self.tech]), self.name)\n elif self.casteOrder[0] == 'artists':\n if self.genesis == 'escape persecution':\n self.description = 'The {0}\\' commune for the free and naked expression of {1} {2} {3}'.format(choice(self.__class__.artistSynonyms), choice(self.__class__.escapeIdeals), choice(self.__class__.techPreps[self.tech]), self.name)\n elif self.genesis == 'maintain control':\n self.description = 'The {0} with mandatory art classes on {1} {2}'.format(choice(self.__class__.settlements[self.tech]), choice(self.__class__.techAdjectives[self.tech]), self.name)\n elif self.genesis == 'explore the unknown':\n self.description = '{0} {1}, an observation deck where {2} observe naked and confined {3} to better understand the mysteries of {4}'.format(choice(self.__class__.techPreps[self.tech]).capitalize(), self.name, choice(self.__class__.artistSynonyms), choice(self.__class__.casteSynonyms[self.casteOrder[5]]), choice(self.__class__.genesisIdeals[choice(self.__class__.genesisReasons)]))\n elif self.genesis == 'seed the galaxy with life':\n self.description = 'The {0} for {1} {2} who choose to better understand their craft through lovemaking on {3}'.format(choice(self.__class__.settlements[self.tech]), choice(self.__class__.seedAdjectives), choice(self.__class__.artistSynonyms), self.name)\n elif self.genesis == 'spread the gospel':\n self.description = 'A {0} on {1} that hosts an annual conference for pop {2}'.format(choice(self.__class__.settlements[self.tech]), self.name, choice(self.__class__.artistSynonyms))\n elif self.casteOrder[0] == 'priests':\n if self.genesis == 'escape persecution':\n self.description = '{3} {0}, home to an order of heretical {1} who reject the {2} doctrine of their people'.format(self.name, choice(self.__class__.priestSynonyms), choice(self.__class__.gospelAdjectives), choice(self.__class__.techAdjectives[self.tech]).capitalize())\n elif self.genesis == 'maintain control':\n self.description = 'The orthodox {0} for {1} {2} {3} {4}'.format(choice(self.__class__.gospelPlaces), choice(self.__class__.gospelAdjectives), choice(self.__class__.priestSynonyms), choice(self.__class__.techPreps[self.tech]), self.name)\n elif self.genesis == 'explore the unknown':\n self.description = '{0} {1}, a seminary for the metaphysical contemplation of 
{2}'.format(choice(self.__class__.techPreps[self.tech]).capitalize(), self.name, choice(self.__class__.gospelIdeals))\n elif self.genesis == 'seed the galaxy with life':\n self.description = 'The marriage counseling clinic and devotional {2} {0} {1}'.format(choice(self.__class__.techPreps[self.tech]), self.name, choice(self.__class__.settlements[self.tech])) \n elif self.genesis == 'spread the gospel':\n self.description = 'A {0} mission on {1} {2} for converting {3} natives'.format(choice(self.__class__.gospelAdjectives), choice(self.__class__.techAdjectives[self.tech]), self.name, choice(self.__class__.badjectives))\n \n elif 30 <= branch <= 44:\n if self.tech == 'pre-industrial technology':\n if self.genesis == 'explore the unknown':\n self.description = '{0}, home to a historical reenactment society of {2} {1}'.format(self.name, choice(self.__class__.casteSynonyms[self.casteOrder[0]]), choice(self.__class__.badjectives))\n elif self.genesis == 'escape persecution':\n self.description = '{1}: a {2} of {0} Luddites'.format(choice(self.__class__.casteAdjectives[self.casteOrder[0]]), self.name, choice(self.__class__.preindustrialSettlements))\n elif self.genesis == 'maintain control':\n self.description = 'The {0} fiefdoms of {1}, where {2} serve their {3} liege lord'.format(choice(self.__class__.preindustrialAdjectives), self.name, choice(self.__class__.casteSynonyms[self.casteOrder[5]]), choice(self.__class__.casteAdjectives[self.casteOrder[0]]))\n elif self.genesis == 'seed the galaxy with life':\n self.description = 'A harem of {0} {1} on medieval {2}'.format(choice(self.__class__.seedAdjectives), choice(self.__class__.casteSynonyms[self.casteOrder[0]]), self.name)\n elif self.genesis == 'spread the gospel':\n self.description = 'The Puritan {0} for evangelical {1} on {2}'.format(choice(self.__class__.preindustrialSettlements), choice(self.__class__.casteSynonyms[self.casteOrder[0]]), self.name)\n elif self.tech == 'machine technology':\n if self.genesis == 'explore the unknown':\n self.description = 'An array of radio telescopes outside the {0} on {1} {2}'.format(choice(self.__class__.machineSettlements), choice(self.__class__.machineAdjectives), self.name)\n elif self.genesis == 'escape persecution':\n self.description = 'Public housing for {0} {1} gentrified {2}'.format(choice(self.__class__.casteSynonyms[self.casteOrder[0]]), choice(self.__class__.machinePreps), self.name) \n elif self.genesis == 'maintain control':\n self.description = 'The juvenile detention center for {0} children who obsess over {1} on {2}'.format(choice(self.__class__.badjectives), choice(self.__class__.genesisIdeals[choice(self.__class__.genesisReasons)]), self.name)\n elif self.genesis == 'seed the galaxy with life':\n self.description = 'Just a giant cruise ship full of {0} {1} on {2}'.format(choice(self.__class__.seedAdjectives), choice(self.__class__.casteSynonyms[self.casteOrder[0]]), self.name)\n elif self.genesis == 'spread the gospel':\n self.description = 'A megachurch run by {0} {1}, broadcasting live from {2}'.format(choice(self.__class__.priestAdjectives), choice(self.__class__.casteSynonyms[self.casteOrder[0]]), self.name)\n elif self.tech == 'ubiquitous technology':\n if self.genesis == 'explore the unknown':\n self.description = 'A quantum data center for simulating the lives of {0} {1} {2} {3}'.format(choice(self.__class__.casteAdjectives[self.casteOrder[0]]), choice(self.__class__.casteSynonyms[choice(self.__class__.castes)]), choice(self.__class__.techPreps[self.tech]), self.name)\n elif 
self.genesis == 'escape persecution':\n self.description = 'The cryogenics facility for the preservation of {0} {1} {2} {3}'.format(choice(self.__class__.badjectives), choice(self.__class__.casteSynonyms[self.casteOrder[0]]), choice(self.__class__.techPreps[self.tech]), self.name)\n elif self.genesis == 'maintain control':\n self.description = 'Orbiting {0}: a socially stratified {1} governed by {2}'.format(self.name, choice(self.__class__.ubiqitousSettlements), choice(self.__class__.casteSynonyms[self.casteOrder[0]]))\n elif self.genesis == 'seed the galaxy with life':\n self.description = '{0} {1}, home to an assembly plant for android {2}'.format(choice(self.__class__.ubiqitousAdjectives).capitalize(), self.name, choice(self.__class__.casteSynonyms[self.casteOrder[0]]))\n elif self.genesis == 'spread the gospel':\n self.description = '{0}, home to an anthropological society of {1} who covertly inject the theme of {2} into the folklore of other civilizations'.format(self.name, choice(self.__class__.casteSynonyms[self.casteOrder[0]]), choice(self.__class__.genesisIdeals[choice(self.__class__.genesisReasons)]))\n \n elif 45 <= branch <= 62:\n if self.casteOrder[0] == 'laborers':\n if self.tech == 'pre-industrial technology':\n self.description = 'A kibbutz for {0} {1} on {2}'.format(choice(self.__class__.genesisAdjectives[self.genesis]), choice(self.laborerSynonyms), self.name)\n elif self.tech == 'machine technology':\n self.description = 'The worker-owned and operated {1} of {2} {0}'.format(self.name, choice(self.__class__.genesisPlaces[self.genesis]), choice(self.__class__.machineAdjectives))\n elif self.tech == 'ubiquitous technology':\n self.description = '{0}, host to a psychic hivemind of {1} {2}'.format(self.name, choice(self.__class__.genesisAdjectives[self.genesis]), choice(self.__class__.laborerSynonyms))\n elif self.casteOrder[0] == 'artists':\n if self.tech == 'pre-industrial technology':\n self.description = 'The conservatory for neoclassical {0} who explore the theme of {1} on {2}'.format(choice(self.__class__.artistSynonyms), choice(self.__class__.genesisIdeals[self.genesis]), self.name)\n elif self.tech == 'machine technology':\n self.description = 'A series of art installations conceived by {1} at various {0} on {2}'.format(choice(self.__class__.genesisPlaces[self.genesis]), choice(self.__class__.artistSynonyms), self.name)\n elif self.tech == 'ubiquitous technology':\n self.description = '{0}: a collective of AI {1} who parody the human theme of {2}'.format(self.name, choice(self.__class__.artistSynonyms), choice(self.__class__.genesisIdeals[self.genesis]))\n elif self.casteOrder[0] == 'priests':\n if self.tech == 'pre-industrial technology':\n self.description = '{0} {1}, home to a scriptorium dedicated to the penning of meditations on {2}'.format(choice(self.__class__.preindustrialAdjectives).capitalize(), self.name, choice(self.__class__.genesisIdeals[self.genesis]))\n elif self.tech == 'machine technology':\n self.description = 'Polygamous cultists who live among the {0} of {1} {2}'.format(choice(self.__class__.genesisPlaces[self.genesis]), choice(self.__class__.machineAdjectives), self.name)\n elif self.tech == 'ubiquitous technology':\n self.description = 'A pyramidic burial chamber and monument to {0} {1} {2}'.format(choice(self.__class__.genesisIdeals[self.genesis]), choice(self.__class__.ubiquitousPreps), self.name)\n elif self.casteOrder[0] == 'scientists':\n if self.tech == 'pre-industrial technology':\n self.description = '{0}, home to a collective of {1} {2} 
who reproduce scientific experiments from pre-industrial Earth'.format(self.name, choice(self.genesisAdjectives[self.genesis]), choice(self.__class__.scientistSynonyms))\n elif self.tech == 'machine technology':\n self.description = 'A society of {0} who work simulated jobs at {1} on {3} to better understand {2}'.format(choice(self.__class__.scientistSynonyms), choice(self.__class__.genesisPlaces[self.genesis]), choice(self.__class__.genesisIdeals[self.genesis]), self.name)\n elif self.tech == 'ubiquitous technology':\n self.description = 'The postdoctoral program for {0} tenure-track {1} {2} {3}'.format(choice(self.__class__.genesisAdjectives[self.genesis]), choice(self.__class__.scientistSynonyms), choice(self.__class__.ubiquitousPreps), self.name)\n elif self.casteOrder[0] == 'soldiers':\n if self.tech == 'pre-industrial technology':\n self.description = '{0}, where {1} {2} study their martial arts in the quiet isolation of a {3}'.format(self.name, choice(self.__class__.genesisAdjectives[self.genesis]), choice(self.__class__.soldierSynonyms), choice(self.__class__.preindustrialSettlements))\n elif self.tech == 'machine technology':\n self.description = '{0}, where {1} {2} spar each other to overcome their frustrations surrounding {3}'.format(self.name, choice(self.__class__.badjectives), choice(self.__class__.soldierSynonyms), choice(self.__class__.genesisIdeals[self.genesis]))\n elif self.tech == 'ubiquitous technology':\n self.description = 'The highly militarized {0} {1} {2}'.format(choice(self.__class__.genesisPlaces[self.genesis]), choice(self.__class__.ubiquitousPreps), self.name)\n elif self.casteOrder[0] == 'merchants':\n if self.tech == 'pre-industrial technology':\n self.description = 'An antique fair on {0} where 1% of the proceeds are donated to the study of {1}'.format(self.name, choice(self.__class__.genesisIdeals[self.genesis]))\n elif self.tech == 'machine technology':\n self.description = 'A chain of retail {0} near the {1} on {2}'.format(choice(self.__class__.genesisPlaces[self.genesis]), choice(self.__class__.machineSettlements), self.name)\n elif self.tech == 'ubiquitous technology':\n self.description = 'A pay-by-the-hour computer simulation {0} {1} where patrons can experience {2}'.format(choice(self.__class__.ubiquitousPreps), self.name, choice(self.__class__.genesisIdeals[self.genesis]))", "def main(codelabel):\n try:\n code = Code.get_from_string(codelabel)\n except NotExistent:\n print(\"The code '{}' does not exist\".format(codelabel))\n sys.exit(1)\n\n print(\"Testing CP2K ENERGY on H2 (DFT) without StructureData...\")\n\n # parameters\n parameters = Dict(\n dict={\n 'FORCE_EVAL': {\n 'METHOD': 'Quickstep',\n 'DFT': {\n 'BASIS_SET_FILE_NAME': 'BASIS_MOLOPT',\n 'QS': {\n 'EPS_DEFAULT': 1.0e-12,\n 'WF_INTERPOLATION': 'ps',\n 'EXTRAPOLATION_ORDER': 3,\n },\n 'MGRID': {\n 'NGRIDS': 4,\n 'CUTOFF': 280,\n 'REL_CUTOFF': 30,\n },\n 'XC': {\n 'XC_FUNCTIONAL': {\n '_': 'LDA',\n },\n },\n 'POISSON': {\n 'PERIODIC': 'none',\n 'PSOLVER': 'MT',\n },\n },\n 'SUBSYS': {\n # structure directly included in parameters\n 'CELL': {\n 'ABC': '4.0 4.0 4.75'\n },\n 'COORD': {\n ' ': ['H 2.0 2.0 2.737166', 'H 2.0 2.0 2.000000']\n },\n 'KIND': [\n {\n '_': 'O',\n 'BASIS_SET': 'DZVP-MOLOPT-SR-GTH',\n 'POTENTIAL': 'GTH-LDA-q6'\n },\n {\n '_': 'H',\n 'BASIS_SET': 'DZVP-MOLOPT-SR-GTH',\n 'POTENTIAL': 'GTH-LDA-q1'\n },\n ],\n },\n }\n })\n\n # resources\n options = {\n \"resources\": {\n \"num_machines\": 1,\n \"num_mpiprocs_per_machine\": 1,\n },\n \"max_wallclock_seconds\": 1 * 3 * 60,\n 
}\n\n inputs = {'parameters': parameters, 'code': code, 'metadata': {'options': options,}}\n\n print(\"submitted calculation...\")\n calc = run(Cp2kCalculation, **inputs)\n\n # check energy\n expected_energy = -1.14005678487\n if abs(calc['output_parameters'].dict.energy - expected_energy) < 1e-10:\n print(\"OK, energy has the expected value\")\n else:\n print(\"ERROR!\")\n print(\"Expected energy value: {}\".format(expected_energy))\n print(\"Actual energy value: {}\".format(calc['output_parameters'].dict.energy))\n sys.exit(3)\n\n sys.exit(0)", "def load_label(self, pr):\n return", "def get_step_label_at_index(self, index):\n return self[index][1]", "def get_label(genotype_type):\n if genotype_type == \"Hom\":\n return 0\n elif genotype_type == \"Het\":\n return 1\n elif genotype_type == \"Hom_alt\":\n return 2", "def label(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"label\")", "def label(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"label\")", "def label(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"label\")", "def t_labeling_cohort(**kargs): \n def read_ids(fname): \n assert fname.find('.id') > 0\n fp = os.path.join(basedir, fname)\n assert os.path.exists(fp), 'Invalid input: %s' % fp\n df_id = pd.read_csv(fp, sep=sep, header=0, index_col=False, error_bad_lines=True)\n return df_id['person_id'].values\n def seq_to_str(seqx, sep=','): \n return [sep.join(str(s) for s in seq) for seq in seqx] \n def str_to_seq(df, col='sequence', sep=','):\n seqx = []\n for seqstr in df[col].values: \n s = seqstr.split(sep)\n seqx.append(s)\n return seqx\n def to_str(tokens, sep='+'): \n return sep.join([str(tok) for tok in tokens])\n\n # import labeling\n import seqReader as sr\n\n ### CKD cohort \n # basedir = sys_config.read('DataIn') # data-in simlink to data ... 10.17 \n # 'data-in' is reserved for input data not generated from within the system \n basedir = sys_config.read('DataExpRoot')\n \n # cohort attributes\n cohort_name = 'CKD'\n fname = 'eMerge_NKF_Stage_20170818.csv' \n header = ['patientId', 'Case_Control_Unknown_Status', 'NKF_Stage', ]\n sep = ','\n\n fpath = os.path.join(basedir, fname)\n df = pd.read_csv(fpath, sep=sep, header=0, index_col=False, error_bad_lines=True)\n\n # use stages as labels \n labelset = list(df['NKF_Stage'].unique())\n print('info> cohort: %s | labels (n=%d):\\n%s\\n' % (cohort_name, len(labelset), labelset))\n # [log] 7 labels\n\n labels = df['NKF_Stage']\n\n # only read documents with data \n idx = person_ids = getPersonIDs(cohort=cohort_name, inputdir=basedir, sep=sep) # cohort, inputdir, sep, sep_compo\n seqparams.TDoc.verifyIDs(idx) # ascending order? duplicates? 
\n\n n_persons = len(idx)\n print('info> n_persons: %d' % n_persons)\n\n ### find labels\n # don't use the data source's ordering of IDs, which when made via seqMaker2.py was sorted\n # ERROR: labels = df.loc[df['patientId'].isin(idx)]['NKF_Stage'].values\n \n sort_keys = ['patientId', ]\n # df_test1 = df.sort_values(sort_keys, ascending=True)\n # l = df['patientId'].values\n # assert all(l[i] <= l[i+1] for i in xrange(len(l)-1)) # passed\n # assert all(l == idx) # passed\n\n # filter > sort > extract (good!)\n # output np.array\n # labels = labels_ref = labelDocByDataFrame(, person_ids=idx, id_field='patientId', label_field='NKF_Stage')\n labels = labels_ref = labelDocByFile(fpath, person_ids=idx, id_field='patientId', label_field='NKF_Stage')\n\n # n_labels = len(labels)\n # print('info> Got %d labels' % n_labels)\n\n # [test] verify the ID and labels\n # print('status> verifying the match between IDs and labels')\n # for i, (r, row) in enumerate(df_test1.iterrows()): # sorted entries\n # pid, label = row['patientId'], row['NKF_Stage']\n # if pid in idx: \n # assert label == labels[i], \"%d-th label: %s <> %s\" % (i, label, labels[i])\n ## [conclusion] the label ordering via df_test1 does not agree!!! \n\n # extract labels according to the ID ordering\n # sampleIDs = random.sample(range(n_persons), 50)\n # labels = []\n # for pid in idx: \n # row = df.loc[df['patientId']==pid] # row is a dataframe\n # assert row.shape[0] == 1, 'Found dups: id=%s => %s' % (pid, row)\n # l = list(row['NKF_Stage'].values)\n # labels.extend(l)\n # assert len(labels) == len(labels_ref) == len(idx) # passed\n # assert all(labels_ref == labels), \"ordering inconsistency:\\n%s\\n VS \\n%s\\n\" % (labels_ref[:50], labels[:50]) # passed\n\n n_labels = len(labels)\n print('info> verified %d labels' % n_labels)\n\n # double check with structured version of the sequences produced by seqMaker2 (header: person_id, sequence, timestamp)\n # tfile = 'condition_drug_timed_seq-%s.csv' % cohort_name # test file\n # fpath2 = os.path.join(basedir, tfile)\n # # if os.path.exists(fpath2): \n # dft = pd.read_csv(fpath2, sep='|', header=0, index_col=False, error_bad_lines=True)\n # print('info> from timed_seq .csv | n_persons: %d =?= n_labels: %d' % (dft.shape[0], n_labels)) # n_persons: 2833 =?= n_labels: 2833\n\n ### Read Sequences\n\n print('info> 1. CSeq from .csv')\n ret = readDocFromCSV(cohort=cohort_name, inputdir=basedir)\n print('info> making structured format of the coding sequences (cohort:%s, n_labels:%d)' % (cohort_name, n_labels))\n # df = readToCSV(cohort=cohort_name, labels=labels)\n \n seqx = ret['sequence'] # list(dft['sequence'].values)\n tseqx = ret.get('timestamp', []) # list(dft['timestamp'].values)\n if tseqx: \n assert len(seqx) == len(tseqx), \"len(seqx)=%d, len(times)=%d\" % (len(seqx), len(tseqx))\n\n print('info> 2. CSeq from .dat')\n seqx2, tseqx2 = readDoc(cohort=cohort_name, inputdir=basedir, include_timestamps=True) # ifiles\n\n # can then create .csv via readDocToCSV() # [params] cohort, basedir, ifiles, labels\n\n if tseqx2: \n assert len(seqx2) == len(tseqx2), \"len(seqx)=%d, len(times)=%d\" % (len(seqx2), len(tseqx2))\n n_docs, n_docs2 = len(seqx), len(seqx2)\n \n # print('info> read %d from .dat =?= %d from .csv' % (n_docs2, n_docs))\n assert n_docs == n_docs2, \".dat and .csv formats are not consistent n_doc: %d (csv) <> %d (dat)\" % (n_docs, n_docs2)\n\n # when did they diverge? 
\n # n_matched = 0\n # for i, seq in enumerate(seqx): \n # s1 = seq # list of tokens\n\n # try: \n # s2 = seqx2[i] # list of tokens\n # except: \n # s2 = []\n\n # if s1 == s2: \n # n_matched += 1 \n # else: \n # msg = \".csv not consistent with .dat (n_matched=%d)=>\\n%s\\nVS\\n%s\\n\" % (n_matched, s1, s2)\n # raise ValueError, msg \n\n n_docs_src = df.shape[0]\n assert n_docs == n_labels, \"n_labels: %d <> n_docs: %d ...\" % (n_labels, n_docs)\n\n print('input> n_doc_src (cohort source document): %d, n_doc (parsed, has data in DB): %d' % (n_docs_src, n_docs))\n\n print('info> writing labels to .csv')\n df2 = sr.readDocToCSV(cohort=cohort_name, labels=labels)\n print('info> created .csv format with columns:\\n%s\\n' % df2.columns.values)\n n_docs3 = df2.shape[0]\n assert n_docs3 == n_docs\n\n return", "def print_common_hub_words(rem_stop_words):\n results = {'_removing stop-words':rem_stop_words}\n\n print '------ CLASSIFICATION EVALUATION --------'\n print '> Reading cases..'\n descriptions_path = '../data/tasa/TASA900_dependencies'\n texts, labels = data.read_files(descriptions_path)\n\n print '> Creating representations..'\n fd = nltk.probability.FreqDist()\n for i, text in enumerate(texts):\n if i%100==0: print ' ',str(i)+'/'+str(len(texts))\n g = graph_representation.construct_dependency_network(text, remove_stop_words=rem_stop_words)\n hubs = graph.get_hubs(g, 10)\n for h in hubs:\n fd.inc(h[0])\n g = None # just to make sure..\n\n results['tasa'] = fd.keys()\n\n print '------ RETRIEVAL EVALUATION --------'\n print '> Reading cases..'\n descriptions_path = '../data/air/problem_descriptions_dependencies'\n description_texts, labels = data.read_files(descriptions_path)\n\n print '> Creating representations..'\n fd = nltk.probability.FreqDist()\n for i, text in enumerate(description_texts):\n if i%100==0: print ' ',str(i)+'/'+str(len(description_texts))\n g = graph_representation.construct_dependency_network(text, remove_stop_words=rem_stop_words)\n hubs = graph.get_hubs(g, 10)\n for h in hubs:\n fd.inc(h[0])\n g = None # just to make sure..\n\n results['air'] = fd.keys()\n\n if rem_stop_words:\n modifier = 'without'\n else:\n modifier = 'with'\n data.pickle_to_file(results, 'output/dependencies/common_hubs_'+modifier+'stop_words')\n\n pp.pprint(results)\n return results", "def get_parent_struc (chain, added_chains_dict):\n if \"-\" in chain.id:\n structure = added_chains_dict[chain.id]\n else:\n structure = chain.get_parent().get_parent()\n return structure", "def get_label(settings):", "def _generate_pipeline_labels(self, job):\n jobname = self._get_jobname(job)\n labels = {\"name\": jobname, \"app\": \"snakemake\"}\n return labels", "def plugin_second_label():\n return \"second\"", "def main(structure, label, return_type):\n sequence = get_sequence(structure=structure, label=label,\n return_type=return_type)\n if sequence is not None:\n click.echo(sequence)", "def __init__(self):\n self.label = \"Grand WOFE\"\n self.description = \"From list of Evidence layers generate weights tables and output rasters from Calculate Respons and Logistic Regression.\"\n self.canRunInBackground = False\n self.category = \"Weights of Evidence\"", "def get_chain(self, chain_id):\n if self.default_model is None:\n return None\n if self.default_model.chain_dict.has_key(chain_id):\n return self.default_model.chain_dict[chain_id]\n return None", "def pred_wine_type():\n \n Alcohol\t=request.args.get('Alcohol')\n Malic=request.args.get('Malic')\n Ash=request.args.get('Ash')\n 
Alcalinity=request.args.get('Alcalinity')\n Magnesium=request.args.get('Magnesium')\n Phenols=request.args.get('Phenols')\n Flavanoids=request.args.get('Flavanoids')\n Nonflavanoids=request.args.get('Nonflavanoids')\n Proanthocyanins=request.args.get('Proanthocyanins')\n Color=request.args.get('Color')\n Hue=request.args.get('Hue')\n Dilution=request.args.get('Dilution')\n Proline=request.args.get('Proline')\n prediction=classifier.predict([[Alcohol,Malic,Ash,Alcalinity,Magnesium,Phenols,Flavanoids,Nonflavanoids,Proanthocyanins,Color,Hue,Dilution,Proline]])\n return 'The predicted value is'+str(prediction)", "def chain(self, chain_id, model_num = 0):\n return self.struct[model_num][chain_id]", "def default_chain(self):\n self.name = \"Default Chain Mail Armor\"\n self.rarity = \"Common\"\n self.pdef_value = 15\n self.mdef_value = 2\n self.increase_crit = 0\n self.desc = \"A rusty piece of chain mail, old and discoloured\"", "def get_step_label_at_index(self, index):\n return self.routine_template.get_step_label_at_index(index)", "def get_clarifications_winogrande(ex, nlp, comet_model):\n personx, persony = ex['option1'], ex['option2']\n\n # Only extract relations for people\n if personx[0] != personx[0].upper() or persony[0] != persony[0].upper():\n return []\n\n input_event = ex[\"sentence\"]\n outputs = {category: comet_model.predict(input_event, category, num_beams=5) for category in comet_model.categories}\n\n curr_events = []\n for category, prefix in CATEGORY_TO_PREFIX.items():\n for out_event in outputs[category]:\n if out_event != \"none\" and out_event != \"\":\n if not out_event.lower().startswith(\"person\") and not out_event.lower().startswith(\"other\"):\n out_event = \" \".join((prefix, out_event))\n\n out_event = re.sub(\"personx\", personx, out_event, flags=re.I)\n out_event = re.sub(\"person x\", personx, out_event, flags=re.I)\n out_event = re.sub(\"persony\", persony, out_event, flags=re.I)\n out_event = re.sub(\"person y\", persony, out_event, flags=re.I)\n\n question = CATEGORY_TO_QUESTION[category].replace(\"PersonX\", personx)\n curr_events.append((question, out_event))\n\n return curr_events", "def test_get_nested_attribute(self):\n result = self.runner.invoke(\n cli,\n [\n *CLI_LOG_OPTION,\n \"config\",\n \"get\",\n \"skills.dummy.behaviours.dummy.class_name\",\n ],\n standalone_mode=False,\n )\n assert result.exit_code == 0\n assert result.output == \"DummyBehaviour\\n\"", "def q_getdifficulty(abe, page, chain):\n if chain is None:\n return 'Shows the difficulty of the last block in CHAIN.\\n' \\\n '/chain/CHAIN/q/getdifficulty\\n'\n target = abe.store.get_target(chain.id)\n return \"\" if target is None else util.target_to_difficulty(target)", "def label_rule_for_company(text: str) -> str:\n match = re.search(LABEL_SPECIFICATION[\"RE_COMPANY_PRIMARY\"], text)\n if not match:\n match = re.search(LABEL_SPECIFICATION[\"RE_COMPANY_SECONDARY\"], text)\n if match:\n text = match.group(\"label\").strip()\n if len(text) > 1:\n return text\n else:\n return \"\"\n return \"\"", "def get_label(name):\n lower = name.lower()\n vals = lower.split('_')\n if 'ho' in vals:\n name = 'Independent Estimate'\n elif 'alldata' in vals:\n name = 'Extra-Data Estimate'\n elif 'ris' in vals[0]:\n name = 'RIS'\n if 'w' in vals[0]:\n name += ' WIS'\n if 'pd' in vals[0]:\n name += ' PDIS'\n elif 'is' in vals[0]:\n name = 'OIS'\n if 'w' in vals[0]:\n name += ' WIS'\n if 'pd' in vals[0]:\n name += ' PDIS'\n if 'dr' in vals:\n name += ' DR'\n if 'wdr' in vals:\n name += ' WDR'\n return name", 
"def label(self, cfg):\n rep = \"\"\n nl = \"\"\n for node in cfg.nodes:\n rep += nl + \"{}\\tgen={}\\tkill={}\\tout={}\".format(\n node, \n set(self.gen.get(node)),\n set(self.kill.get(node)),\n set(self.out.get(node)))\n nl = \"\\n\"\n return rep", "def test_get_skill_name(self):\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"get\", \"skills.dummy.name\"],\n standalone_mode=False,\n )\n assert result.exit_code == 0\n assert result.output == \"dummy\\n\"", "def test_label_callback():\n release_numbers = dict(a='123')\n data = dict(revision='a', attributes=dict(b='c'))\n data2 = dict(revision='b', attributes=dict(d='e'))\n\n assert _label_callback(data, release_numbers) == u'a\\n- Release: 123\\n- b: c'\n assert _label_callback(data2) == u'b\\n- Release: Unknown\\n- d: e'", "def get_label():\n inp = option_text('Input label name (leave blank for no label):')\n add_to_collected('label', inp)\n OPTIONS['label'] = inp\n return", "def _GenerateConfLine(self, term, precedence=None):\n if term.term.qos in self.qos_value_map:\n target = []\n qos_value = self.qos_value_map[term.term.qos]\n target.append('match protocol ip src-ip any src-port any')\n target.append('dst-ip any dst-port any application')\n target.append(term.term.name)\n if precedence:\n target.append('dscp %s set traffic-class 1' %\n self.precedence_value_map[precedence])\n else:\n target.append('dscp any set traffic-class 1')\n target.append('lan-qos-dscp %s wan-qos-dscp %s\\n\\n' %\n (qos_value, qos_value))\n return ' '.join(target)\n return ''", "def get_clarifications_copa(ex, nlp, comet_model):\n category_to_prefix_causes = {\"xIntent\": CATEGORY_TO_PREFIX[\"xIntent\"],\n \"xNeed\": CATEGORY_TO_PREFIX[\"xNeed\"]}\n\n category_to_prefix_effects = CATEGORY_TO_PREFIX.copy()\n category_to_prefix_effects.pop(\"xIntent\")\n category_to_prefix_effects.pop(\"xNeed\")\n category_to_prefix_effects.pop(\"xAttr\")\n\n input_event = ex[\"premise\"]\n personx, is_named_entity = get_personx(nlp, input_event)\n\n if personx == \"\":\n return []\n\n personx = personx if (is_named_entity or personx == \"I\") else personx.lower()\n outputs = {category: comet_model.predict(input_event, category, num_beams=5) for category in comet_model.categories}\n\n if ex[\"question\"] == \"cause\":\n category_to_prefix = category_to_prefix_causes\n else:\n category_to_prefix = category_to_prefix_effects\n\n curr_events = []\n for category, prefix in category_to_prefix.items():\n for out_event in outputs[category]:\n if out_event != \"none\" and out_event != \"\":\n if not out_event.lower().startswith(\"person\") and not out_event.lower().startswith(\"other\"):\n out_event = \" \".join((prefix, out_event))\n\n out_event = re.sub(\"personx\", personx, out_event, flags=re.I)\n out_event = re.sub(\"person x\", personx, out_event, flags=re.I)\n out_event = re.sub(\"persony\", \"others\", out_event, flags=re.I)\n out_event = re.sub(\"person y\", \"others\", out_event, flags=re.I)\n\n question = CATEGORY_TO_QUESTION[category].replace(\"PersonX\", personx)\n curr_events.append((question, out_event))\n\n return curr_events", "def make_text(chain_dict):\n\n# This selects the first random key to begin the chain.\n \n random_key = random.choice(chain_dict.keys())\n\n# This creates a random text list to append a random value from the original random key.\n# The random key is then reassigned to a tuple containing the second key in the random keys tuple,\n# and adds the randomly generated value of the earlier random key tuple as the second tuple 
value. \n# Convert tuples in random_key to strings, then added all strings together, then added to empty list\n\n first_words = ' '.join(map(str,random_key)) \n random_text_list = [first_words]\n \n for i in range(1,100):\n #while random_key in chain_dict: \n next = random.choice(chain_dict[random_key]) \n random_text_list.append(next)\n random_key = (random_key[1],next)\n\n pretty_text = []\n pretty_text = ' '.join(map(str, random_text_list)) \n return pretty_text", "def label_rule_for_others(text: str, label_type: str) -> str:\n match = re.search(LABEL_SPECIFICATION[f\"RE_{label_type.upper()}\"], text)\n if match:\n return match.group(\"label\").strip()\n return \"\"", "def main(argv):\n global fileName, hyp, run_type, rep_type, n_processor, output_dir # Used by both master and slave processes\n fileName = args.outPrefix\n hyp_default = args.default\n hyp_adjust = args.hyperparam\n output_dir = args.outPrefix\n n_processor = args.num_worker\n run_type = args.type1\n rep_type = args.type2\n\n hyp = loadHyp(pFileName=hyp_default)\n updateHyp(hyp,hyp_adjust)\n print(hyp)\n\n master()", "def get_label(urs):\n return assign_term(urs)[1]", "def gen_hts_lab_mono(self):\n chosen_fields_lst = ['start', 'stop', 'phone']\n\n self._hts_lab_mono_ttpl = \\\n lst.transp_ttpl(\n tuple(\n tuple(self._proc_ldic[field]) for field in chosen_fields_lst\n )\n )\n\n self._hts_lab_mono_ttpl = tuple(tuple([\"{:>10}\".format(row[0]), \"{:>10}\".format(row[1]), row[2]])\n for row in self._hts_lab_mono_ttpl)\n\n self.hts_lab_mono_prn = tuple(' '.join(l) for l in self._hts_lab_mono_ttpl)", "def get_label_from_analysis_model(analysis_model, id_hash):\n\n root = os.path.basename(os.path.dirname(analysis_model.compilation)).replace(\"_\", \" \")\n output = analysis_model.output_directory.replace(\"_\", \" \") if analysis_model.output_directory else \"analysis\"\n return \"{0} -> {1}, ({2})\".format(root, output, id_hash[-6:])", "def main( argv = None ):\n\n if argv == None: argv = sys.argv\n\n # setup command line parser\n parser = E.OptionParser( version = \"%prog version: $Id$\", \n usage = globals()[\"__doc__\"] )\n\n parser.add_option(\"--category\", dest=\"category\", type=\"choice\",\n choices = (\"B\", \"C\"), help=\"supply help\" )\n\n ## add common options (-h/--help, ...) 
and parse command line \n (options, args) = E.Start( parser, argv = argv )\n\n data = getData(options.stdin)\n if options.category == \"B\":\n options.stdout.write(\"Category B pathway\\tKO\\tGenes\\tDescriptions\\n\")\n for pathway, descriptions in b2ko(data).iteritems():\n options.stdout.write(\"\\t\".join([pathway, \"; \".join(descriptions[0]), \"; \".join(descriptions[1]), \"; \".join(descriptions[2])]) + \"\\n\")\n\n elif options.category == \"C\":\n options.stdout.write(\"Category C pathway\\tKO\\tGenes\\tDescriptions\\n\")\n for pathway, descriptions in c2ko(data).iteritems():\n options.stdout.write(\"\\t\".join([pathway, \"; \".join(descriptions[0]), \"; \".join(descriptions[1]), \"; \".join(descriptions[2])]) + \"\\n\")\n else:\n raise ValueError(\"must specify the category of pathway\")\n\n\n ## write footer and output benchmark information.\n E.Stop()", "def __str__(self):\n return self.piece_behavior.summary", "def get_labels(self):\n if self.option == \"term\":\n return ['platform characteristics', 'atmospheric winds', 'radio wave','weather events', 'geomagnetism', 'atmospheric electricity','microwave', 'atmospheric temperature', 'atmospheric water vapor','atmospheric pressure', 'aerosols', 'atmospheric radiation','atmospheric chemistry', 'precipitation', 'sensor characteristics','radar', 'infrared wavelengths', 'visible wavelengths','weather/climate advisories', 'clouds', 'lidar', 'ocean optics','ultraviolet wavelengths', 'cryospheric indicators','land use/land cover', 'topography', 'surface thermal properties','spectral/engineering', 'soils', 'snow/ice', 'geothermal dynamics','natural hazards', 'surface water', 'vegetation','land surface/agriculture indicators','gravity/gravitational field', 'marine advisories', 'altitude','water quality/water chemistry', 'ocean temperature','ocean winds', 'atmospheric/ocean indicators', 'coastal processes','erosion/sedimentation', 'marine sediments', 'ocean chemistry','salinity/density', 'ocean color', 'aquatic ecosystems','vegetation2', 'landscape', 'cloud properties','surface radiative properties', 'geodetics','agricultural plant science', 'forest science','ecological dynamics', 'environmental impacts', 'sustainability','boundaries', 'ecosystems', 'air quality', 'population','infrastructure', 'environmental governance/management','public health', 'economic resources', 'socioeconomics','environmental vulnerability index (evi)', 'human settlements','agricultural chemicals', 'animal science','habitat conversion/fragmentation', 'animals/vertebrates','earth gases/liquids', 'rocks/minerals/crystals','social behavior', 'ground water', 'frozen ground','terrestrial hydrosphere indicators', 'ocean heat budget','biospheric indicators', 'animal commodities', 'fungi', 'plants','carbon flux', 'geomorphic landforms/processes','paleoclimate indicators', 'ocean circulation', 'sea ice','geochemistry', 'visualization/image processing','subsetting/supersetting', 'transformation/conversion','ocean pressure', 'glaciers/ice sheets', 'protists','solar activity', 'sun-earth interactions','sea surface topography', 'solar energetic particle properties','solar energetic particle flux','ionosphere/magnetosphere dynamics']\n elif self.option == \"mostdepth\":\n return ['flight data logs', 'turbulence', 'radio wave flux', 'lightning', 'magnetic field', 'atmospheric conductivity', 'electric field', 'data synchronization time', 'brightness temperature', 'vertical profiles', 'water vapor profiles', 'air temperature', 'upper level winds', 'atmospheric pressure 
measurements', 'upper air temperature', 'humidity', 'dew point temperature', 'aerosol particle properties', 'emissivity', 'trace gases/trace species', 'liquid precipitation', 'cloud liquid water/ice', 'microwave radiance', 'sensor counts', 'total pressure', 'airspeed/ground speed', 'total temperature', 'static pressure', 'wind speed', 'wind direction', 'radar reflectivity', 'doppler velocity', 'infrared imagery', 'visible imagery', 'water vapor', 'vertical wind velocity/speed', 'aerosol backscatter', 'weather forecast', 'tropical cyclones', 'visible radiance', 'infrared radiance', 'total precipitable water', 'boundary layer temperature', 'atmospheric temperature indices', 'cloud height', 'flight level winds', 'cloud droplet distribution', 'cloud droplet concentration/size', 'cloud condensation nuclei', 'cloud microphysics', 'hydrometeors', 'ozone', 'wind profiles', 'cloud base temperature', 'cloud base height', 'liquid water equivalent', 'solar radiation', 'planetary boundary layer height', 'surface winds', 'precipitation amount', 'precipitation rate', 'surface pressure', 'rain', 'cloud optical depth/thickness', 'aerosol extinction', 'aerosol optical depth/thickness', 'cirrus cloud systems', 'lidar depolarization ratio', 'radar backscatter', 'radar cross-section', 'return power', 'mean radial velocity', 'radiance', 'air quality', 'climate advisories', 'atmospheric emitted radiation', 'optical depth/thickness', 'surface temperature', 'ultraviolet flux', 'spectrum width', 'microwave imagery', 'lidar backscatter', 'relative humidity', 'u/v wind components', 'wind speed/wind direction', 'radar imagery', 'snow depth', 'land use/land cover classification', 'digital elevation/terrain model (dem)', 'snow', 'droplet size', 'droplet concentration/size', 'drizzle', 'precipitation anomalies', 'snow water equivalent', 'solid precipitation', 'total surface precipitation rate', 'particle size distribution', 'skin temperature', 'attitude characteristics', 'land surface temperature', 'hail', 'reflectance', 'soil moisture/water content', 'soil temperature', 'soil bulk density', 'surface roughness', 'present weather', 'snow density', 'ambient temperature', 'aerosol forward scatter', 'floods', 'snow cover', 'sigma naught', 'precipitable water', 'stage height', 'rivers/streams', 'shortwave radiation', 'photosynthetically active radiation', 'longwave radiation', 'net radiation', 'hourly precipitation amount', '24 hour precipitation amount', 'soil moisture', 'satellite orbits/revolution', 'sea surface temperature', 'heat flux', 'latent heat flux', 'cloud fraction', '3 and 6 hour precipitation amount', 'geopotential height', 'particulate matter', 'particle images', 'water vapor indices', 'horizontal wind velocity/speed', 'electrical conductivity', 'dissolved carbon dioxide', 'hurricanes', 'tropical cyclone track', 'convective clouds/systems (observed/analyzed)', 'cloud top height', 'viewing geometry', 'temperature profiles', 'vertical wind shear', 'wind shear', 'carbon monoxide', 'sea level pressure', 'water vapor tendency', 'potential temperature', 'angstrom exponent', 'ultraviolet radiation', 'solar irradiance', 'scattering', 'absorption', 'water vapor mixing ratio profiles', 'sea surface temperature indices', 'extreme eastern tropical pacific sst', 'sedimentation', 'erosion', 'sediment transport', 'sediments', 'tropopause', 'ocean chemistry', 'ocean optics', 'ocean temperature', 'salinity/density', 'pigments', 'ocean color', 'attenuation/transmission', 'inorganic carbon', 'organic carbon', 'photosynthetically 
available radiation', 'chlorophyll', 'optical depth', 'fluorescence', 'vegetation index', 'gelbstoff', 'phytoplankton', 'vegetation index2', 'cloud precipitable water', 'landscape ecology', 'ultraviolet radiance', 'cloud ceiling', 'aerosol radiance', 'carbonaceous aerosols', 'dust/ash/smoke', 'nitrate particles', 'organic particles', 'sulfate particles', 'radiative flux', 'transmittance', 'atmospheric stability', 'cloud asymmetry', 'cloud frequency', 'cloud top pressure', 'cloud top temperature', 'cloud vertical distribution', 'cloud emissivity', 'cloud radiative forcing', 'cloud reflectance', 'rain storms', 'reflected infrared', 'thermal infrared', 'incoming solar radiation', 'clouds', 'cloud properties', 'cloud types', 'orbital characteristics', 'sensor characteristics', 'maximum/minimum temperature', 'condensation', 'platform characteristics', 'geolocation', 'geodetics', 'coordinate reference system', 'aerosols', 'topographical relief maps', 'terrain elevation', 'normalized difference vegetation index (ndvi)', 'infrared flux', 'visible flux', 'albedo', 'land use/land cover', 'topography', 'lidar', 'lidar waveform', 'plant phenology', 'vegetation cover', 'crop/plant yields', 'land use classes', 'landscape patterns', 'forest harvesting and engineering', 'forest management', 'total surface water', 'agricultural plant science', 'photosynthesis', 'primary production', 'leaf characteristics', 'evapotranspiration', 'fire occurrence', 'surface thermal properties', 'canopy characteristics', 'evergreen vegetation', 'crown', 'deciduous vegetation', 'anisotropy', 'fire ecology', 'biomass burning', 'wildfires', 'topographical relief', 'burned area', 'surface radiative properties', 'environmental sustainability', 'boundaries', 'anthropogenic/human influenced ecosystems', 'emissions', 'sulfur dioxide', 'population', 'infrastructure', 'environmental assessments', 'public health', 'conservation', 'agriculture production', 'administrative divisions', 'economic resources', 'socioeconomics', 'lake/pond', 'rivers/stream', 'political divisions', 'environmental vulnerability index (evi)', 'ecosystems', 'urban areas', 'sustainability', 'treaty agreements/results', 'human settlements', 'population estimates', 'nitrogen dioxide', 'cropland', 'pasture', 'particulates', 'cyclones', 'mortality', 'environmental impacts', 'droughts', 'earthquakes', 'population distribution', 'fertilizers', 'animal manure and waste', 'urbanization/urban sprawl', 'landslides', 'avalanche', 'urban lands', 'mangroves', 'volcanic eruptions', 'pesticides', 'population size', 'population density', 'lakes/reservoirs', 'surface water', 'rural areas', 'infant mortality rates', 'amphibians', 'mammals', 'carbon', 'sulfur oxides', 'methane', 'non-methane hydrocarbons/volatile organic compounds', 'nitrogen oxides', 'natural gas', 'coal', 'coastal elevation', 'biodiversity functions', 'nuclear radiation exposure', 'radiation exposure', 'poverty levels', 'malnutrition', 'wetlands', 'sea level rise', 'vulnerability levels/index', 'ground water', 'snow/ice', 'electricity', 'energy production/use', 'sustainable development', 'deforestation', 'household income', 'discharge/flow', 'hydropattern', 'nitrogen', 'phosphorus', 'carbon dioxide', 'alpine/tundra', 'forests', 'vegetation', 'permafrost', 'nutrients', 'plant characteristics', 'leaf area index (lai)', 'soil gas/air', 'ammonia', 'nitrous oxide', 'ecosystem functions', 'litter characteristics', 'soil chemistry', 'soil respiration', 'active layer', 'soil depth', 'cation exchange capacity', 'organic 
matter', 'soil porosity', 'soil texture', 'permafrost melt', 'land subsidence', 'freeze/thaw', 'surface water features', 'chlorinated hydrocarbons', 'methyl bromide', 'methyl chloride', 'molecular hydrogen', 'sulfur compounds', 'fire models', 'biomass', 'dominant species', 'vegetation species', 'sulfur', 'tree rings', 'soil classification', 'heat index', 'sea ice concentration', 'ocean heat budget', 'reforestation', 'even-toed ungulates', 'species recruitment', 'population dynamics', 'range changes', 'topographic effects', 'land resources', 'river ice depth/extent', 'snow melt', 'river ice', 'animal commodities', 'animal ecology and behavior', 'phenological changes', 'water depth', 'inundation', 'forest fire science', 'biogeochemical cycles', 'radiative forcing', 'soil heat budget', 'drainage', 'respiration rate', 'river/lake ice breakup', 'river/lake ice freeze', 'reclamation/revegetation/restoration', 'permafrost temperature', 'indigenous/native species', 'fire dynamics', 'lichens', 'plants', 'plant succession', 'carbon flux', 'coastal', 'salt marsh', 'degradation', 'altitude', 'carbon and hydrocarbon compounds', 'halocarbons and halogens', 'forest composition/vegetation structure', 'water vapor indicators', 'barometric altitude', 'atmospheric water vapor', 'terrestrial ecosystems', 'volatile organic compounds', 'boundary layer winds', 'forest fire danger index', 'periglacial processes', 'landscape processes', 'evaporation', 'soil horizons/profile', 'shrubland/scrub', 'soil ph', 'soils', 'soil water holding capacity', 'community structure', 'pingo', 'soil color', 'virtual temperature', 'formaldehyde', 'hydroxyl', 'photolysis rates', 'cloud dynamics', 'nitric oxide', 'molecular oxygen', 'smog', 'peroxyacyl nitrate', 'hydrogen compounds', 'nitrogen compounds', 'oxygen compounds', 'stable isotopes', 'chemical composition', 'actinic flux', 'tropospheric ozone', 'fossil fuel burning', 'industrial emissions', 'denitrification rate', 'sunshine', 'runoff', 'soil structure', 'mosses/hornworts/liverworts', 'peatlands', 'hydraulic conductivity', 'snow/ice temperature', 'vegetation water content', 'discharge', 'chlorophyll concentrations', 'outgoing longwave radiation', 'geomorphic landforms/processes', 'soil compaction', 'soil impedance', 'canopy transmittance', 'water table', 'decomposition', 'water temperature', 'dissolved gases', 'total dissolved solids', 'agricultural expansion', 'forest science', 'pressure tendency', 'visibility', 'biomass dynamics', 'agricultural lands', 'grasslands', 'savannas', 'grazing dynamics/plant herbivory', 'herbivory', 'paleoclimate reconstructions', 'drought indices', 'fire weather index', 'animal yields', 'multivariate enso index', 'dissolved solids', 'ocean currents', 'salinity', 'coastal processes', 'atmospheric pressure', 'afforestation/reforestation', 'fresh water river discharge', 'surface water chemistry', 'drainage basins', 'resource development site', 'dunes', 'flood plain', 'endangered species', 'precipitation indices', 'temperature indices', 'forest yields', 'stratigraphic sequence', 'freeze/frost', 'frost', 'hydrogen cyanide', 'land management', 'nutrient cycling', 'industrialization', 'suspended solids', 'deserts', 'weathering', 'gas flaring', 'atmospheric temperature', 'ice extent', 'fraction of absorbed photosynthetically active radiation (fapar)', 'marshes', 'swamps', 'lake ice', 'atmospheric winds', 'watershed characteristics', 'transportation', 'soil rooting depth', 'isotopes', 'cultural features', 'consumer behavior', 'boundary surveys', 
'aquifers', 'land productivity', 'water quality/water chemistry', 'sediment composition', 'dissolved oxygen', 'surface water processes/measurements', 'turbidity', 'conductivity', 'ph', 'calcium', 'magnesium', 'potassium', 'micronutrients/trace elements', 'social behavior', 'sulfate', 'sediment chemistry', 'biogeochemical processes', 'water ion concentrations', 'cropping systems', 'percolation', 'groundwater chemistry', 'reforestation/revegetation', 'species/population interactions', 'soil infiltration', 'alkalinity', 'soil fertility', 'phosphorous compounds', 'radioisotopes', 'cooling degree days', 'angiosperms (flowering plants)', 'glacial landforms', 'glacial processes', 'contour maps', 'estuaries', 'methane production/use', 'natural gas production/use', 'petroleum production/use', 'visualization/image processing', 'subsetting/supersetting', 'transformation/conversion', 'forest mensuration', 'acid deposition', 'differential pressure', 'precipitation', 'marine ecosystems', 'consumption rates', 'radio wave', 'soil organic carbon (soc)', 'soil erosion', 'halocarbons', 'trace elements/trace metals', 'biomass energy production/use', 'riparian wetlands', 'soil consistence', 'snow stratigraphy', 'thermal conductivity', 'estuary', 'tidal height', 'plant diseases/disorders/pests', 'layered precipitable water', 'atmospheric chemistry', 'water vapor concentration profiles', 'specific humidity', 'total runoff', 'pressure thickness', 'wind stress', 'atmospheric heating', 'conduction', 'hydrogen chloride', 'nitric acid', 'radar', 'land surface/agriculture indicators', 'satellite soil moisture index', 'chlorine nitrate', 'chlorofluorocarbons', 'dinitrogen pentoxide', 'antenna temperature', 'glaciers', 'ice sheets', 'dimethyl sulfide', 'potential vorticity', 'ice fraction', 'atmospheric radiation', 'runoff rate', 'temperature tendency', 'wind dynamics', 'wind direction tendency', 'base flow', 'bromine monoxide', 'chlorine monoxide', 'methyl cyanide', 'hypochlorous acid', 'methanol', 'hydroperoxy', 'cloud base pressure', 'temperature anomalies', 'nitrate', 'ocean mixed layer', 'precipitation trends', 'temperature trends', 'convection', 'ground ice', 'oxygen', 'phosphate', 'solar induced fluorescence', 'chlorine dioxide', 'sun-earth interactions', 'uv aerosol index', 'volcanic activity', 'potential evapotranspiration', 'ultraviolet wavelengths', 'ice temperature', 'sea surface skin temperature', 'sea surface height', 'sublimation', 'convective surface precipitation rate', 'hydrogen fluoride', 'airglow', 'energy deposition', 'x-ray flux', 'electron flux', 'proton flux', 'magnetic fields/magnetic currents']\n else:\n return ['platform characteristics', 'atmospheric winds','radio wave', 'weather events', 'geomagnetism','atmospheric electricity', 'microwave', 'atmospheric temperature','atmospheric water vapor', 'atmospheric pressure', 'aerosols','atmospheric radiation', 'atmospheric chemistry', 'precipitation','sensor characteristics', 'radar', 'infrared wavelengths','visible wavelengths', 'weather/climate advisories', 'clouds','lidar', 'ocean optics', 'ultraviolet wavelengths','cryospheric indicators', 'land use/land cover', 'topography','surface thermal properties', 'spectral/engineering', 'soils','snow/ice', 'geothermal dynamics', 'natural hazards','surface water', 'vegetation','land surface/agriculture indicators','gravity/gravitational field', 'marine advisories', 'altitude','water quality/water chemistry', 'ocean temperature','ocean winds', 'atmospheric/ocean indicators', 'coastal 
processes','erosion/sedimentation', 'marine sediments', 'ocean chemistry','salinity/density', 'ocean color', 'aquatic ecosystems','vegetation2', 'landscape', 'cloud properties','surface radiative properties', 'geodetics','agricultural plant science', 'forest science','ecological dynamics', 'environmental impacts', 'sustainability','boundaries', 'ecosystems', 'air quality', 'population','infrastructure', 'environmental governance/management','public health', 'economic resources', 'socioeconomics','environmental vulnerability index (evi)', 'human settlements','agricultural chemicals', 'animal science','habitat conversion/fragmentation', 'animals/vertebrates','earth gases/liquids', 'rocks/minerals/crystals','social behavior', 'ground water', 'frozen ground','terrestrial hydrosphere indicators', 'ocean heat budget','biospheric indicators', 'animal commodities', 'fungi', 'plants','carbon flux', 'geomorphic landforms/processes','paleoclimate indicators', 'ocean circulation', 'sea ice','geochemistry', 'visualization/image processing','subsetting/supersetting', 'transformation/conversion','ocean pressure', 'glaciers/ice sheets', 'protists','solar activity', 'sun-earth interactions','sea surface topography', 'solar energetic particle properties','solar energetic particle flux','ionosphere/magnetosphere dynamics','flight data logs','wind dynamics', 'radio wave flux', 'lightning', 'magnetic field','atmospheric conductivity', 'electric field','data synchronization time', 'brightness temperature','upper air temperature', 'water vapor profiles','surface temperature', 'upper level winds','atmospheric pressure measurements', 'water vapor indicators','aerosol particle properties', 'emissivity','trace gases/trace species', 'liquid precipitation','cloud microphysics', 'microwave radiance', 'sensor counts','total pressure', 'airspeed/ground speed', 'total temperature','static pressure', 'humidity', 'radar reflectivity','doppler velocity', 'infrared imagery', 'visible imagery','aerosol backscatter', 'weather forecast', 'tropical cyclones','visible radiance', 'infrared radiance','atmospheric temperature indices', 'cloud droplet distribution','cloud condensation nuclei', 'hydrometeors', 'oxygen compounds','wind profiles', 'liquid water equivalent', 'solar radiation','planetary boundary layer height', 'surface winds','precipitation amount', 'precipitation rate', 'surface pressure','aerosol extinction', 'aerosol optical depth/thickness','tropospheric/high-level clouds (observed/analyzed)','lidar depolarization ratio', 'radar backscatter','radar cross-section', 'return power', 'radial velocity','radiance', 'climate advisories', 'atmospheric emitted radiation','optical depth/thickness', 'ultraviolet flux', 'spectrum width','microwave imagery', 'lidar backscatter', 'radar imagery','snow depth', 'land use/land cover classification','terrain elevation', 'solid precipitation', 'droplet size','droplet concentration/size', 'precipitation anomalies','snow water equivalent', 'total surface precipitation rate','skin temperature', 'water vapor', 'attitude characteristics','land surface temperature', 'reflectance','soil moisture/water content', 'soil temperature','soil bulk density', 'surface roughness', 'present weather','snow density', 'geothermal temperature','aerosol forward scatter', 'floods', 'snow cover', 'sigma naught','precipitable water', 'surface water processes/measurements','surface water features', 'shortwave radiation','photosynthetically active radiation', 'longwave radiation','net radiation', 'flight level 
winds', 'soil moisture','satellite orbits/revolution', 'heat flux','precipitation profiles', 'geopotential height','particulate matter', 'particle images', 'water vapor indices','electrical conductivity', 'gases', 'sea surface temperature','convective clouds/systems (observed/analyzed)','viewing geometry', 'wind shear','carbon and hydrocarbon compounds', 'sea level pressure','water vapor processes', 'ultraviolet radiation','solar irradiance', 'scattering', 'absorption','sea surface temperature indices', 'sedimentation', 'erosion','sediment transport', 'sediments', 'tropopause', 'nan', 'pigments','attenuation/transmission', 'inorganic carbon', 'organic carbon','photosynthetically available radiation', 'chlorophyll','optical depth', 'fluorescence', 'vegetation index', 'gelbstoff','plankton', 'vegetation index2', 'landscape ecology','ultraviolet radiance', 'aerosol radiance','carbonaceous aerosols', 'dust/ash/smoke', 'nitrate particles','organic particles', 'sulfate particles', 'radiative flux','transmittance', 'atmospheric stability','cloud radiative transfer', 'rain storms', 'reflected infrared','thermal infrared', 'incoming solar radiation', 'cloud types','orbital characteristics', 'geolocation','coordinate reference system', 'infrared flux', 'visible flux','albedo', 'lidar waveform', 'plant phenology', 'vegetation cover','crop/plant yields', 'land use classes', 'landscape patterns','forest harvesting and engineering', 'forest management','ecosystem functions', 'leaf characteristics', 'fire ecology','total surface water', 'primary production', 'photosynthesis','canopy characteristics', 'evergreen vegetation', 'crown','deciduous vegetation', 'anisotropy', 'biomass burning','wildfires', 'topographical relief','environmental sustainability','anthropogenic/human influenced ecosystems', 'emissions','sulfur compounds', 'environmental assessments', 'conservation','agriculture production', 'administrative divisions','freshwater ecosystems', 'political divisions', 'urban areas','treaty agreements/results', 'population estimates','nitrogen compounds', 'particulates', 'mortality', 'droughts','earthquakes', 'population distribution', 'fertilizers','animal manure and waste', 'urbanization/urban sprawl','landslides', 'avalanche', 'mangroves', 'volcanic eruptions','pesticides', 'population size', 'population density','rural areas', 'amphibians', 'mammals', 'carbon', 'sulfur oxides','land management', 'natural gas', 'sedimentary rocks','coastal elevation', 'community dynamics','nuclear radiation exposure', 'radiation exposure','poverty levels', 'malnutrition', 'sea level rise','vulnerability levels/index', 'electricity','energy production/use', 'sustainable development','deforestation', 'household income', 'nitrogen', 'phosphorus','terrestrial ecosystems', 'permafrost', 'nutrients','plant characteristics', 'soil gas/air', 'litter characteristics','soil chemistry', 'soil respiration', 'active layer', 'soil depth','cation exchange capacity', 'organic matter', 'soil porosity','soil texture', 'permafrost melt','ground water processes/measurements', 'freeze/thaw','halocarbons and halogens', 'hydrogen compounds', 'biomass','dominant species', 'vegetation species', 'sulfur', 'tree rings','soil classification', 'sea ice concentration', 'reforestation','species/population interactions', 'range changes','topographic effects', 'land resources', 'river ice depth/extent','snow melt', 'river ice', 'animal ecology and behavior','phenological changes', 'forest fire science', 'radiative forcing','soil heat budget', 
'river/lake ice breakup','river/lake ice freeze', 'reclamation/revegetation/restoration','lichens', 'marine ecosystems', 'coastal landforms', 'degradation','forest composition/vegetation structure', 'barometric altitude','volatile organic compounds', 'forest fire danger index','periglacial processes', 'landscape processes','soil horizons/profile', 'soil ph', 'soil water holding capacity','fluvial landforms', 'soil color', 'glacial processes','photochemistry', 'cloud dynamics', 'nitrogen oxides', 'smog','chemical composition', 'actinic flux', 'tropospheric ozone','fossil fuel burning', 'industrial emissions','denitrification rate', 'sunshine', 'soil structure','mosses/hornworts/liverworts', 'hydraulic conductivity','snow/ice temperature', 'water characteristics','outgoing longwave radiation', 'soil compaction', 'soil impedance','canopy transmittance', 'ground water features', 'solids','agricultural expansion', 'pressure tendency', 'visibility','herbivory', 'paleoclimate reconstructions', 'drought indices','fire weather index', 'animal yields', 'teleconnections','carbon dioxide', 'dissolved solids', 'ocean currents', 'salinity','afforestation/reforestation', 'fresh water river discharge','surface water chemistry', 'aeolian landforms','precipitation indices', 'temperature indices', 'forest yields','stratigraphic sequence', 'freeze/frost', 'frost','industrialization', 'ice core records', 'suspended solids','weathering', 'gas flaring', 'ice extent', 'biogeochemical cycles','lake ice', 'isotopes', 'watershed characteristics','transportation', 'soil rooting depth', 'geochemical properties','carbon monoxide', 'cultural features', 'consumer behavior','boundary surveys', 'land productivity', 'sediment composition','calcium', 'magnesium', 'potassium','micronutrients/trace elements', 'sediment chemistry','biogeochemical processes', 'cropping systems','groundwater chemistry', 'reforestation/revegetation','soil infiltration', 'soil fertility','angiosperms (flowering plants)', 'glacial landforms','forest mensuration', 'acid deposition', 'differential pressure','soil erosion', 'trace elements/trace metals', 'soil consistence','snow stratigraphy', 'thermal conductivity', 'estuaries','tidal height', 'plant diseases/disorders/pests','pressure thickness', 'atmospheric heating', 'conduction','evaporation', 'turbulence', 'wind stress','satellite soil moisture index', 'antenna temperature', 'glaciers','ice sheets', 'nitrate', 'ocean mixed layer','precipitation indicators', 'temperature indicators', 'ground ice','alkalinity', 'dissolved gases', 'oxygen', 'ph', 'phosphate','solar induced fluorescence', 'volcanic activity','ice temperature', 'sea surface height', 'airglow','energy deposition', 'x-ray flux', 'electron flux', 'proton flux','magnetic fields/magnetic currents', 'vertical profiles','air temperature', 'dew point temperature','cloud liquid water/ice', 'wind speed', 'wind direction','vertical wind velocity/speed', 'total precipitable water','boundary layer temperature', 'cloud height','cloud droplet concentration/size', 'ozone','cloud base temperature', 'cloud base height', 'rain','cloud optical depth/thickness', 'cirrus/systems','mean radial velocity', 'relative humidity', 'u/v wind components','wind speed/wind direction','digital elevation/terrain model (dem)', 'snow', 'drizzle','particle size distribution', 'hail', 'ambient temperature','stage height', 'rivers/streams', 'hourly precipitation amount','24 hour precipitation amount', 'latent heat flux','cloud fraction', '3 and 6 hour precipitation 
amount','horizontal wind velocity/speed', 'dissolved carbon dioxide','hurricanes', 'tropical cyclone track', 'cloud top height','temperature profiles', 'vertical wind shear','water vapor tendency', 'potential temperature','angstrom exponent', 'water vapor mixing ratio profiles','extreme eastern tropical pacific sst', 'phytoplankton','cloud precipitable water', 'cloud asymmetry', 'cloud ceiling','cloud frequency', 'cloud top pressure', 'cloud top temperature','cloud vertical distribution', 'cloud emissivity','cloud radiative forcing', 'cloud reflectance','maximum/minimum temperature', 'condensation','topographical relief maps', 'evapotranspiration','fire occurrence', 'burned area', 'sulfur dioxide', 'lake/pond','rivers/stream', 'nitrogen dioxide', 'agricultural lands','cyclones', 'urban lands', 'lakes/reservoirs','infant mortality rates', 'methane','non-methane hydrocarbons/volatile organic compounds', 'coal','biodiversity functions', 'wetlands', 'discharge/flow','hydropattern', 'alpine/tundra', 'forests','leaf area index (lai)', 'ammonia', 'nitrous oxide','land subsidence', 'normalized difference vegetation index (ndvi)','chlorinated hydrocarbons', 'methyl bromide', 'methyl chloride','molecular hydrogen', 'fire models', 'heat index','even-toed ungulates', 'species recruitment','population dynamics', 'water depth', 'inundation', 'drainage','respiration rate', 'permafrost temperature','indigenous/native species', 'fire dynamics', 'plant succession','coastal', 'salt marsh', 'boundary layer winds', 'shrubland/scrub','community structure', 'pingo', 'virtual temperature','formaldehyde', 'hydroxyl', 'photolysis rates', 'nitric oxide','molecular oxygen', 'peroxyacyl nitrate', 'stable isotopes','runoff', 'vegetation water content', 'discharge','chlorophyll concentrations', 'water table', 'decomposition','water temperature', 'total dissolved solids', 'biomass dynamics','grasslands', 'savannas', 'grazing dynamics/plant herbivory','multivariate enso index', 'drainage basins','resource development site', 'dunes', 'flood plain','endangered species', 'hydrogen cyanide', 'nutrient cycling','deserts','fraction of absorbed photosynthetically active radiation (fapar)','aquifers', 'dissolved oxygen', 'turbidity', 'conductivity','sulfate', 'water ion concentrations', 'percolation','phosphorous compounds', 'radioisotopes', 'cooling degree days','contour maps', 'methane production/use','natural gas production/use', 'petroleum production/use','consumption rates', 'soil organic carbon (soc)', 'halocarbons','biomass energy production/use', 'estuary','layered precipitable water', 'water vapor concentration profiles','hydrogen chloride', 'nitric acid', 'chlorine nitrate','chlorofluorocarbons', 'dinitrogen pentoxide', 'dimethyl sulfide','vorticity', 'ice fraction', 'temperature tendency','wind direction tendency', 'bromine monoxide', 'chlorine monoxide','methyl cyanide', 'hypochlorous acid', 'methanol', 'hydroperoxy','cloud base pressure', 'temperature anomalies','precipitation trends', 'temperature trends', 'convection','chlorine dioxide', 'uv aerosol index','sea surface skin temperature', 'sublimation','convective surface precipitation rate', 'hydrogen fluoride']", "def eval_final_label(args):\n cfg, lbl = util.get_label_cfg_by_args(args)\n uid = cfg['uniqueid']\n print('We are playing with %s' % uid)\n outdir='models/%s/gate_expert' % uid\n outname='gate_expert_model.pt'\n if KLLOSS:\n outname = 'gate_expert_kldiv_model.pt'\n if args.warm:\n outname = outname.replace('.pt', '_warm.pt')\n mdl_path = 
os.path.join(outdir, outname)\n gate_expert = GateExpertNet(mdl_path, False)\n eval_fun = gate_expert.get_p_y\n\n data = npload(cfg['file_path'], uid)\n datax = data[cfg['x_name']]\n p, v = eval_fun(datax)\n\n label = np.argmax(p, axis=1)\n\n if args.draw:\n fig, ax = plt.subplots()\n n_expert = np.amax(label) + 1\n for i in range(n_expert):\n mask = label == i\n ax.scatter(datax[mask, 0], datax[mask, 1])\n plt.show()\n\n label_name = 'data/pen/gate_expert_label.npy'\n if KLLOSS:\n label_name = label_name.replace('_label', '_kldiv_label')\n if args.warm:\n label_name = label_name.replace('.npy', '_warm.npy')\n np.save(label_name, label)", "def main():\r\n if len(sys.argv) == 2:\r\n if sys.argv[1] == 'branch_name':\r\n print branch_name()\r\n elif sys.argv[1] == 'plat_id':\r\n print plat_id()\r\n else:\r\n print plat_id()\r\n print branch_name()\r\n return", "def step_name(self):\n return \"main\"", "def setup_protocol(self):\n self.ctx.inputs = {\n 'codename': self.inputs.codename,\n 'parameters': {},\n 'settings': {},\n 'options': ParameterData(dict={\n 'resources': {\n 'num_machines': 1\n },\n 'max_wallclock_seconds': 1800,\n }),\n }\n\n if self.inputs.protocol == 'standard':\n self.report('running the workchain in the \"{}\" protocol'.format(self.inputs.protocol.value))\n self.ctx.protocol = {\n 'kpoints_mesh_offset': [0., 0., 0.],\n 'kpoints_mesh_density': 0.2,\n 'convergence_threshold': 2.E-06,\n 'smearing': 'marzari-vanderbilt',\n 'degauss': 0.02,\n 'occupations': 'smearing',\n 'tstress': True,\n 'pseudo_familyname': 'SSSP',\n 'pseudo_data': {\n 'H': {'cutoff': 55, 'dual': 8, 'pseudo': '031US'},\n 'He': {'cutoff': 55, 'dual': 4, 'pseudo': 'SG15'},\n 'Li': {'cutoff': 30, 'dual': 8, 'pseudo': 'GBRV-1.4'},\n 'Be': {'cutoff': 40, 'dual': 8, 'pseudo': 'GBRV-1.4'},\n 'B': {'cutoff': 40, 'dual': 8, 'pseudo': '031PAW'},\n 'C': {'cutoff': 50, 'dual': 8, 'pseudo': 'GBRV-1.2'},\n 'N': {'cutoff': 55, 'dual': 8, 'pseudo': 'THEOS'},\n 'O': {'cutoff': 45, 'dual': 8, 'pseudo': 'GBRV-1.2'},\n 'F': {'cutoff': 50, 'dual': 8, 'pseudo': 'GBRV-1.4'},\n 'Ne': {'cutoff': 200, 'dual': 8, 'pseudo': '100PAW'},\n 'Na': {'cutoff': 30, 'dual': 8, 'pseudo': 'GBRV-1.2'},\n 'Mg': {'cutoff': 35, 'dual': 8, 'pseudo': 'GBRV-1.4'},\n 'Al': {'cutoff': 30, 'dual': 8, 'pseudo': '100PAW'},\n 'Si': {'cutoff': 30, 'dual': 8, 'pseudo': '100US'},\n 'P': {'cutoff': 30, 'dual': 8, 'pseudo': '100US'},\n 'S': {'cutoff': 30, 'dual': 8, 'pseudo': 'GBRV-1.2'},\n 'Cl': {'cutoff': 35, 'dual': 8, 'pseudo': 'GBRV-1.4'},\n 'Ar': {'cutoff': 120, 'dual': 8, 'pseudo': '100US'},\n 'K': {'cutoff': 50, 'dual': 8, 'pseudo': '100US'},\n 'Ca': {'cutoff': 30, 'dual': 8, 'pseudo': 'GBRV-1.2'},\n 'Sc': {'cutoff': 30, 'dual': 8, 'pseudo': 'GBRV-1.2'},\n 'Ti': {'cutoff': 35, 'dual': 8, 'pseudo': 'GBRV-1.4'},\n 'V': {'cutoff': 40, 'dual': 8, 'pseudo': 'GBRV-1.2'},\n 'Cr': {'cutoff': 40, 'dual': 8, 'pseudo': 'GBRV-1.5'},\n 'Mn': {'cutoff': 70, 'dual': 12, 'pseudo': '031PAW'},\n 'Fe': {'cutoff': 90, 'dual': 12, 'pseudo': '031PAW'},\n 'Co': {'cutoff': 55, 'dual': 8, 'pseudo': 'GBRV-1.2'},\n 'Ni': {'cutoff': 45, 'dual': 8, 'pseudo': 'GBRV-1.4'},\n 'Cu': {'cutoff': 40, 'dual': 8, 'pseudo': 'GBRV-1.2'},\n 'Zn': {'cutoff': 40, 'dual': 8, 'pseudo': 'GBRV-1.2'},\n 'Ga': {'cutoff': 35, 'dual': 8, 'pseudo': '031US'},\n 'Ge': {'cutoff': 40, 'dual': 8, 'pseudo': '100PAW'},\n 'As': {'cutoff': 30, 'dual': 8, 'pseudo': '031US'},\n 'Se': {'cutoff': 30, 'dual': 8, 'pseudo': 'GBRV-1.2'},\n 'Br': {'cutoff': 30, 'dual': 8, 'pseudo': 'GBRV-1.4'},\n 'Kr': {'cutoff': 100, 
'dual': 8, 'pseudo': '031US'},\n 'Rb': {'cutoff': 50, 'dual': 4, 'pseudo': 'SG15'},\n 'Sr': {'cutoff': 35, 'dual': 8, 'pseudo': '100US'},\n 'Y': {'cutoff': 35, 'dual': 8, 'pseudo': 'GBRV-1.2'},\n 'Zr': {'cutoff': 30, 'dual': 8, 'pseudo': 'GBRV-1.2'},\n 'Nb': {'cutoff': 35, 'dual': 8, 'pseudo': '031PAW'},\n 'Mo': {'cutoff': 35, 'dual': 4, 'pseudo': 'SG15'},\n 'Tc': {'cutoff': 30, 'dual': 4, 'pseudo': 'SG15'},\n 'Ru': {'cutoff': 40, 'dual': 4, 'pseudo': 'SG15'},\n 'Rh': {'cutoff': 45, 'dual': 8, 'pseudo': '100PAW'},\n 'Pd': {'cutoff': 55, 'dual': 8, 'pseudo': '100PAW'},\n 'Ag': {'cutoff': 35, 'dual': 8, 'pseudo': 'GBRV-1.4'},\n 'Cd': {'cutoff': 40, 'dual': 8, 'pseudo': '031US'},\n 'In': {'cutoff': 35, 'dual': 8, 'pseudo': '031US'},\n 'Sn': {'cutoff': 35, 'dual': 8, 'pseudo': 'GBRV-1.2'},\n 'Sb': {'cutoff': 40, 'dual': 8, 'pseudo': 'GBRV-1.4'},\n 'Te': {'cutoff': 30, 'dual': 8, 'pseudo': 'GBRV-1.2'},\n 'I': {'cutoff': 30, 'dual': 8, 'pseudo': 'GBRV-1.2'},\n 'Xe': {'cutoff': 120, 'dual': 8, 'pseudo': '100US'},\n 'Cs': {'cutoff': 30, 'dual': 8, 'pseudo': 'GBRV-1.2'},\n 'Ba': {'cutoff': 40, 'dual': 4, 'pseudo': 'SG15'},\n 'Hf': {'cutoff': 35, 'dual': 8, 'pseudo': '031US'},\n 'Ta': {'cutoff': 30, 'dual': 8, 'pseudo': 'GBRV-1.2'},\n 'W': {'cutoff': 30, 'dual': 8, 'pseudo': 'GBRV-1.2'},\n 'Re': {'cutoff': 30, 'dual': 8, 'pseudo': 'GBRV-1.2'},\n 'Os': {'cutoff': 35, 'dual': 8, 'pseudo': 'GBRV-1.2'},\n 'Ir': {'cutoff': 40, 'dual': 8, 'pseudo': 'GBRV-1.2'},\n 'Pt': {'cutoff': 30, 'dual': 8, 'pseudo': 'GBRV-1.4'},\n 'Au': {'cutoff': 45, 'dual': 4, 'pseudo': 'SG15'},\n 'Hg': {'cutoff': 30, 'dual': 8, 'pseudo': 'GBRV-1.2'},\n 'Tl': {'cutoff': 30, 'dual': 8, 'pseudo': '031US'},\n 'Pb': {'cutoff': 40, 'dual': 8, 'pseudo': '031PAW'},\n 'Bi': {'cutoff': 35, 'dual': 8, 'pseudo': '031PAW'},\n 'Po': {'cutoff': 45, 'dual': 8, 'pseudo': '100US'},\n 'Rn': {'cutoff': 45, 'dual': 8, 'pseudo': '100US'},\n 'La': {'cutoff': 55, 'dual': 8, 'pseudo': 'Wentzcovitch'},\n 'Ce': {'cutoff': 45, 'dual': 8, 'pseudo': 'Wentzcovitch'},\n 'Pr': {'cutoff': 50, 'dual': 8, 'pseudo': 'Wentzcovitch'},\n 'Nd': {'cutoff': 40, 'dual': 8, 'pseudo': 'Wentzcovitch'},\n 'Sm': {'cutoff': 40, 'dual': 8, 'pseudo': 'Wentzcovitch'},\n 'Eu': {'cutoff': 55, 'dual': 8, 'pseudo': 'Wentzcovitch'},\n 'Tb': {'cutoff': 40, 'dual': 8, 'pseudo': 'Wentzcovitch'},\n 'Dy': {'cutoff': 40, 'dual': 8, 'pseudo': 'Wentzcovitch'},\n 'Ho': {'cutoff': 40, 'dual': 8, 'pseudo': 'Wentzcovitch'},\n 'Er': {'cutoff': 40, 'dual': 8, 'pseudo': 'Wentzcovitch'},\n 'Tm': {'cutoff': 40, 'dual': 8, 'pseudo': 'Wentzcovitch'},\n 'Yb': {'cutoff': 40, 'dual': 8, 'pseudo': 'Wentzcovitch'},\n 'Lu': {'cutoff': 45, 'dual': 8, 'pseudo': 'Wentzcovitch'},\n }\n }", "def test_get_label_line(self):\r\n\r\n sample_id = \"Sample1\"\r\n fasta_label = \"ABCD1234 region=1 length=254\"\r\n bc = \"AAAA\"\r\n corrected_bc = \"AAAT\"\r\n num_errors = 1\r\n actual_label = get_label_line(sample_id, fasta_label, bc, corrected_bc,\r\n num_errors)\r\n\r\n expected_label = \"Sample1 ABCD1234 orig_bc=AAAA new_bc=AAAT bc_diffs=1\"\r\n self.assertEqual(actual_label, expected_label)", "def Label(self) -> str:", "def set_label(termtype, timeperiod):\n label = 'Graph these comma-separated noun phrases (yearly frequencies):' if termtype == 'Noun phrases' and timeperiod == 'Yearly' \\\n else 'Graph these comma-separated noun phrases (monthly frequencies):' if termtype == 'Noun phrases' and timeperiod == 'Monthly' \\\n else 'Graph these comma-separated entity mentions (yearly frequencies):' if termtype == 'Entity 
mentions' and timeperiod == 'Yearly' \\\n else 'Graph these comma-separated entity mentions (monthly frequencies):' if termtype == 'entity mentions' and timeperiod == 'Monthly' \\\n else 'Enter a phrase and get similar terms and the distribution of its \"cluster\"'\n return label", "def label(tree):\n return tree[0]", "def housepredict(intent_request):\r\n \r\n \r\n location_zip = get_slots(intent_request)[\"location\"]\r\n housetype_zip = get_slots(intent_request)[\"housetype\"]\r\n source = intent_request['invocationSource']\r\n \r\n print('received request: ' + str(intent_request))\r\n print (\"housetype\",housetype_zip)\r\n print (\"location1\",location_zip)\r\n\r\n if source == 'DialogCodeHook':\r\n # Perform basic validation on the supplied input slots.\r\n # Use the elicitSlot dialog action to re-prompt for the first violation detected.\r\n slots = get_slots(intent_request)\r\n print('slots are' ,str(slots)) \r\n validation_result = validate_housepred(location_zip)\r\n if not validation_result['isValid']:\r\n slots[validation_result['violatedSlot']] = None\r\n return elicit_slot(intent_request['sessionAttributes'],\r\n intent_request['currentIntent']['name'],\r\n slots,\r\n validation_result['violatedSlot'],\r\n validation_result['message'])\r\n\t\t\r\n validation_result2 = validate_housepred_hstyp(housetype_zip)\r\n if not validation_result2['isValid']:\r\n slots[validation_result2['violatedSlot']] = None\r\n return elicit_slot(intent_request['sessionAttributes'],\r\n intent_request['currentIntent']['name'],\r\n slots,\r\n validation_result2['violatedSlot'],\r\n validation_result2['message'])\r\n\r\n # Pass the price of the flowers back through session attributes to be used in various prompts defined\r\n # on the bot model.\r\n output_session_attributes = intent_request['sessionAttributes'] if intent_request['sessionAttributes'] is not None else {}\r\n if location_zip is not None and housetype_zip is not None:\r\n output_session_attributes['Price'] = house_price_pred(location_zip,housetype_zip)#len(location_zip)*5#house_price_pred(location_zip,housetype_zip) \r\n #price = house_price_pred(location_zip,housetype_zip)# Elegant pricing model\r\n\t\t\t\r\n return delegate(output_session_attributes, get_slots(intent_request))\r\n\r\n # Order the flowers, and rely on the goodbye message of the bot to define the message to the end user.\r\n # In a real bot, this would likely involve a call to a backend service.\r\n print(intent_request['sessionAttributes']['Price']) \r\n return close(intent_request['sessionAttributes'],\r\n 'Fulfilled',\r\n {'contentType': 'PlainText',\r\n 'content': 'Approx. 
next year growth prediction for {hstyp} in {loc} is {prc}%'.format(hstyp=housetype_zip,loc=location_zip,prc=intent_request['sessionAttributes']['Price'])})", "def test_get_prior_string_dict(self):\n categories = {\"asdfa\": 0.1, 2: 0.2, 3: 0.3, \"lalala\": 0.4}\n dim = Categorical(\n \"yolo\", categories, shape=2, default_value=[\"asdfa\", \"lalala\"]\n )\n assert dim.get_prior_string() == (\n \"choices({'asdfa': 0.10, 2: 0.20, 3: 0.30, 'lalala': 0.40}, \"\n \"shape=2, default_value=['asdfa', 'lalala'])\"\n )", "def getLabel(*args):", "def getLabel(*args):", "def getLabel(*args):", "def __rechaindict__(c):\n from TriggerMenu.menu.DictFromChainName import DictFromChainName\n dfcn = DictFromChainName()\n\n pl1 = []\n for pch in c['chainParts']:\n pl1.append(pch['L1item'])\n\n newname = c['chainName'].replace('dv_','').replace('TestChain','j')\n nchlist = [ newname ,c['chainCounter'],c['L1item'],pl1,c['stream'],\n c['groups'],c['EBstep'] ]\n \n return dfcn.getChainDict(nchlist)", "def plot_hypnogram(stages, labeldict=None, title=None, epochlen=30, ax=None,\n verbose=True, xlabel=True, ylabel=True, **kwargs,):\n\n if labeldict is None:\n labeldict = {}\n _defaultdict = {-1: 'A', 0:'Wake', 4:'REM', 1:'S1', 2:'S2', 3:'SWS', 5:'Artefact'}\n if set(stages) == set([0, 1]):\n labeldict = {0:'Wake', 1:'Sleep'}\n elif set(stages) == set([0, 1, 2]):\n labeldict = {0:'Wake', 2:'REM', 1:'NREM'}\n else:\n for stage in _defaultdict:\n if stage in stages:\n labeldict[stage] = _defaultdict[stage]\n if verbose: print('Assuming {}'.format(labeldict))\n\n # check if all stages that are in the hypnogram have a corresponding label in the dict\n for stage in np.unique(stages):\n if not stage in labeldict:\n print('WARNING: {} is in stages, but not in labeldict, stage will be ??'.format(stage))\n\n # create the label order\n labels = [labeldict[l] for l in labeldict]\n labels = sorted(set(labels), key=labels.index)\n\n # we iterate through the stages and fetch the label for this stage\n # then we append the position on the plot of this stage via the labels-dict\n x = []\n y = []\n rem_start = []\n rem_end = []\n for i in np.arange(len(stages)):\n s = stages[i]\n label = labeldict.get(s)\n if label is None:\n p = 99\n if '??' 
not in labels: labels.append('??')\n else :\n p = -labels.index(label)\n \n # make some red line markers for REM, mark beginning and end of REM\n if 'REM' in labels:\n if label=='REM' and len(rem_start)==len(rem_end):\n rem_start.append(i-2)\n elif label!='REM' and len(rem_start)>len(rem_end):\n rem_end.append(i-1)\n if label=='REM' and i==len(stages)-1:\n rem_end.append(i+1)\n \n if i!=0:\n y.append(p)\n x.append(i-1)\n y.append(p)\n x.append(i)\n \n assert len(rem_start)==len(rem_end), 'Something went wrong in REM length calculation'\n\n x = np.array(x)*epochlen\n y = np.array(y)\n y[y==99] = y.min()-1 # make sure Unknown stage is plotted below all else\n\n if ax is None:\n plt.figure()\n ax = plt.gca()\n formatter = matplotlib.ticker.FuncFormatter(lambda s, x: time.strftime('%H:%M', time.gmtime(s)))\n \n ax.plot(x,y, **kwargs)\n ax.set_xlim(0, x[-1])\n ax.xaxis.set_major_formatter(formatter)\n \n ax.set_yticks(np.arange(len(np.unique(labels)))*-1)\n ax.set_yticklabels(labels)\n ax.set_xticks(np.arange(0,x[-1],3600))\n if xlabel: plt.xlabel('Time after recording start')\n if ylabel: plt.ylabel('Sleep Stage')\n if title is not None:\n plt.title(title)\n\n try:\n warnings.filterwarnings(\"ignore\", message='This figure includes Axes that are not compatible')\n plt.tight_layout()\n except Exception: pass\n\n # plot REM in RED here\n for start, end in zip(rem_start, rem_end):\n height = -labels.index('REM')\n ax.hlines(height, start*epochlen, end*epochlen, color='r',\n linewidth=4, zorder=99)", "def final_result(self, hyp, confidence):\n print(\"Final:\"+hyp)", "def handle(self, *args, **options):\n chains = sorted(plugin.BY_REQUIREMENTS.keys())\n for chain in chains:\n print(chain, \"chain:\")\n reqs = sorted(\"{} -> {}\".format(\", \".join(sorted(k)) or 'START',\n \", \".join(sorted(v))) for k, v in\n plugin.BY_REQUIREMENTS[chain].iteritems())\n for req in reqs:\n print(\"-\", req)", "def get_kit_string(currentStep):\n kit_string = currentStep.udf.get(\"ONT prep kit\")\n\n if currentStep.udf.get(\"ONT expansion kit\") != \"None\":\n kit_string += f\" {currentStep.udf.get('ONT expansion kit')}\"\n\n return kit_string", "def name(self):\n return self.viztrail.name", "def processSystematic(observable, xsecType, xsecLevel, systematic, histNominal):\n varHists = []\n linkStr = \"\"\n singlePointSystematics = [\"ERDON\", \"ERDONRETUNE\", \"GLUONMOVETUNE\", \"BFRAG_PETERSON\"]\n\n sPS = 0\n\n if any(singlePointSystematic in systematic for singlePointSystematic in singlePointSystematics):\n sPS = 1\n\n linkStr = \"_\"\n variations = [\"\"]\n for variation in variations:\n path = directory_base + xsec_type + \"_\" + xsec_level + directory_tail + systematic + linkStr + variation + \"/combinedUnfolded/Hyp\" + observable + \"Results.txt\"\n inputfile = open(path, 'r').readlines()\n bins = []\n for line in inputfile:\n bins.append(float(line.split( )[3]))\n bins.append(float(line.split( )[5]))\n bins = sorted(bins)\n binsArray = array('f',bins)\n histNameUp = systematic + \"_UP\" \n histNameDown = systematic + \"_DOWN\" \n histUp = TH1F(histNameUp, histNameUp, len(bins)-1, binsArray)\n histDown = TH1F(histNameDown, histNameDown, len(bins)-1, binsArray)\n histUpFinal = TH1F(\"\", \"\", len(bins)-1, binsArray)\n histDownFinal = TH1F(\"\", \"\", len(bins)-1, binsArray)\n \n ibin = 0\n\n for line in inputfile:\n nomBin = histNominal.GetBinContent(ibin+1)\n nomBinCenter = histNominal.GetBinCenter(ibin+1)\n unc = float(line.split( )[7])\n if systematic == \"DY\":\n print \"DY UP = \" + str(1.0 + unc)\n print 
\"DY DOWN = \" + str(1.0 - unc)\n\n\n histUp.SetBinContent(ibin+1, 1.0 + unc)\n histDown.SetBinContent(ibin+1,1.0 - unc)\n ibin = ibin + 1 \n\n histUpVis = histUp.Clone()\n histDownVis = histDown.Clone()\n histUpFinal = histUp.Clone()\n histDownFinal = histDown.Clone()\n\n if systematic == \"PDF\":\n histUpFinal, histDownFinal = reNormalise(histNominal, histUpVis, histDownVis)\n\n return (histUpFinal, histDownFinal)", "def get_labels(rf_pipeline):\n return rf_pipeline.stages[0].labels", "def _repr_(self):\n return \"Category of hyperbolic models of {}\".format(self.base())", "def label(cmd):\r\n cmd = cmd.replace('make][.DP', 'make1][.NP')\r\n cmd = cmd.replace('make][.SC', 'make2][.SC')\r\n cmd = re.sub('(draw.*)one','\\\\1one1',cmd)\r\n cmd = re.sub('(make1.*)one','\\\\1one1',cmd)\r\n cmd = re.sub('(make2.*)one','\\\\1one2',cmd)\r\n cmd = re.sub('(move.*)one','\\\\1one2',cmd)\r\n cmd = re.sub('(hide.*)one','\\\\1one2',cmd)\r\n cmd = '[result ' + cmd + ']' #dummy function for plop\r\n return cmd", "def get_match_label(match):\n # Define variables\n home_goals = match['home_team_goal']\n away_goals = match['away_team_goal']\n\n label = pd.Series()\n label['match_api_id'] = match['match_api_id']\n\n # Identify match label\n if home_goals > away_goals:\n label['label'] = \"Win\"\n if home_goals == away_goals:\n label['label'] = \"Draw\"\n if home_goals < away_goals:\n label['label'] = \"Defeat\"\n\n # Return label (match_api_id; label)\n return label", "def main(_):\n hps = LM.get_default_hparams().parse(FLAGS.hpconfig)\n hps._set(\"num_gpus\", FLAGS.num_gpus)\n print ('*****HYPER PARAMETERS*****')\n print (hps)\n print ('**************************')\n\n vocab = Vocabulary.from_file(os.path.join(FLAGS.datadir, \"vocabulary.txt\"))\n\n if FLAGS.mode == \"train\":\n #hps.batch_size = 256\n dataset = Dataset(vocab, os.path.join(FLAGS.datadir, \"train.txt\"))\n run_train(dataset, hps, os.path.join(FLAGS.logdir, \"train\"), ps_device=\"/gpu:0\")\n elif FLAGS.mode.startswith(\"eval\"):\n data_dir = os.path.join(FLAGS.datadir, \"eval.txt\")\n #predict_model = prediction.Model('/dir/ckpt',os.path.join(FLAGS.datadir, \"vocabulary.txt\"), hps)\n\n dataset = Dataset(vocab, data_dir, deterministic=True)\n prefix_words = \"<brk>\".split()\n predict_model = predict.Model(hps, FLAGS.logdir, FLAGS.datadir)\n print ('start input')\n out = predict_model.predictnextkwords(prefix_words, FLAGS.num_sen)\n for row in out:\n print(' '.join(row) + \"\\n\")\n print(\"len_out: \" + str(len(out)))\n #prediction.topkwords(prefix_words, dataset, hps, FLAGS.logdir, FLAGS.mode)\n #sentence_ppl(prefix_words,dataset, hps, FLAGS.logdir, FLAGS.mode)\n #print vocab\n #dataset = Dataset(vocab, os.path.join(FLAGS.datadir, \"eval.txt\"))\n #run_eval(dataset, hps, FLAGS.logdir, FLAGS.mode, FLAGS.eval_steps)", "def get_new_word(key, chains):\n values = chains[key]\n return choice(values)", "def parse_logic(self, logic):\n if 'xxx' in logic.conf:\n # self.function(logic['name'])\n pass", "def forward(self, waveforms: Tensor) -> str:\n logits, _ = self.model(waveforms) # [batch, num_seq, num_label]\n best_path = torch.argmax(logits[0], dim=-1) # [num_seq,]\n prev = ''\n hypothesis = ''\n for i in best_path:\n char = self.labels[i]\n if char == prev:\n continue\n if char == '<s>':\n prev = ''\n continue\n hypothesis += char\n prev = char\n return hypothesis.replace('|', ' ')", "def make_text(chains):\n\n key = random.choice(chains.keys())\n return_string = key[0] + \" \" + key[1]\n\n while chains[key] != None: #chains.get(key[-1]) != 
None: \n next_word = random.choice(chains[key])\n return_string = return_string + \" \" + next_word\n key = (key[1],next_word)\n\n return return_string", "def get_chain_name (chain):\n if \"-\" in chain.id:\n id_chain=chain.id[-1]\n else:\n id_chain=chain.id\n return id_chain", "def _get_label(self):\n return self.label", "def get_clarifications_socialiqa(ex, nlp, comet_model):\n context = ex['context']\n question = ex['question']\n\n question_to_comet_relation = {\n \"What will [NAME] want to do next?\": \"xWant\",\n \"What will [NAME] want to do after?\": \"xWant\",\n \"How would [NAME] feel afterwards?\": \"xReact\",\n \"How would [NAME] feel as a result?\": \"xReact\",\n \"What will [NAME] do next?\": \"xReact\",\n \"How would [NAME] feel after?\": \"xReact\",\n \"How would you describe [NAME]?\": \"xAttr\",\n \"What kind of person is [NAME]?\": \"xAttr\",\n \"How would you describe [NAME] as a person?\": \"xAttr\",\n \"Why did [NAME] do that?\": \"xIntent\",\n \"Why did [NAME] do this?\": \"xIntent\",\n \"Why did [NAME] want to do this?\": \"xIntent\",\n \"What does [NAME] need to do beforehand?\": \"xNeed\",\n \"What does [NAME] need to do before?\": \"xNeed\",\n \"What does [NAME] need to do before this?\": \"xNeed\",\n \"What did [NAME] need to do before this?\": \"xNeed\",\n \"What will happen to [NAME]?\": \"xEffect\",\n \"What will happen to [NAME] next?\": \"xEffect\"\n }\n\n clarifications = []\n personx, _ = get_personx(nlp, context)\n relation = question_to_comet_relation.get(re.sub(personx, \"[NAME]\", question, flags=re.I), None)\n\n if relation is not None:\n outputs = {relation: comet_model.predict(context, relation, num_beams=5)}\n\n prefix = CATEGORY_TO_PREFIX[relation]\n for out_event in outputs[relation]:\n if out_event != \"none\" and out_event != \"\":\n if not out_event.lower().startswith(\"person\") and not out_event.lower().startswith(\"other\"):\n out_event = \" \".join((prefix, out_event))\n\n out_event = re.sub(\"personx\", personx, out_event, flags=re.I)\n out_event = re.sub(\"person x\", personx, out_event, flags=re.I)\n out_event = re.sub(\"persony\", \"others\", out_event, flags=re.I)\n out_event = re.sub(\"person y\", \"others\", out_event, flags=re.I)\n\n clarifications.append((question, out_event))\n\n return clarifications", "def label(efo):\n url = 'https://www.ebi.ac.uk/ols/api/ontologies/efo/terms?iri=http://www.ebi.ac.uk/efo/'+efo\n try:\n response = requests.get(url).json()\n except:\n return \"\"\n if 'error' in response.keys():\n return \"\"\n return response['_embedded']['terms'][0]['label']", "def chain_run(ctx, chain_name, mine, verbosity):\n logger = logging.getLogger('populus.cli.chain.run')\n project = ctx.obj['PROJECT']\n\n chain = project.get_chain(chain_name)\n\n if isinstance(chain, BaseGethChain):\n chain.geth.register_stdout_callback(logger.info)\n chain.geth.register_stderr_callback(logger.error)\n\n with chain:\n try:\n while True:\n time.sleep(0.2)\n except KeyboardInterrupt:\n pass", "def getRaceLabel(x,binary=False):\r\n ID = x.split(\"/\")[-1].split(\"_\")[0]\r\n label = truth[truth.DummyID == int(ID)]['Medview_Race'].values[0]\r\n\r\n if label == 'African American':\r\n return 0\r\n elif label == \"White\":\r\n return 1\r\n else:\r\n return 2", "def pipeline(self):\n predicted, real = self.surface_segmentation()\n # print(predicted[0:10])\n # print(len(predicted))\n test_file = \"../morphology/\" + self.language + \"/\" + self.language + \".clean.test.conll\"\n input_file = open(os.path.join(sys.path[0], test_file), 'r')\n 
segmented_words = []\n\n # Only one entry per word for dictionary\n\n words = []\n labels = []\n for line in input_file.readlines():\n tmp = line.rstrip('\\n').split(\" | \")[0]\n label_arr = line.rstrip('\\n').split(\" | \")[2]\n label_arr = get_labels(label_arr)\n if tmp not in words:\n words.append(tmp)\n labels.append(label_arr)\n\n segmented_words = []\n for word, label in zip(words, predicted):\n tmp = []\n for i in range(len(label)):\n if label[i] == \"S\" or label[i] == \"E\":\n tmp.append(word[i])\n tmp.append(\"-\")\n else:\n tmp.append(word[i])\n tmp = \"\".join(tmp).rstrip(\"-\")\n segmented_words.append(tmp)\n\n features = surface_labelled_data_preparation_pipeline(segmented_words)\n predicted, test = self.__surface_labelled_segmentation_pipeline(features)\n return predicted, labels", "def get_clarifications_piqa(ex, nlp, comet_model):\n # Questions are usually like \"how would you do something?\"\n personx = \"you\"\n\n input_event = ex[\"goal\"].replace(\"?\", \"\")\n outputs = {category: comet_model.predict(input_event, category, num_beams=5) for category in comet_model.categories}\n\n # We only care about preconditions and postconditions for X\n relevant_categories = [\"xIntent\", \"xNeed\", \"xEffect\", \"xWant\"]\n curr_events = []\n for category in relevant_categories:\n prefix = CATEGORY_TO_PREFIX[category]\n for out_event in outputs[category]:\n if out_event != \"none\" and out_event != \"\":\n if not out_event.lower().startswith(\"person\") and not out_event.lower().startswith(\"other\"):\n out_event = \" \".join((prefix, out_event))\n\n out_event = re.sub(\"personx\", personx, out_event, flags=re.I)\n out_event = re.sub(\"person x\", personx, out_event, flags=re.I)\n out_event = re.sub(\"persony\", \"others\", out_event, flags=re.I)\n out_event = re.sub(\"person y\", \"others\", out_event, flags=re.I)\n\n question = CATEGORY_TO_QUESTION[category].replace(\"PersonX\", personx)\n curr_events.append((question, out_event))\n\n return curr_events", "def get_label(self):\n return self.job[self.label_key]", "def lhco_line(self):\n if not self.check_def(['eta','phi','pt','mass','pid']): \n sys.exit('Particle error: some attribute not defined')\n\n jet=[1,2,3,4,5,6,21]\n inv_list=[12,14,16,18,1000022,1000023,1000024,1000025,1000035]\n\n #define pid-> type\n pid_to_type={11:1,-11:1,13:2,-13:2,15:3,-15:3,22:0}\n for data in jet:\n pid_to_type[data]=4\n pid_to_type[-data]=4\n for data in inv_list:\n pid_to_type[data]=6\n pid_to_type[-data]=6\n\n\n \n type=''\n for key in pid_to_type.keys():\n if self.pid==key:\n type=pid_to_type[key]\n break\n \n if type=='':\n print 'Warning unknown type'\n return ''\n\n text =' '+str(type) #type LHCO\n text+=' '+str(self.eta) #ETA\n text+=' '+str(self.phi) #PHI\n text+=' '+str(self.pt) #PT\n text+=' '+str(self.mass) #JMASS\n if self.pid in [11,13]: #NTRK\n text+=' -1' \n else:\n text+=' 1'\n if self.pid in [-5,5]: #BTAG\n text+=' 2'\n else:\n text+=' 0'\n text+=' 0' #HAD/EM\n text+=' 0' #DUMMY 1\n text+=' 0' #DUMMY 2\n \n return text", "def get_chain(self, chain_id):\n if self.chain_dict.has_key(chain_id):\n return self.chain_dict[chain_id]\n return None", "def label(self) -> str:\n return self[\"label\"]", "def label(cmd):\n cmd = cmd.replace('make][.DP', 'make1][.NP')\n cmd = cmd.replace('make][.SC', 'make2][.SC')\n cmd = re.sub('(draw.*)one','\\\\1one1',cmd)\n cmd = re.sub('(make1.*)one','\\\\1one1',cmd)\n cmd = re.sub('(make2.*)one','\\\\1one2',cmd)\n cmd = re.sub('(move.*)one','\\\\1one2',cmd)\n cmd = 
re.sub('(hide.*)one','\\\\1one2',cmd)\n cmd = '[result ' + cmd + ']' #dummy function for plop\n return cmd", "def SetupFcn(self):\n return _hypre.HypreParaSails_SetupFcn(self)", "def generate_HLD(component, output):\n component.delete_none_attrs()\n yaml.indent(mapping=2, sequence=4, offset=2)\n d = component.asdict()\n\n yaml.dump(d, output)" ]
[ "0.67336076", "0.6324823", "0.5675857", "0.56554145", "0.5556557", "0.54489195", "0.5372017", "0.53109175", "0.5194049", "0.509463", "0.50940394", "0.49333954", "0.49087635", "0.4816282", "0.47896782", "0.47874323", "0.4760651", "0.47558823", "0.47320828", "0.47163337", "0.4697964", "0.4697964", "0.4697964", "0.46477467", "0.46188495", "0.4604326", "0.45948538", "0.45567554", "0.4532131", "0.4518247", "0.45069382", "0.4505159", "0.4496154", "0.4493104", "0.44868314", "0.4484499", "0.44842538", "0.44788465", "0.4477625", "0.4468817", "0.4454167", "0.44472125", "0.44438758", "0.44295356", "0.4416616", "0.44085002", "0.4408289", "0.4407519", "0.4402998", "0.43985268", "0.43980908", "0.43762022", "0.43733582", "0.4373203", "0.43682143", "0.43631634", "0.43624112", "0.43573672", "0.43348658", "0.433402", "0.4316019", "0.4310848", "0.4306008", "0.42983785", "0.42978182", "0.42926103", "0.42795235", "0.42795235", "0.42795235", "0.42770424", "0.4275674", "0.42737427", "0.42713505", "0.42654365", "0.42651907", "0.42640334", "0.42553523", "0.42533186", "0.42526105", "0.4250961", "0.42506745", "0.42496133", "0.42416823", "0.42362565", "0.42317078", "0.42307195", "0.42296165", "0.42284736", "0.42284212", "0.42218295", "0.4220016", "0.42185876", "0.42176226", "0.4216664", "0.42116132", "0.42020443", "0.42009687", "0.42005658", "0.42001638", "0.418759" ]
0.6201193
2
(Set, float, float, int, str) -> list Filters a set of Products according to the parameters. This function is responsible for determining whether filtering with tags should be applied or not.
def get_matching_products(products, lat, lng, radius, tags):
    if tags:
        tag_list = tags.split(',')
        return list([
            product for product in products
            if is_matching_product_with_tags(
                product, lat, lng, radius, tag_list
            )
        ])
    else:
        return list([
            product for product in products
            if is_matching_product(
                product, lat, lng, radius
            )
        ])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def filter_generic(products, listings, result=None):\n print \"Apply Generic Filtering \"\n if result == None:\n result = {}\n matched_listings = []\n for alist in listings:\n manufacturer, renamed_manufacturer = find_manufacturer(products, alist)\n if manufacturer == False:\n continue\n for product in products[manufacturer]:\n product = product[0] # get product information all in lower case\n if not does_list_contains_model(\\\n alist, product['model'], manufacturer):\n continue\n if product['product_name'] not in result:\n result[product['product_name']] = []\n for matched_list in listings[alist]:\n matched_manufacturer =\\\n matched_list['manufacturer'].lower()\n if manufacturer not in matched_manufacturer and\\\n matched_manufacturer not in renamed_manufacturer:\n continue\n result[product['product_name']].append(matched_list)\n matched_listings.append(alist)\n remove_matched_list(listings, matched_listings)\n length_listings(listings)\n return result", "def filter_queryset(self, queryset):\n query_params = self.request.query_params\n # validate query parameters\n exception_response = ParamsCheck.validate(\n query_params, APIParams.products_list_params\n )\n if exception_response:\n return exception_response\n\n products_qs = self.get_queryset() # all\n\n category = query_params.get(\"category\", None)\n exclude_ingredients = query_params.get(\"exclude_ingredient\", None)\n exclude_ingredients = self._clean_string(exclude_ingredients)\n include_ingredients = query_params.get(\"include_ingredient\", None)\n include_ingredients = self._clean_string(include_ingredients)\n\n # filtering part\n if category is not None:\n products_qs = products_qs.filter(category=category)\n for each in include_ingredients:\n products_qs = products_qs.filter(ingredients__name=each)\n for each in exclude_ingredients:\n products_qs = products_qs.exclude(ingredients__name=each)\n\n return products_qs", "def filter_products(self, products, mrp_only=False, extension=None, **filters):\n\n filterMask = np.full(len(products), True, dtype=bool)\n\n # Applying the special filters (mrp_only and extension)\n if mrp_only:\n filterMask &= (products['productGroupDescription'] == \"Minimum Recommended Products\")\n\n if extension:\n mask = np.full(len(products), False, dtype=bool)\n for elt in extension:\n mask |= [False if isinstance(x, np.ma.core.MaskedConstant) else x.endswith(elt)\n for x in products[\"productFilename\"]]\n filterMask &= mask\n\n # Applying the rest of the filters\n for colname, vals in filters.items():\n\n if type(vals) == str:\n vals = [vals]\n\n mask = np.full(len(products), False, dtype=bool)\n for elt in vals:\n mask |= (products[colname] == elt)\n\n filterMask &= mask\n\n return products[np.where(filterMask)]", "def filter(self, filters):", "def test_tag_filter(self):\n request = RequestFactory().get('/?tags=foo&tags=bar')\n qs = MockQuerySet()\n filter = TestFilterSet(request.GET, qs)\n self.assertEquals(filter.qs.filters['tags__slug__in'], ['foo', 'bar'])", "def filter_queryset(self, queryset):\n tags = self.request.GET.getlist(\"tag\")\n if tags:\n for tag in tags:\n queryset = queryset.filter(tag__tag=tag)\n return super().filter_queryset(queryset)", "def filter_by_tag(self, tags):\n\n if isinstance(tags, string_types):\n message = \"tags should be a list or None, got tags={}\".format(tags)\n raise TypeError(message)\n\n data_collection = DataCollection()\n for item in self.iteritems():\n if tags == [] or tags == None or all([tag in item.tags for tag in tags]):\n 
data_collection.add_data(item)\n return data_collection", "def filter_queryset(self, request, queryset, view):\n # filter by tags if available.\n tags = request.query_params.get(\"tags\", None)\n\n if tags and isinstance(tags, six.string_types):\n tags = tags.split(\",\")\n return queryset.filter(tags__name__in=tags)\n\n return queryset", "def test_filter_remove_only_bad_products(self):\n list_of_products = [self.good_product, self.bad_product]\n self.assertEqual(\n ProductValidator().filter(list_of_products),\n [self.good_product])", "def apply(self, catalog):\n out = []\n for product in catalog:\n valid = True\n for key, filtr in self.filters.items():\n valid = valid and filtr(product[key])\n if valid:\n out.append(product)\n out = sorted(out, key=lambda x: x['onDemandUsdPrice'])\n return self.limit_size(out)", "def check_intersection(product_list, product):\n best_products = []\n if len(product_list) > 0:\n\n for current_product in product_list:\n intersection_product = set(current_product.categories.all()) & set(\n product.categories.all())\n\n if len(intersection_product) >= 1:\n best_products.append(current_product)\n\n return best_products", "def filter_category_products(products, searchterm='', for_sale=None, **kwargs):\n\n searchterm = searchterm.lower()\n filtered_products = []\n for product in products:\n if searchterm not in product['name'].lower():\n continue\n if for_sale and not product['records']:\n continue\n filtered_products.append(product)\n return filtered_products", "def test_set_tag_filters(self):\n filters = QueryFilterCollection()\n\n url = \"?\"\n query_params = self.mocked_query_params(url, OCPTagView)\n handler = OCPTagQueryHandler(query_params)\n tag_keys = handler.get_tag_keys(filters=False)\n\n filter_key = tag_keys[0]\n\n filter_value = \"filter\"\n group_by_key = tag_keys[1]\n\n group_by_value = \"group_By\"\n\n url = f\"?filter[tag:{filter_key}]={filter_value}&group_by[tag:{group_by_key}]={group_by_value}\"\n query_params = self.mocked_query_params(url, OCPCpuView)\n handler = OCPReportQueryHandler(query_params)\n filters = handler._set_tag_filters(filters)\n\n expected = f\"\"\"<class 'api.query_filter.QueryFilterCollection'>: (AND: ('pod_labels__{filter_key}__icontains', '{filter_value}')), (AND: ('pod_labels__{group_by_key}__icontains', '{group_by_value}')), \"\"\" # noqa: E501\n\n self.assertEqual(repr(filters), expected)", "def clean(self, products):\n clean_products = []\n clean_categories = set()\n for product in products:\n if self.is_valid(product):\n product[\"categories\"] = [\n cat.strip().lower().capitalize()\n for cat in product[\"categories\"].split(\",\")\n ]\n clean_products.append(product)\n clean_categories |= set(product[\"categories\"])\n return clean_categories, clean_products", "def test_tag_filter(self):\n request = RequestFactory().get('/?search=foobar')\n qs = MockQuerySet()\n filter = TestFilterSet(request.GET, qs)\n self.assertEquals(filter.qs.filters['name__icontains'], 'foobar')\n self.assertEquals(filter.qs.filters['status__startswith'], 'foobar')", "def filter(\n self, items: Iterable[Product], spec: Specification\n ) -> Generator[Product, None, None]:\n return (item for item in items if spec.is_satisfied(item))", "def _fuzzy_products(self, package: ImagePackage) -> typing.List[str]:\n\n products = {package.name}\n # TODO: add the generic product generation code (including nomatch exclusions here)\n return list(products)", "def call(self):\r\n clean_products = []\r\n\r\n for category in CATEGORIES:\r\n print(f\"Chargement 
des produits de type {category}\")\r\n api_url = SEARCH_API_URL + \\\r\n (f\"?search_terms={category}\"\r\n \"&search_tag=category&sort_by=unique_scans_n\"\r\n \"&page_size=1000&json=1\")\r\n json_response = requests.get(api_url).json()\r\n products = json_response[\"products\"]\r\n\r\n for product in products:\r\n clean_product = {\r\n k: v for k, v in product.items()\r\n if k in FIELD_NEEDED and v != ''}\r\n clean_products.append(clean_product)\r\n\r\n return clean_products", "def filter(self, filters:list)->list:\n for item in self.list:\n use_item = True\n for filter in filters:\n filter_key, filter_value, filter_type = filter\n if filter_type == \"<\" and item[filter_key] >= filter_value:\n use_item = False\n break\n elif filter_type == \">\" and item[filter_key] <= filter_value:\n use_item = False\n break\n elif filter_type == \"<=\" and item[filter_key] > filter_value:\n use_item = False\n break\n elif filter_type == \">=\" and item[filter_key] < filter_value:\n use_item = False\n break\n elif filter_type == \"=\" and not item[filter_key] == filter_value:\n use_item = False\n break\n if use_item:\n yield item", "def filter(self, artifacts: ArtifactsList) -> ArtifactsList:\n print(self.my_param)\n return artifacts", "def filter_products(self, products: list):\n # Get all products that has no parent\n independent = [product for product in products if product[\"parent_id\"] is None]\n # Get all products that has parent\n dependent = [\n product for product in products if product[\"parent_id\"] is not None\n ]\n # Sort dependent products by parent_id, so that a child will be always\n # inserted after the parent\n dependent = sorted(dependent, key=lambda item: item[\"parent_id\"])\n # Saves the total of objects\n self.total = len(independent) + len(dependent)\n return independent, dependent", "def filter_by_color(self, products, color):\n for p in products:\n # Selects products of the specified color\n if p.color == color:\n yield p", "def get_all_products(request, *args, query_str=''):\n\n active_filters = []\n products = Product.objects.all()\n product_fields = (\n (\"size\", \"options\"),\n (\"price\", \"range\"),\n (\"colours\", \"options\"),\n (\"year\", \"range\"),\n (\"collection\", \"equals\")\n )\n field_ranges = []\n for field, filter_type in product_fields:\n if filter_type == \"range\":\n (min_val) = products.filter().values_list(field).order_by(field)[0]\n (max_val) = products.filter().values_list(field).order_by\\\n (f'-{field}')[0]\n obj = {}\n obj['min_val'] = int(min_val[0])\n obj['max_val'] = int(max_val[0])\n obj['field'] = field\n field_ranges.append(obj)\n\n # if filter_type == \"options\":\n\n\n\n if request.GET:\n for key in request.GET:\n if \"__range\" in key:\n val = request.GET.getlist(key)\n val[:] = [int(x) for x in val]\n active_filters.append(\n [key.split(\"__\")[0], key.split(\"__\")[1], val]\n )\n obj = {}\n obj[key] = val\n query = Q(**obj)\n products = products.filter(query)\n\n\n if 'collection' in request.GET:\n collection_pk = request.GET['collection']\n if not collection_pk or not collection_pk.isnumeric():\n if query:\n return redirect(\n reverse('products'),\n kwargs={'query_str': query}\n )\n else:\n return redirect(reverse('products'))\n\n products = products.filter(collection=collection_pk)\n\n if 'q' in request.GET:\n query = request.GET['q']\n query_str = query\n if not query:\n return redirect(reverse('products'))\n\n queries = Q(display_name__icontains=query) | \\\n Q(name__icontains=query)\n products = products.filter(queries)\n\n\n 
context = {\n 'products': products,\n 'MEDIA_URL': settings.MEDIA_URL,\n 'search_term': query_str,\n 'filters': product_fields,\n 'field_ranges': field_ranges,\n 'active_filters': active_filters\n }\n\n return render(request, 'products/products.html', context)", "def _apply_filters(self, metadata):\n if \"keywords\" in self.filters:\n if not metadata.keywords:\n return False\n if not all(keyword in metadata.keywords for keyword in self.filters[\"keywords\"]):\n return False\n if \"features\" in self.filters:\n if not metadata.features:\n return False\n if not all(feature in metadata.features for feature in self.filters[\"features\"]):\n return False\n if \"authors\" in self.filters:\n if not metadata.authors:\n return False\n if not all(author in metadata.authors for author in self.filters[\"authors\"]):\n return False\n if \"version\" in self.filters:\n if not metadata.pylith_version:\n return False\n for verMeta in metadata.pylith_version:\n if not eval(\"{ver} {verMeta}\".format(ver=self.filters[\"version\"], verMeta=verMeta)):\n return False\n return True", "def filter(self, *args, **kwargs):", "def filterPick(list, filter, classification):\n y = []\n for job in list:\n x = [(job, classification) for l in job for m in (filter(l),) if m]\n y.append(x)\n return y", "def type_filter(self, items, types=None):", "def extract(self, filter_by='relevance', all_pages=False, limit=None):\r\n\r\n page = 1\r\n total_of_pages = 1\r\n products_list = []\r\n\r\n while page <= total_of_pages:\r\n\r\n products_code = None\r\n while products_code is None:\r\n soup = self.__olx_requests(filter_by, page)\r\n products_code = soup.find('div', {'class': \"sc-1fcmfeb-0 WQhDk\"})\r\n\r\n if page == 1 and all_pages is True and limit is None:\r\n max_pages = self.__number_of_pages(soup)\r\n if max_pages is not None:\r\n total_of_pages = max_pages\r\n\r\n elif page == 1 and all_pages is False and isinstance(limit, int):\r\n max_pages = self.__number_of_pages(soup)\r\n if max_pages is not None and limit <= max_pages:\r\n total_of_pages = limit\r\n else:\r\n total_of_pages = 1\r\n\r\n # Individual product - TAG\r\n for tags_products in [\"sc-1fcmfeb-2 ggOGTJ\", \"sc-1fcmfeb-2 hFOgZc\"]:\r\n for each_product in products_code.findAll('li', {'class': tags_products}):\r\n\r\n # Each product dictionary\r\n product_dict = {}\r\n\r\n # Name\r\n PRODUCT_NAME_TAG = 'fnmrjs-8 kRlFBv'\r\n if each_product.find('div', {'class': PRODUCT_NAME_TAG}) is not None:\r\n product_name = each_product.find('div', {'class': PRODUCT_NAME_TAG}).text\r\n if 'Anunciante online' in product_name:\r\n product_name = product_name.replace('Anunciante online', '')\r\n product_dict['Name'] = product_name\r\n else:\r\n continue\r\n\r\n # ID\r\n product_id = each_product.find('a', {'data-lurker-detail': 'list_id'})['data-lurker_list_id']\r\n product_dict['ID'] = product_id\r\n\r\n # Image\r\n PRODUCT_IMAGE_TAG = 'fnmrjs-5 jksoiN'\r\n product_img = each_product.find('div', {'class': PRODUCT_IMAGE_TAG}).find('img')['src']\r\n product_dict['Image'] = product_img\r\n\r\n # Price\r\n PRODUCT_PRICE_TAG = 'fnmrjs-15 clbSMi'\r\n if each_product.find('div', {'class': PRODUCT_PRICE_TAG}).text:\r\n product_price = each_product.find('div', {'class': PRODUCT_PRICE_TAG}).text\r\n product_dict['Price'] = re.findall('R\\$ (\\d*,?\\.?\\d*)|$', product_price)[0].replace('.', '')\r\n else:\r\n product_dict['Price'] = '-'\r\n\r\n # Date\r\n PRODUCT_DATE_TAG = 'fnmrjs-18 gMKELN'\r\n product_date = each_product.find('div', {'class': PRODUCT_DATE_TAG}).text\r\n if 'Hoje' in 
product_date:\r\n product_date = product_date.replace('Hoje', datetime.date.today().strftime(\"%d/%m \"))\r\n product_dict['Date'] = product_date\r\n\r\n elif 'Ontem' in product_date:\r\n product_date = product_date.replace('Ontem', (datetime.date.today() - datetime.timedelta(days=1)).strftime(\"%d/%m \"))\r\n product_dict['Date'] = product_date\r\n\r\n else:\r\n product_date = product_date.replace(re.findall(' ([a-z]*)\\d*', product_date)[0], months[re.findall(' ([a-z]*)\\d*', product_date)[0]]).replace(' ', r'/', 1)\r\n product_dict['Date'] = product_date\r\n\r\n # Location\r\n PRODUCT_LOCATION_PARENT_TAG = 'fnmrjs-21 bktOWr'\r\n PRODUCT_LOCATION_CHILD_TAG = 'fnmrjs-13 hdwqVC'\r\n product_location = each_product.find('div', {'class': PRODUCT_LOCATION_PARENT_TAG}).find('p', {'class': PRODUCT_LOCATION_CHILD_TAG}).text\r\n product_dict['City'] = re.findall('(.*\\w*),|$', product_location)[0]\r\n product_dict['Neighborhood'] = re.findall(r',(.*\\w*) - |$', product_location)[0].strip()\r\n product_dict['State'] = states_ddd[re.findall(r'DDD (\\d*)|$', product_location)[0]]\r\n\r\n # Link\r\n product_link = each_product.find('a', {'data-lurker-detail': 'list_id'})['href']\r\n product_dict['Link'] = product_link\r\n\r\n # List of Products\r\n products_list.append(product_dict)\r\n\r\n # Next page\r\n page += 1\r\n\r\n return products_list", "def order_filter(self,elements):", "def get_queryset(self):\n tags = self.request.query_params.get('tags')\n ingredient = self.request.query_params.get('ingredient')\n queryset = self.queryset\n if tags:\n tags_id = self._params_to_int(tags)\n queryset = queryset.filter(tags__id__in =tags_id)\n if ingredient:\n ingredient_id = self._params_to_int(ingredient)\n queryset = queryset.filter(ingredient__id__in = ingredient_id)\n\n return queryset.filter(user = self.request.user)", "def get_queryset(self):\n tags = self.request.query_params.get('tags')\n categories = self.request.query_params.get('categories')\n user = self.request.query_params.get('user')\n queryset = self.queryset\n\n if tags:\n tags_title = self._params(tags)\n queryset = queryset.filter(tags__title__in=tags_title)\n\n if categories:\n categories_title = self._params(categories)\n queryset = queryset.filter(categories__title__in=categories_title)\n\n if user:\n user_id = self._params_to_ints(user)\n queryset = queryset.filter(user__id__in=user_id)\n return queryset", "def testFiltrerTag(self):\n\t\ttags = (\n\t\t\t \t('in,', 'in'),\n\t\t\t \t('casse-tete', 'casse-tete'),\n\t\t\t \t)\n\t\t\n\t\tf = Flickr()\n\t\tfor tag, tag_filter in tags:\n\t\t\tresult = f.filtrer_tag(tag)\n\t\t\tself.assertEqual(result, tag_filter)", "def FilterToKnownTags(self, tags: Iterable[str]) -> Set[str]:\n return self._GetKnownTags() & set(tags)", "def filter_for_tags(cls, image: np.ndarray, tags: Set[Tag]) -> np.ndarray:\n masks = []\n for tag in tags:\n color = tag.color;\n red_mask = image[:, :, 0] == color[0]\n green_mask = image[:, :, 1] == color[1]\n blue_mask = image[:, :, 2] == color[2]\n masks.append(np.logical_and(red_mask, green_mask, blue_mask))\n\n final_mask = np.logical_not(np.logical_or.reduce(masks))\n image[final_mask] = 255\n return image", "def filter_by_size(self, products, size):\n for p in products:\n # Selects products of the specified size\n if p.size == size:\n yield p", "def queryset(self, request, queryset):\n for tag in get_resource_tags():\n if self.value() == tag[0]:\n return queryset.filter(tags__slug__iexact=tag[0])", "def filtered(self, func):\n return PSetList(list(filter(func, 
self.sets)))", "def sort_products(param: str, querySet: QuerySet):\n products_list = list(querySet)\n products_list.sort(\n key=lambda product: (-product.calculate_score(param), product.price)\n )\n return products_list", "def filter_all(_):\n return True", "def is_matching_product_with_tags(product, lat, lng, radius, tags):\n return vincenty(\n (lat, lng),\n (product.shop.lat, product.shop.lng)\n ).meters <= radius and any(tag in product.shop.tags for tag in tags)", "def test_filter_recipe_by_tags(self):\n recipe1 = sample_recipe(user=self.user,title='chicken curry')\n recipe2 = sample_recipe(user=self.user,title='mutton curry')\n recipe3 = sample_recipe(user=self.user,title='milk dish')\n tag1 = sample_tag(user=self.user,name='chicken')\n tag2 = sample_tag(user=self.user,name='mutton')\n recipe1.tag.add(tag1)\n recipe2.tag.add(tag2)\n\n res = self.client.get(RECIPE_URL,{'tag':f'{tag1.id},{tag2.id}'})\n s1 = RecipeSerializer(recipe1)\n s2 = RecipeSerializer(recipe2)\n s3 = RecipeSerializer(recipe3)\n self.assertIn(s1.data,res.data)\n self.assertIn(s2.data,res.data)\n self.assertNotIn(s3.data,res.data)", "def get_all_products(self):\n\t\tpass", "def test_filter_reecipe_by_tag(self):\n recipe1 = sample_recipe(user=self.user, title=\"fish curry\")\n recipe2 = sample_recipe(user=self.user, title='mutton curry')\n\n tag1 = sample_tag(user=self.user, name='fish')\n tag2 = sample_tag(user=self.user, name='mutton')\n\n recipe1.tags.add(tag1)\n recipe2.tags.add(tag2)\n recipe3 = sample_recipe(user = self.user, title = 'Chicken curry')\n\n res = self.client.get(RECIPE_URL,\n {'tags' : f'{tag1.id},{tag2.id}'})\n seralizer1 = Recipeserializer(recipe1)\n seralizer2 = Recipeserializer(recipe2)\n seralizer3 = Recipeserializer(recipe3)\n self.assertIn(seralizer1.data, res.data)\n self.assertIn(seralizer2.data, res.data)\n self.assertNotIn(seralizer3.data, res.data)", "def filter_stacks(stacks) -> dict:\n if not app_config.TAGS:\n return stacks\n keep = list()\n for stack in stacks[\"Stacks\"]:\n for tag in stack[\"Tags\"]:\n for filter_tag in app_config.TAGS:\n if filter_tag in tag.values():\n keep.append(stack)\n stacks[\"Stacks\"] = keep\n return stacks", "def test_no_op(self):\n request = RequestFactory().get('/?tags=')\n qs = MockQuerySet()\n filter = TestFilterSet(request.GET, qs)\n self.assertNotIn('tags__slug__in', filter.qs.filters)", "def search_by_product(self, **query):\n for product, datasets in self._do_search_by_product(query):\n yield product, self._make_many(datasets, product)", "def filter(self, *args):\n return _libsbml.ElementFilter_filter(self, *args)", "def by_tags(self, tags):\n if tags:\n self._filters.append(\n models.Note.tags.any(models.Tag.id.in_(tags)),\n )\n return self", "def get_filters(self):", "def doFiltering(self, searchfunc, filters=None):\n F=[]\n for f in self.filters:\n F.append(f.getFilter())\n #print F\n sets = []\n for f in F:\n col, val, op, boolean = f\n names = searchfunc(col, val, op)\n sets.append((set(names), boolean))\n names = sets[0][0]\n for s in sets[1:]:\n b=s[1]\n if b == 'AND':\n names = names & s[0]\n elif b == 'OR':\n names = names | s[0]\n elif b == 'NOT':\n names = names - s[0]\n names = list(names)\n self.updateResults(len(names))\n return names", "def test_list_products_filtered_by_category(self):\n self._require_login(self.user1)\n response = self.client.get('/api/1.0/products/?category=1')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data.__len__(), 1)\n self.assertEqual(response.data[0]['name'], 
'Producto 2')\n self.assertEqual(response.data[0]['description'], 'Descripcion producto 2')", "def filter_scalings(scaling_list, scaling_type):\n return filter(\n lambda _f: True if scaling_type in _f[\"runname\"] else False,\n scaling_list)", "def filter_and(filters):\n def filt(item):\n for f in filters:\n if not f(item):\n return False\n return True\n return filt", "def filter_by_param(self, pars):\n\n assert(type(pars) == dict)\n for par, vals in pars.items():\n vals = [type(self.sim_confs[0][par])(v) for v in vals]\n self.sim_confs = [conf for conf in self.sim_confs if conf[par] in vals]\n groups = []\n for group in self.sim_groups:\n filt_group = [conf for conf in group if conf[par] in vals]\n groups.append(filt_group)\n self.sim_groups = groups\n assert(len(self.sim_confs) >= 1)\n return self.sim_confs, self.sim_groups", "def filter_element_and(mt_list, elem_list):\r\n return [mt for mt in mt_list if all(e in mt['pretty_formula'] for e in elem_list)]", "def FilterItems(self):\r\n\t\treturn self._get_attribute('filterItems')", "def itemFilter(*args, byBin: Union[AnyStr, List[AnyStr], bool]=\"\", byName: Union[AnyStr,\n bool]=\"\", byScript: Union[AnyStr, bool]=\"\", byType: Union[AnyStr, List[AnyStr],\n bool]=\"\", category: Union[AnyStr, List[AnyStr], bool]=\"\", classification:\n Union[AnyStr, bool]=\"\", clearByBin: bool=True, clearByType: bool=True,\n difference: Union[List[AnyStr, AnyStr], bool]=None, exists: bool=True,\n intersect: Union[List[AnyStr, AnyStr], bool]=None, listBuiltInFilters: bool=True,\n listOtherFilters: bool=True, listUserFilters: bool=True, negate: bool=True,\n parent: Union[AnyStr, bool]=\"\", pythonModule: Union[AnyStr, bool]=\"\",\n secondScript: Union[AnyStr, bool]=\"\", text: Union[AnyStr, bool]=\"\", union:\n Union[List[AnyStr, AnyStr], bool]=None, uniqueNodeNames: bool=True, q=True,\n query=True, e=True, edit=True, **kwargs)->Union[AnyStr, Any]:\n pass", "def filter(self, included_suites=None, included_tests=None,\n included_tags=None, excluded_tags=None):\n self.visit(Filter(included_suites, included_tests,\n included_tags, excluded_tags))", "def tag_filters(self) -> typing.Optional[typing.Mapping[str,typing.Any]]:\n return self._values.get('tag_filters')", "def tag_filters(self) -> typing.Optional[typing.Mapping[str,typing.Any]]:\n return self._values.get('tag_filters')", "def test_list_products_filtered_by_keyword(self):\n self._require_login(self.user1)\n response = self.client.get('/api/1.0/products/?name=1')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data.__len__(), 1)\n self.assertEqual(response.data[0]['name'], 'Producto 1')\n self.assertEqual(response.data[0]['description'], 'Descripcion producto 1')", "def filter(self, filter_dict):\n pass", "def test_filter_recipes_by_tags(self):\n recipe1 = sample_recipe(user=self.user, title='Vegetable Curry')\n recipe2 = sample_recipe(user=self.user, title='blaaaa')\n tag1 = sample_tag(user=self.user, name='Vegan')\n tag2 = sample_tag(user=self.user, name='Vegetarian')\n recipe1.tags.add(tag1)\n recipe2.tags.add(tag2)\n recipe3 = sample_recipe(user=self.user, title='fish & chips')\n\n res = self.client.get(\n RECIPE_URL,\n {'tags': f'{tag1.id}, {tag2.id}'} # the way we designed our function in views.py to filter by tags is by passing a get parameter with comma separated string of the tags ids we wanna filter by\n )\n\n serializer1 = RecipeSerializer(recipe1)\n serializer2 = RecipeSerializer(recipe2)\n serializer3 = RecipeSerializer(recipe3)\n\n 
self.assertIn(serializer1.data, res.data)\n self.assertIn(serializer2.data, res.data)\n self.assertNotIn(serializer3.data, res.data) # since recipe3 has no tag", "def filter_tag(tags=None):\n tagdict = defaultdict(list)\n Besarkecil = lambda f: ' '.join(re.findall('[A-Z][^A-Z]*', f))\n for obj in list(tags):\n if len(obj.split(':')) == 2:\n k, v = obj.split(':')\n # filtering key Besarkecil, lowercase\n k = str(Besarkecil(k)).lower()\n # print(k)\n if k in ['cari', 'jadwal', 'keberangkatan', 'maskapai', 'type', 'ibadah', 'jumlah hari', 'rute', 'tour']:\n res = re.findall(r\"(^[A-Z][^A-Z]+)|([^\\W\\d_]+|[\\d+]+)\", v)\n arres = []\n for resple in res:\n arres.append(filter(None, resple)[0])\n # print([e for e in resple])\n # print(' '.join(arres))\n tagdict[k].append(' '.join(arres))\n return tagdict", "def get_tags_for_tags_and_malware_family_fields(tags: Optional[list], is_malware_family=False):\n if not tags:\n return None\n results = []\n for item in tags:\n results.append(item.get('tag_name'))\n results.append(item.get('public_tag_name'))\n for alias in item.get('aliases', []):\n results.append(alias)\n if not is_malware_family:\n for group in item.get('tagGroups', [{}]):\n results.append(group.get('tag_group_name'))\n # Returns a list without duplicates and empty elements\n return list(set(filter(None, results)))", "def filters(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"filters\")", "def filters(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"filters\")", "def post_process(klass, results, unsafe_filters):\n logging.info(u'DatastoreModel.post_process() handled unsafe filters:')\n logging.info(u'{}'.format(unsafe_filters))\n all_matching_sets = []\n for k, v in unsafe_filters.items():\n matches = set([e for e in results if getattr(e, k) in v])\n all_matching_sets.append(matches)\n return set.intersection(*all_matching_sets)", "def dgfilter(*args, attribute: AnyStr=\"\", list: bool=True, logicalAnd: List[AnyStr,\n AnyStr]=None, logicalNot: AnyStr=\"\", logicalOr: List[AnyStr, AnyStr]=None, name:\n AnyStr=\"\", node: AnyStr=\"\", nodeType: AnyStr=\"\", plug: AnyStr=\"\",\n **kwargs)->AnyStr:\n pass", "def compute_filtered_features(self, features):\n return [\n feature\n for feature in features\n if \"\".join(feature.qualifiers.get(\"is_edit\", \"false\")) != \"true\"\n ]", "def test_filter_mixed_function(self):\n for none_type in (False, True):\n for all_type in (False, True):\n for any_type in (False, True, None):\n result = none_type is False and all_type is True \\\n and (any_type is None or any_type is True)\n self._test_filter(none_type, all_type, any_type, result)", "def test_filter_recipes_by_tags(self):\n recipe1 = sample_reteta(user=self.user, title='Thai vegetable curry')\n recipe2 = sample_reteta(user=self.user, title='Aubergine with tahini')\n tag1 = sample_tag(user=self.user, name='Vegan')\n tag2 = sample_tag(user=self.user, name='Vegetarian')\n recipe1.tags.add(tag1)\n recipe2.tags.add(tag2)\n # recipe3 = sample_reteta(user=self.user, title='Fasole si carnati')\n\n res = self.client.get(\n RETETA_URL,\n {'tags': f'{tag1.id},{tag2.id}'}\n )\n\n serializer1 = RetetaSerializer(recipe1)\n serializer2 = RetetaSerializer(recipe2)\n # serializer3 = RetetaSerializer(recipe3)\n self.assertIn(serializer1.data, res.data)\n self.assertIn(serializer2.data, res.data)\n # self.assertNotIn(serializer3.data, res.data)", "def filter_tag(tags=None):\n tagdict = defaultdict(list)\n Besarkecil = lambda f: ' 
'.join(re.findall('[A-Z][^A-Z]*', f))\n for obj in list(tags):\n if len(obj.split(':')) == 2:\n k, v = obj.split(':')\n # filtering key Besarkecil, lowercase\n k = str(Besarkecil(k)).lower()\n # print(k)\n if k in ['cari', 'jadwal', 'keberangkatan', 'maskapai', 'type', 'ibadah', 'jumlah hari', 'rute',\n 'tour']:\n res = re.findall(r\"(^[A-Z][^A-Z]+)|([^\\W\\d_]+|[\\d+]+)\", v)\n arres = []\n for resple in res:\n arres.append(filter(None, resple)[0])\n # print([e for e in resple])\n # print(' '.join(arres))\n tagdict[k].append(' '.join(arres))\n return tagdict", "def test_filter_services_by_tag(self):\n\n service1 = sample_services(user=self.user, title='Mechanical Work 1')\n service2 = sample_services(user=self.user, title='Mechanical Work 2')\n service3 = sample_services(user=self.user, title='Mechanical Work 3')\n\n tag1 = sample_tag(user=self.user, name='Mech1')\n tag2 = sample_tag(user=self.user, name='Mech2')\n tag3 = sample_tag(user=self.user, name='mech3')\n\n service1.tags.add(tag1)\n service2.tags.add(tag2)\n service3.tags.add(tag3)\n\n res = self.client.get(\n SERVICES_URL,\n {'tags': f'{tag1.id},{tag2.id},{tag3.id}'}\n )\n\n serializer1 = ServiceSerializer(service1)\n serializer2 = ServiceSerializer(service2)\n serializer3 = ServiceSerializer(service3)\n\n self.assertIn(serializer1.data, res.data)\n self.assertIn(serializer2.data, res.data)\n self.assertNotIn(serializer3.data, res.data)", "def filter_queryset(self, queryset):\n \n queryset = super().filter_queryset(queryset)\n\n # List of StockItem objects to match against\n items = self.get_items()\n\n # We wish to filter by stock items\n if len(items) > 0:\n \"\"\"\n At this point, we are basically forced to be inefficient,\n as we need to compare the 'filters' string of each label,\n and see if it matches against each of the requested items.\n\n TODO: In the future, if this becomes excessively slow, it\n will need to be readdressed.\n \"\"\"\n\n # Keep track of which labels match every specified stockitem\n valid_label_ids = set()\n \n for label in queryset.all():\n\n matches = True\n\n # Filter string defined for the StockItemLabel object\n try:\n filters = InvenTree.helpers.validateFilterString(label.filters)\n except ValidationError:\n continue\n\n for item in items:\n\n item_query = StockItem.objects.filter(pk=item.pk)\n\n try:\n if not item_query.filter(**filters).exists():\n matches = False\n break\n except FieldError:\n matches = False\n break\n\n # Matched all items\n if matches:\n valid_label_ids.add(label.pk)\n else:\n continue\n\n # Reduce queryset to only valid matches\n queryset = queryset.filter(pk__in=[pk for pk in valid_label_ids])\n\n return queryset", "def apply_feature_filter(self):\n self.features = set()\n for language in self.data.values():\n features_in_data = set(language.keys())\n features_to_keep = features_in_data & self.feature_filter\n self.features |= features_to_keep\n features_to_remove = features_in_data - features_to_keep\n for feat in features_to_remove:\n language.pop(feat)\n self.features = sorted(list(self.features))", "def set_filter():\n try:\n #=======================================================================\n # isofilter=[arg.partition('=')[-1] for arg in argv if 'atomfilter=' in arg][0][1:-1].split(',')\n #=======================================================================\n isofilter = config.arg('atomfilter')[1:-1].split(',')\n isofilter = [f.split(':') for f in isofilter]\n for f in isofilter:\n if len(f) < 2:\n f.append('True')\n if len(f) < 3:\n f.append('True')\n if 
len(f) < 4:\n f.append('None')\n except:\n isofilter = [['element', 'H', 'True', 'None']]\n try:\n #=======================================================================\n # isopartnerfilter=[arg.partition('=')[-1] for arg in argv if 'partnerfilter=' in arg][0][1:-1].split(',')\n #=======================================================================\n isopartnerfilter = config.arg('partnerfilter')[1:-1].split(',')\n isopartnerfilter = [f.split(':') for f in isopartnerfilter]\n for f in isopartnerfilter:\n if len(f) < 2:\n f.append('True')\n if len(f) < 3:\n f.append('True')\n if len(f) < 4:\n f.append('None')\n except:\n isopartnerfilter = [['None', 'None', 'None', 'None']]\n return isofilter, isopartnerfilter\n isofilterlist = []\n isopartnerfilterlist = []\n for i in xrange(len(isofilter) / 2):\n isofilterlist.append(tuple(isofilter[2 * i:2 * i + 2]))\n for i in xrange(len(isopartnerfilter) / 2):\n isopartnerfilterlist.append(tuple(isopartnerfilter[2 * i:2 * i + 2]))\n\n return [isofilterlist, isopartnerfilterlist]", "def filter_entries_by_tag(search, entry) -> bool:\n tags = entry.tags\n search_words = search.strip().translate(str.maketrans(\"&|\", \" \")).split()\n\n if \"&\" in search:\n search_type = \"AND\"\n else:\n search_type = \"OR\"\n\n for word in search_words:\n if word.lower() in tags:\n if search_type == \"OR\":\n return True\n\n elif search_type == \"AND\":\n return False\n\n if search_type == \"OR\":\n return False\n\n else:\n return True", "def test_filter_multiple(self):\n self.es.register_filter(foo=False, bar='baz')\n self.assertFalse(self.es.streamfilter(self.data))\n self.es.filter = {'all': [], 'any': [], 'none': []}\n self.es.register_filter(foo=True, bar='baz')\n self.assertTrue(self.es.streamfilter(self.data))\n # check whether filter functions are different\n f, g = self.es.filter['all']\n c = {'foo': True}\n self.assertNotEqual(f(c), g(c))\n c = {'bar': 'baz'}\n self.assertNotEqual(f(c), g(c))", "def filter(\n self,\n surfaces: Union[list, int] = None,\n volumes: Union[list, int] = None,\n fields: Union[list, str] = None,\n instances: DerivedQuantity = None,\n ):\n # ensure arguments are list\n if surfaces is not None and not isinstance(surfaces, list):\n surfaces = [surfaces]\n if volumes is not None and not isinstance(volumes, list):\n volumes = [volumes]\n if fields is not None and not isinstance(fields, list):\n fields = [fields]\n if instances is not None and not isinstance(instances, list):\n instances = [instances]\n\n quantities = []\n\n # iterate through derived_quantities\n for quantity in self.derived_quantities:\n # initialise flags to False\n match_surface, match_volume, match_field, match_instance = (\n False,\n False,\n False,\n False,\n )\n\n # check if matches surface\n if surfaces is not None:\n if hasattr(quantity, \"surface\") and quantity.surface in surfaces:\n match_surface = True\n else:\n match_surface = True\n\n # check if matches volume\n if volumes is not None:\n if hasattr(quantity, \"volume\") and quantity.volume in volumes:\n match_volume = True\n else:\n match_volume = True\n\n # check if matches field\n if fields is not None:\n if quantity.field in fields:\n match_field = True\n else:\n match_field = True\n\n # check if matches instance\n if instances is not None:\n if isinstance(quantity, tuple(instances)):\n match_instance = True\n else:\n match_instance = True\n\n # if all flags are True, append to the list\n if match_surface and match_volume and match_field and match_instance:\n quantities.append(quantity)\n\n if 
len(quantities) == 1:\n quantities = quantities[0]\n return quantities", "def filter(self, *args, **kwargs):\n self._not_support_combined_queries(\"filter\")\n return self._filter_or_exclude(False, args, kwargs)", "def add_products(self, products):\n return [self.add_product(product) for product in set(products)]", "def filter_queryset(self, queryset, view=None):\n queryset = super().filter_queryset(queryset.only(\"id\", \"shared\"))\n form_pk = self.kwargs.get(self.lookup_field)\n\n if form_pk:\n try:\n int(form_pk)\n except ValueError as e:\n if form_pk == self.public_data_endpoint:\n queryset = self._get_public_forms_queryset()\n else:\n raise ParseError(_(f\"Invalid pk {form_pk}\")) from e\n else:\n queryset = self._filtered_or_shared_queryset(queryset, form_pk)\n else:\n tags = self.request.query_params.get(\"tags\")\n not_tagged = self.request.query_params.get(\"not_tagged\")\n\n if tags and isinstance(tags, six.string_types):\n tags = tags.split(\",\")\n queryset = queryset.filter(tags__name__in=tags)\n if not_tagged and isinstance(not_tagged, six.string_types):\n not_tagged = not_tagged.split(\",\")\n queryset = queryset.exclude(tags__name__in=not_tagged)\n\n return queryset", "def test_filter_recipe_by_tag(self):\n tag1 = sample_tag(self.user, name='Indian')\n tag2 = sample_tag(self.user, name='Breakfast')\n recipe1 = sample_recipe(self.user, title='Curry')\n recipe2 = sample_recipe(self.user, title=\"bacon pie\")\n recipe1.tags.add(tag1)\n recipe2.tags.add(tag2)\n recipe3 = sample_recipe(self.user)\n\n res = self.client.get(\n RECIPE_URL,\n {'tags': f'{tag1.id},{tag2.id}'}\n )\n serializer1 = RecipeSerializer(recipe1)\n serializer2 = RecipeSerializer(recipe2)\n serializer3 = RecipeSerializer(recipe3)\n self.assertIn(serializer1.data, res.data)\n self.assertIn(serializer2.data, res.data)\n self.assertNotIn(serializer3.data, res.data)", "def test_get_project_list_with_tag_filter(self):\n # Add test projects.\n tag = 'tag1'\n projects_with_tag = [\n add_project(title='1', description='1', tags=[tag]),\n add_project(title='2', description='2', tags=[tag]),\n ]\n project_without_tag = add_project(title='3', description='3', tags=[])\n\n result = get_project_list(tag=tag)\n result_projects = result['projects'].object_list\n\n # Make sure only projects with tag are retrieved.\n for project_with_tag in projects_with_tag:\n self.assertTrue(project_with_tag in result_projects)\n self.assertFalse(project_without_tag in result_projects)\n self.assertEqual(len(result_projects), len(projects_with_tag))\n self.assertTrue(result['filtered'])\n self.assertEqual(result['tag'], tag)", "def filter_queryset(self, queryset):\n\n queryset = super().filter_queryset(queryset)\n\n items = self.get_items()\n\n if len(items) > 0:\n \"\"\"At this point, we are basically forced to be inefficient:\n\n We need to compare the 'filters' string of each report template,\n and see if it matches against each of the requested items.\n\n In practice, this is not too bad.\n \"\"\"\n\n valid_report_ids = set()\n\n for report in queryset.all():\n matches = True\n\n try:\n filters = InvenTree.helpers.validateFilterString(report.filters)\n except ValidationError:\n continue\n\n for item in items:\n item_query = self.ITEM_MODEL.objects.filter(pk=item.pk)\n\n try:\n if not item_query.filter(**filters).exists():\n matches = False\n break\n except FieldError:\n matches = False\n break\n\n # Matched all items\n if matches:\n valid_report_ids.add(report.pk)\n\n # Reduce queryset to only valid matches\n queryset = 
queryset.filter(pk__in=list(valid_report_ids))\n\n return queryset", "def filter(request):\n product = Product.objects.filter(name__icontains=request.GET['q']).filter(brand__icontains=request.GET['brand']).filter(year__icontains=request.GET['year'])\n return render(request, \"search_results.html\", {\"products\": product})", "def _getWhereClause(self, tagIDs):\n objectIDs = self._criteria.get('objectIDs')\n where = [TagValue.tagID == Tag.id]\n if objectIDs:\n where.append(TagValue.objectID.is_in(objectIDs))\n if tagIDs:\n where.append(Tag.id.is_in(tagIDs))\n createdBeforeTime = self._criteria.get('createdBeforeTime')\n if createdBeforeTime:\n where.append(TagValue.creationTime < createdBeforeTime)\n return where", "def filter_or(filters):\n def filt(item):\n for f in filters:\n if f(item):\n return True\n return False\n return filt", "def tags(catalog,lista,tag):\n final=lt.newList(datastructure='ARRAY_LIST')\n i=it.newIterator(lista)\n while it.hasNext(i):\n vid=it.next(i)\n if tag in vid['tags']:\n lt.addLast(final,vid)\n return final", "def filter_queryset(self, queryset):\n for name, value in self.form.cleaned_data.items():\n queryset = self.filters[name].filter(queryset, value)\n # assert isinstance(queryset, models.QuerySet), \\\n # \"Expected '%s.%s' to return a QuerySet, but got a %s instead.\" \\\n # % (type(self).__name__, name, type(queryset).__name__)\n return queryset", "def filter(self, **args ):\n query = TXLOG.select('*')\n for key, value in args.items():\n if '__' in key:\n key, op = key.split('__')\n else:\n op = 'eq'\n\n if not key in self.schema:\n raise BadArgument(\"Key %s not a valid argument\" % key )\n\n if not isinstance(value, basestring ):\n value = str(value)\n\n query = query.where({key:value}, self.operators[op])\n\n items = query.list()\n return items", "def filter_queryset(self, queryset):\n params = self.request.query_params\n\n queryset = super().filter_queryset(queryset)\n\n # Filter by 'build'\n build = params.get('build', None)\n\n if build is not None:\n\n try:\n build = Build.objects.get(pk=build)\n\n queryset = queryset.filter(stock_item__build=build)\n\n except (ValueError, Build.DoesNotExist):\n pass\n\n # Filter by stock item\n item = params.get('stock_item', None)\n\n if item is not None:\n try:\n item = StockItem.objects.get(pk=item)\n\n items = [item]\n\n # Do we wish to also include test results for 'installed' items?\n include_installed = str2bool(params.get('include_installed', False))\n\n if include_installed:\n # Include items which are installed \"underneath\" this item\n # Note that this function is recursive!\n installed_items = item.get_installed_items(cascade=True)\n\n items += list(installed_items)\n\n queryset = queryset.filter(stock_item__in=items)\n\n except (ValueError, StockItem.DoesNotExist):\n pass\n\n return queryset", "def _filter(self, probs: Tensor, ids: Tensor) -> Tuple[Tensor, List[int]]:\n raise NotImplementedError", "def test_filtering_recipes_with_tags(self):\n recipe1 = create_sample_recipe(user=self.user, title=\"Chicken tikka\")\n recipe2 = create_sample_recipe(user=self.user, title=\"Italian pasta\")\n recipe3 = create_sample_recipe(user=self.user, title=\"Vegan dessert\")\n\n tag1 = create_sample_tag(user=self.user, name=\"Grill\")\n tag2 = create_sample_tag(user=self.user, name=\"Light food\")\n\n recipe1.tags.add(tag1)\n recipe2.tags.add(tag2)\n\n res = self.client.get( RECIPE_URL, {\"tags\": f\"{tag1.id}, {tag2.id}\"})\n\n serializer1 = RecipeSerializer(recipe1)\n serializer2 = RecipeSerializer(recipe2)\n 
serializer3 = RecipeSerializer(recipe3)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertIn(serializer1.data, res.data)\n self.assertIn(serializer2.data, res.data)\n self.assertNotIn(serializer3.data, res.data)", "def get_visible_products(self):\r\n all_products = billing.loading.get_products(hidden=True)\r\n public_products = billing.loading.get_products()\r\n subscribed_product_types = ProductType.objects \\\r\n .filter(subscriptions__billing_account=self) \\\r\n .distinct()\r\n subscribed_products = set(pt.get_product_class() for pt in subscribed_product_types)\r\n visible_products = set(public_products).union(subscribed_products)\r\n return [p for p in all_products if p in visible_products]", "def search(self, cr, user, args, offset=0, limit=None, order=None,\n context=None, count=False):\n prod_obj = self.pool['product.product']\n op_map = {'=': 'in', '!=': 'not in', '<>': 'not in',\n 'in': 'in', 'not in': 'not in'}\n #TODO - support between combination of <,<= and >, >=\n args2 = args[:]\n arg_offset = 0\n res = False\n for idx, arg in enumerate(args2):\n if arg[0] == 'product_id':\n product_ids = prod_obj.search(cr, user,\n [('id', arg[1], arg[2]),\n ('is_multi_variants', '=', True)],\n context=context)\n tmpl_ids = [prod.product_tmpl_id.id for prod in\n prod_obj.browse(cr, user, product_ids,\n context=context)]\n if tmpl_ids:\n prod_ids = prod_obj.search(cr, user,\n [('product_tmpl_id', 'in',\n tmpl_ids)])\n operator = op_map.get(arg[1], 'in')\n if idx > 0 and args2[idx - 1] == '!':\n operator = operator == 'in' and 'not in' or 'in'\n extra_args = ['|', '&',\n ('bom_template', '=', True),\n ('product_id', operator, prod_ids)]\n args = (args[:idx + arg_offset] + extra_args +\n args[idx + arg_offset:])\n arg_offset += len(extra_args)\n try:\n res = super(BomTemplate, self).search(cr, user, args, offset=offset,\n limit=limit, order=order,\n context=context, count=count)\n except:\n _logger.exception('Extended search failed for MRP BoM with domain '\n '%s. Performing standard search with %s' %\n (args, args2))\n if not res:\n res = super(BomTemplate, self).search(cr, user, args2, offset=offset,\n limit=limit, order=order,\n context=context, count=count)\n return res", "def _filter(\n self,\n data: List[dict],\n filters: List[Tuple[str, SupportedFilterType]],\n state_dataclass: StateSchema,\n detail: bool,\n ) -> List[dict]:\n filters = _convert_filters_type(filters, state_dataclass)\n result = []\n for datum in data:\n match = True\n for filter_column, filter_predicate, filter_value in filters:\n filterable_columns = state_dataclass.filterable_columns()\n filter_column = filter_column.lower()\n if filter_column not in filterable_columns:\n raise ValueError(\n f\"The given filter column {filter_column} is not supported. \"\n f\"Supported filter columns: {filterable_columns}\"\n )\n\n if filter_column not in datum:\n match = False\n elif filter_predicate == \"=\":\n if isinstance(filter_value, str) and isinstance(\n datum[filter_column], str\n ):\n # Case insensitive match for string filter values.\n match = datum[filter_column].lower() == filter_value.lower()\n else:\n match = datum[filter_column] == filter_value\n elif filter_predicate == \"!=\":\n if isinstance(filter_value, str) and isinstance(\n datum[filter_column], str\n ):\n match = datum[filter_column].lower() != filter_value.lower()\n else:\n match = datum[filter_column] != filter_value\n else:\n raise ValueError(\n f\"Unsupported filter predicate {filter_predicate} is given. 
\"\n \"Available predicates: =, !=.\"\n )\n\n if not match:\n break\n\n if match:\n result.append(filter_fields(datum, state_dataclass, detail))\n return result", "def test_list_products_filtered_by_selling_status(self):\n self._require_login(self.user1)\n response = self.client.get('/api/1.0/products/?selling=3')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data.__len__(), 1)\n self.assertEqual(response.data[0]['name'], 'Producto 2')\n self.assertEqual(response.data[0]['description'], 'Descripcion producto 2')", "def check_products(self, adi):\r\n results = []\r\n products = self.get_products(adi)\r\n for product in products[\"data\"][\"products\"]:\r\n print(\"Checking product '{}'... \".format(product[\"name\"]), end='')\r\n detail = self.get_product_detail(adi, product_id=product[\"productId\"], product_name=product[\"name\"])\r\n if self.rf.valid_product_detail(detail):\r\n print(\"Valid.\")\r\n result = \"Available\"\r\n else:\r\n print(\"INVALID.\")\r\n result = \"Not available\"\r\n results.append([product[\"name\"], result])\r\n return results" ]
[ "0.66341794", "0.6542159", "0.63417774", "0.6290791", "0.61444324", "0.61051065", "0.5993559", "0.5917431", "0.5875063", "0.5852951", "0.578841", "0.5734029", "0.57259727", "0.5694228", "0.5673258", "0.565935", "0.5637606", "0.56313235", "0.5576161", "0.5563928", "0.5539448", "0.552675", "0.5516025", "0.5515226", "0.5494058", "0.5465211", "0.5446835", "0.5442008", "0.5441432", "0.54033357", "0.54023135", "0.5377015", "0.53703684", "0.53654724", "0.5352439", "0.5351586", "0.5349544", "0.5341778", "0.53317755", "0.53189117", "0.53069746", "0.5306867", "0.5302087", "0.52998185", "0.52819955", "0.52810895", "0.52605677", "0.5250497", "0.5245583", "0.52449286", "0.5241019", "0.5229809", "0.52260363", "0.5207155", "0.5202673", "0.51941913", "0.5168955", "0.5166475", "0.5162977", "0.5162977", "0.5141813", "0.513734", "0.5133896", "0.51291704", "0.51181054", "0.5116665", "0.5116665", "0.51119196", "0.510481", "0.5097499", "0.50931436", "0.5089226", "0.50891185", "0.5086877", "0.50854343", "0.5072374", "0.50717646", "0.5070128", "0.5069506", "0.5066296", "0.5045647", "0.50440836", "0.50415945", "0.50388145", "0.5035905", "0.5031814", "0.50238466", "0.5023284", "0.50174785", "0.5016951", "0.5013485", "0.5003747", "0.5001859", "0.5001404", "0.49994662", "0.49992457", "0.4998505", "0.49932528", "0.49914837", "0.49890947" ]
0.6570375
1
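Below is a minimal, self-contained sketch of the geo-radius filtering idea captured by the record above. It is illustrative only: the Shop and Product namedtuples, the within_radius helper, and the sample coordinates are assumptions rather than part of the dataset, and geopy's geodesic is used in place of the deprecated vincenty.

from collections import namedtuple
from geopy.distance import geodesic  # vincenty() was removed in geopy >= 2.0; geodesic is the replacement

# Hypothetical stand-ins for the shop/product models referenced by the dataset code.
Shop = namedtuple("Shop", ["lat", "lng", "tags"])
Product = namedtuple("Product", ["title", "shop"])

def within_radius(product, lat, lng, radius_m, tags=None):
    # Great-circle distance from the query point to the product's shop, in metres.
    close_enough = geodesic((lat, lng), (product.shop.lat, product.shop.lng)).meters <= radius_m
    if not tags:
        return close_enough
    # When tags are given, also require at least one matching shop tag.
    return close_enough and any(tag in product.shop.tags for tag in tags)

products = [
    Product("espresso", Shop(52.5200, 13.4050, ["coffee"])),
    Product("paperback", Shop(48.8566, 2.3522, ["books"])),
]
nearby = [p for p in products if within_radius(p, 52.52, 13.40, 5_000, tags=["coffee"])]
print([p.title for p in nearby])  # ['espresso']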
(Product, float, float, radius) -> boolean Check if the coordinates of a shop are within a radius (in meters) using Vincenty's formulae.
def is_matching_product(product, lat, lng, radius):
    return vincenty(
        (lat, lng),
        (product.shop.lat, product.shop.lng)
    ).meters <= radius
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_matching_product_with_tags(product, lat, lng, radius, tags):\n return vincenty(\n (lat, lng),\n (product.shop.lat, product.shop.lng)\n ).meters <= radius and any(tag in product.shop.tags for tag in tags)", "def __contains__(self, position):\n return sum([(c1 - c2) ** 2 for (c1, c2) in zip(self.position, position)]) <= self.radius", "def point_inside_circle(x,y,center_x,center_y,radius):\n return (x-center_x)**2 + (y - center_y)**2 < radius**2", "def __contains__(self, other):\n x, y = other\n return self.radius >= sqrt((x - self.x) ** 2 + (y - self.y) ** 2)", "def contains(self, position):\n return np.linalg.norm(position - self._center) < self._radius", "def inside_unit_circle(point):\n distance = math.sqrt(point[0] ** 2 + point[1] ** 2)\n return distance < 1", "def isInCircle(self,x1,y1,radius1):\r\n if(distance(self.x,x1,self.y,y1) < (self.radius+radius1)):\r\n return True\r\n return False", "def objects_radius(self, centre, radius):", "def checkBounds(x,y,z,center,radius):\n r2 = (x-center[0])**2 + (y-center[1])**2# + (z-center[0])**2\n if r2 < radius**2:\n return True\n else:\n return False", "def contains(self, loc): \n return loc.distance(self.center) <= self.radius", "def isInside(x1, y1, x2, y2, x3, y3, x, y):\n # Calculate area of triangle ABC\n A = area (x1, y1, x2, y2, x3, y3)\n \n # Calculate area of triangle PBC\n A1 = area (x, y, x2, y2, x3, y3)\n \n # Calculate area of triangle PAC\n A2 = area (x1, y1, x, y, x3, y3)\n \n # Calculate area of triangle PAB\n A3 = area (x1, y1, x2, y2, x, y)\n \n # Check if sum of A1, A2 and A3\n # is same as A\n if(A == A1 + A2 + A3):\n return True\n else:\n return False", "def is_point_inside_hypersphere(point: np.array, c: List[float], r: float) -> bool:\n return np.linalg.norm(point - c) < r", "def inside(x, y, primitive):\n\n # You should implement your inside test here for all shapes\n # for now, it only returns a false test\n\n if primitive[\"shape\"] == \"circle\":\n dist_sqr = ((primitive[\"center\"][0] - x) ** 2 +\n (primitive[\"center\"][1] - y) ** 2)\n\n return dist_sqr <= primitive[\"radius\"] ** 2\n else:\n return winding_number(x, y, primitive)\n\n return False", "def test_get_radius():\n center = Coordinates(7, 3)\n radius = 12\n\n returned_rad = get_radius(center, radius, 30)\n\n assert returned_rad == radius\n assert returned_rad != center.get_x()\n assert returned_rad != center.get_y()", "def sphere_isclose(c1, c2, *args, **kwargs):\n return np.isclose(c1.radius, c2.radius, *args, **kwargs) and np.allclose(\n c1.center, c2.center, *args, **kwargs\n )", "def containsPos(self, obst_pos, aerial_pos):\n dist_to_center = obst_pos.distanceTo(aerial_pos)\n return dist_to_center <= self.sphere_radius", "def check_point_in_detector(p, radius=radius, height=height, distance=distance):\r\n if p[0]**2 + p[1]**2 <= radius**2: # Are the x and y coordinates in the circle?\r\n if (p[2] >= distance) and (p[2] <= height+distance): # Is the z coordinate between the distance and the height?\r\n return True\r\n else:\r\n return False\r\n else:\r\n return False", "def isinsidepointXY(x,p):\n \n return dist(x,p) < epsilon", "def are_close(coord1, coord2, tolerance=10):\n return vincenty(coord1, coord2).meters < tolerance", "def incircle(self,xpos,ypos,cellx,celly):\n xcell, ycell = self.getcellcenter(cellx,celly)\n if ((xpos - xcell)**2 + (ypos - ycell)**2) < self.crad2:\n return True\n return False\n\n return cellx, celly", "def WhereAreYou(CurLongitude,CurLatitude,LocationLongitude,LocationLatitude,LocationRadius):\n # Calculate the 
great circle distance between two points\n # on the earth (specified in decimal degrees)\n # 将十进制度数转化为弧度\n CurLongitude,CurLatitude,LocationLongitude,LocationLatitude = map(radians, [float(CurLongitude),float(CurLatitude),float(LocationLongitude),float(LocationLatitude)])\n\n # haversine公式\n dlon = LocationLongitude - CurLongitude\n dlat = LocationLatitude - CurLatitude\n a = sin(dlat / 2) ** 2 + cos(CurLatitude) * cos(LocationLatitude) * sin(dlon / 2) ** 2\n c = 2 * asin(sqrt(a))\n r = 6371 # 地球平均半径,单位为公里\n distance = c * r * 1000\n if(distance < float(LocationRadius)):\n return True\n else:\n return False", "def __contains__(self, point): \n corners = self.corners\n\n if isinstance(point, tuple):\n from pyresample.spherical_geometry import Coordinate\n retval = planar_point_inside(Coordinate(*point), corners)\n else:\n retval = planar_point_inside(point, corners)\n\n #print ' retval from FALSE CORNERS contains '+str(retval)\n\n return retval", "def get_radius(self):", "def within_radius(self, radius=5.0):\n\n return GeoEntry.within_radius(self.primary_geocode, radius, ['all',])", "def in_circle(x0, y0, x, y, r):\n return ((x - x0) ** 2 + (y - y0) ** 2) <= (r ** 2)", "def is_point_inside_hypermoon(point: np.array, c: Tuple[List[float]], r: Tuple[float]) -> bool:\n return is_point_inside_hypersphere(point, c[0], r[0]) and not is_point_inside_hypersphere(point, c[1], r[1])", "def vincenty(p1, p2):\n # Note: GeoPy expects (latitude, longitude) pairs.\n return geopy.distance.vincenty(\n (p1.y, p1.x),\n (p2.y, p2.x)\n ).miles", "def check_coordinates(X, Y):\n\n # Accounting for elliptical Jupiter disk\n Y *= 1.071374\n\n return sqrt(X ** 2 + Y ** 2)", "def is_clicked(vtx_x, vtx_y, mouse_x, mouse_y, radius):\n return math.sqrt(((mouse_x - vtx_x) ** 2) + ((mouse_y - vtx_y) ** 2)) < radius", "def is_point_inside_hypercube(point: List[float], c: List[float], r: float) -> bool:\n diff = np.subtract(point, c)\n return np.all(np.absolute(diff) <= r)", "def dans_cercle(self, r, x, y):\r\n self.r_num(r)\r\n valid = (isinstance(x, int) or isinstance(x, float)) and \\\r\n (isinstance(y, int) or isinstance(y, float))\r\n if valid:\r\n if sqrt(x**2+y**2)<self.r:\r\n return True\r\n else:\r\n return False\r\n else:\r\n raise TypeError", "def containsPos(self, aerial_pos):\n # Check altitude of position\n aerial_alt = aerial_pos.altitude_msl\n if (aerial_alt < 0 or aerial_alt > self.cylinder_height):\n return False\n # Check lat/lon of position\n dist_to_center = self.gps_position.distanceTo(aerial_pos.gps_position)\n if dist_to_center > self.cylinder_radius:\n return False\n # Both within altitude and radius bounds, inside cylinder\n return True", "def check_in(x, y, R=Re):\n r = np.sqrt(x ** 2 + y ** 2)\n return r <= R", "def at_loc((x, y), (cx, cy), eps=0.000035):\n\treturn (x - cx)**2 + (y - cy)**2 <= eps**2", "def conf_test(self, trial_coords: np.ndarray) -> bool:\n #N = len(trial_coords)\n r2: float = 0\n i =0\n r2 = np.inner(trial_coords, trial_coords)\n if (r2> self.m_radius2):\n return False\n return True", "def is_inside(self, mX, mY, point):\n return (math.sqrt((point[0] - mX) * (point[0] - mX)\n + (point[1] - mY) * (point[1] - mY)) <= 2)", "def area_of_circle(radius):\n return radius", "def main():\n pos_x1 = float(input())\n pos_y1 = float(input())\n rad_1 = float(input())\n pos_x2 = float(input())\n pos_y2 = float(input())\n rad_2 = float(input())\n\n if ((pos_x1 - pos_x2) ** 2 + (pos_y1 - pos_y2) ** 2) ** 0.5 < rad_1 + rad_2:\n print(\"Yes\")\n else:\n print(\"No\")", "def __contains__(self, 
point):\n #### Original \n from pyresample.spherical_geometry import point_inside, Coordinate\n corners = self.corners\n\n if isinstance(point, tuple):\n return point_inside(Coordinate(*point), corners)\n else:\n return point_inside(point, corners)\n #### End Original\n #from .spherical import SphPolygon\n #log.info('RUNNING SPHERICAL in __contains__')\n #sphpoly = SphPolygon(corners)\n #return sphpoly.intersection(SphPolygon(point), sphpoly)", "def isInternal(self, aPoint):\n if compute_distance(self.center, aPoint) <= self.radius:\n return True\n else:\n return False", "def within(self, right: GeoSpatialValue) -> ir.BooleanValue:\n return ops.GeoWithin(self, right).to_expr()", "def shops_within_radius(self, lat, lng, radius, tags=None):\n center_point = geoindex.GeoPoint(lat, lng)\n points = self.geoindex.get_nearest_points(center_point, radius, 'km')\n\n def tags_filter(shops):\n for shop in shops:\n for tag in tags:\n if tag in shop['tags']:\n yield shop\n break\n\n def get_shops():\n for point, distance in points:\n point.ref['distance'] = distance\n yield point.ref\n\n if tags:\n return tags_filter(get_shops())\n else:\n return get_shops()", "def isKelvin(self):\n return _libsbml.Unit_isKelvin(self)", "def isoutside(coords, shape):\n # Label external pores for trimming below\n if len(shape) == 1: # Spherical\n # Find external points\n r = np.sqrt(np.sum(coords**2, axis=1))\n Ps = r > shape[0]\n elif len(shape) == 2: # Cylindrical\n # Find external pores outside radius\n r = np.sqrt(np.sum(coords[:, [0, 1]]**2, axis=1))\n Ps = r > shape[0]\n # Find external pores above and below cylinder\n if shape[1] > 0:\n Ps = Ps + (coords[:, 2] > shape[1])\n Ps = Ps + (coords[:, 2] < 0)\n else:\n pass\n elif len(shape) == 3: # Rectilinear\n shape = np.array(shape, dtype=float)\n try:\n lo_lim = shape[:, 0]\n hi_lim = shape[:, 1]\n except IndexError:\n lo_lim = np.array([0, 0, 0])\n hi_lim = shape\n Ps1 = np.any(coords > hi_lim, axis=1)\n Ps2 = np.any(coords < lo_lim, axis=1)\n Ps = Ps1 + Ps2\n return Ps", "def FindPointsWithinRadius(self, p_float, , vtkIdList):\n ...", "def in_area_of_interest (lat, long, min_lat, max_lat, min_long, max_long):\n\n if lat is None or long is None:\n return False\n\n lat = float(lat)\n long = float(long)\n\n if ((lat >= min_lat and lat <= max_lat) and\n (long >= min_long and long <= max_long)):\n return True\n\n return False", "def is_inside(self, p):\n s, t = self.get_barycentric_coord(p)\n if 0 <= s <= 1 and 0 <= t <= 1 and s + t <= 1:\n return True\n else:\n return False", "def in_square(self, point):\n size = self.size\n centre = self.centre\n # Find the upper and lower bounds for the square in-terms of x and y\n lower_x, upper_x = centre.x - size / 2, centre.x + size / 2\n lower_y, upper_y = centre.y - size / 2, centre.y + size / 2\n # Equals with lower bounds only\n return (lower_x <= point.x < upper_x) and (lower_y < point.y <= upper_y)", "def pointPotential(x,y,q,posx,posy):\n k = 8.99e9\n V = (k * q) / (sqrt(x**2 + (y - sqrt((posx**2 + posy**2)))**2))\n return V", "def test_circumference_area(self):\n self.assertEqual(9.425, circumference_area(self.values['radius']))", "def sphere_area(radius : number) -> number:\n area = 4*pi*radius*radius\n return area", "def is_inside(self, points):\n points = np.atleast_2d(points) - self.centroid\n return np.logical_and(\n np.linalg.norm(points, axis=-1) <= self.radius,\n # At present circles are not orientable, so the z position must\n # match exactly.\n np.isclose(points[:, 2], 0),\n )", "def brute_force(savedPnts, 
unitRadius, point):\n for pnt in savedPnts:\n d = distance(pnt, point)\n if d < unitRadius: return False\n return True", "def __ge__(self, other):\n return self.x ** 2 + self.y ** 2 >= other.x ** 2 + other.y ** 2", "def contains_pt(self, pt):\n x, y = pt\n if not self.x - self.radius < x < self.x + self.radius:\n return False\n if not self.y - self.radius < y < self.y + self.radius:\n return False\n return True", "def _intersected(positions, radius):\n P1 = positions[0]\n P2 = positions[1]\n P3 = positions[2]\n temp1 = P2 - P1\n e_x = temp1 / np.linalg.norm(temp1)\n temp2 = P3 - P1\n i = np.dot(e_x, temp2)\n temp3 = temp2 - i * e_x\n e_y = temp3 / np.linalg.norm(temp3)\n e_z = np.cross(e_x, e_y)\n d = np.linalg.norm(P2 - P1)\n j = np.dot(e_y, temp2) \n x = d / 2\n y = (-2*i*x + i*i + j*j) / (2*j)\n temp4 = radius**2 - x*x - y*y\n if temp4 < 0:\n return False\n return True", "def is_perfect_square():", "def check_me(triplet, list_of_coords):\n c = True\n for element in list_of_coords:\n if (float(triplet[0])*0.99 <= float(element[0]) <= float(triplet[0])*1.01):\n if (float(triplet[1])*0.99 <= float(element[1]) <= float(triplet[1])*1.01):\n if (float(triplet[2])*0.99 <= float(element[2]) <= float(triplet[2])*1.01):\n c = False\n return c", "def is_inside(inner_path, outer_path):\r\n if not hasattr(inner_path, 'bounding_box'):\r\n inner_path.bounding_box = CutPlanner.bounding_box(inner_path)\r\n if not hasattr(outer_path, 'bounding_box'):\r\n outer_path.bounding_box = CutPlanner.bounding_box(outer_path)\r\n if outer_path.bounding_box[0] > inner_path.bounding_box[0]:\r\n # outer minx > inner minx (is not contained)\r\n return False\r\n if outer_path.bounding_box[1] > inner_path.bounding_box[1]:\r\n # outer miny > inner miny (is not contained)\r\n return False\r\n if outer_path.bounding_box[2] < inner_path.bounding_box[2]:\r\n # outer maxx < inner maxx (is not contained)\r\n return False\r\n if outer_path.bounding_box[3] < inner_path.bounding_box[3]:\r\n # outer maxy < inner maxy (is not contained)\r\n return False\r\n if outer_path.bounding_box == inner_path.bounding_box:\r\n if outer_path == inner_path: # This is the same object.\r\n return False\r\n if not hasattr(outer_path, 'vm'):\r\n outer_path = Polygon([outer_path.point(i / 100.0, error=1e4) for i in range(101)])\r\n vm = VectorMontonizer()\r\n vm.add_cluster(outer_path)\r\n outer_path.vm = vm\r\n for i in range(101):\r\n p = inner_path.point(i / 100.0, error=1e4)\r\n if not outer_path.vm.is_point_inside(p.x, p.y):\r\n return False\r\n return True", "def parallel_radius(self, lat):\n\n return EARTH_RADIUS * lat.cos()", "def get_radius(self):\r\n return 1", "def inCircleFast(self, tri, p):\n center, radius = self.circles[tri]\n return np.sum(np.square(center - p)) <= radius", "def sphereArea(radius):\n area = 4 * math.pi * radius ** 2\n return area", "def vincenty(lon0, lat0, a1, s):\n\n lon0 = np.deg2rad(lon0)\n lat0 = np.deg2rad(lat0)\n a1 = np.deg2rad(a1)\n s = np.deg2rad(s)\n\n sina = np.cos(lat0) * np.sin(a1)\n\n num1 = np.sin(lat0) * np.cos(s) + np.cos(lat0) * np.sin(s) * np.cos(a1)\n den1 = np.sqrt(\n sina**2 + (np.sin(lat0) * np.sin(s) - np.cos(lat0) * np.cos(a1)) ** 2\n )\n lat = np.rad2deg(np.arctan2(num1, den1))\n\n num2 = np.sin(s) * np.sin(a1)\n den2 = np.cos(lat0) * np.cos(s) - np.sin(lat0) * np.sin(s) * np.cos(a1)\n L = np.arctan2(num2, den2)\n lon = np.rad2deg(lon0 + L)\n\n return lon, lat", "def great_circle(a: Point, b: Point) -> Km:\n\n lat1, lng1, lat2, lng2 = map(radians, [a.latitude, a.longitude, b.latitude, 
b.longitude])\n sin_lat1, sin_lat2 = map(sin, [lat1, lat2])\n cos_lat1, cos_lat2 = map(cos, [lat1, lat2])\n delta_lng = lng2 - lng1\n cos_delta_lng, sin_delta_lng = cos(delta_lng), sin(delta_lng)\n\n d = atan2(\n sqrt((cos_lat2 * sin_delta_lng) ** 2 + (cos_lat1 * sin_lat2 - sin_lat1 * cos_lat2 * cos_delta_lng) ** 2),\n sin_lat1 * sin_lat2 + cos_lat1 * cos_lat2 * cos_delta_lng,\n )\n\n return Km(6371.009 * d) # Radius of earth in kilometers is 6371", "def circles_collide(x1: float, y1: float, r1: float, x2: float, y2: float, r2: float) -> bool:\n return distance_between_sq(x1, y1, x2, y2) <= (r1 + r2)**2", "def Distance(VCoords, SCoords):\r\n #Convert to radians\r\n lat1,lon1,lat2,lon2 = map(math.radians, VCoords+SCoords)\r\n #Apply Haversine formula\r\n dlon = lon2-lon1\r\n dlat = lat2-lat1\r\n a = math.sin(dlat/2)**2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon/2)**2\r\n c = 2 * math.asin(math.sqrt(a))\r\n #Earth radius = 6371km\r\n return 6371 * c", "def validate_pos(game: TowerDefenceSolver, position: Tuple[int, int], purchases_list: Purchases) -> bool:\n if (\n position[0] < 0\n or position[1] < 0\n or position[0] >= game.map_height\n or position[1] >= game.map_width\n or position in game.path\n ):\n return False\n\n for purchase in purchases_list:\n if purchase[\"coords\"] == position:\n return False\n\n return True", "def _greatCircleDistance(self, long1, lat1, long2, lat2):\n # convert decimal degrees to radians \n long1, lat1, long2, lat2 = map(radians, [float(long1), float(lat1), float(long2), float(lat2)])\n # haversine formula \n dlon = long2 - long1\n #print(long2)\n #print(long1) \n dlat = lat2 - lat1 \n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a)) \n r = 6371 # Radius of earth in kilometers. 
Use 3956 for miles\n #print(c*r)\n return c * r", "def test_update_radius():\n center = Coordinates(1, 1)\n rad1 = 20.3\n speed = 30\n\n i = Intersection(center, rad1, speed)\n\n assert i.get_radius() == 20.3\n\n i.update_radius(56.5)\n\n assert i.get_radius() == 56.5", "def test_get_center():\n center = Coordinates(7, 3)\n radius = 12\n\n returned_center = get_center(center, radius, 25)\n\n assert returned_center.get_x() == center.get_x()\n assert returned_center.get_y() == center.get_y()", "def isInside(self, position, maxDimLens):\n dim = len(position);\n deltas = [0] * dim;\n distSqr = 0;\n for i in range(0, dim):\n deltas[i] = math.fabs(position[i] - self.mSample[i]);\n if math.fabs(maxDimLens[i] - deltas[i]) < deltas[i]:\n deltas[i] = math.fabs(maxDimLens[i] - deltas[i]);\n distSqr += deltas[i]**2;\n\n if distSqr < ( self.mRadius**2 ):\n return True;\n else:\n return False;", "def check_location(location):\r\n if location.latitude > 35 and location.latitude < 39 and location.longitude > -123 and location.longitude < -120:\r\n return True\r\n else:\r\n return False", "def get_uvcircle(Grid):\n \n# center of circulation\n loc=-67.5;lac=41.5; \n dx=(Grid['lonc']-loc)*Grid['coslatc']\n dy=(Grid['latc']-lac)\n di=np.sqrt(dx*dx+dy*dy)\n an=np.angle(dx+1j*dy)\n# velocity is linearly increasing with distance \n# 0.1 m/s at 1 deg distance away from center \n# cyclonic gyre \n u=-0.1*di*np.sin(an)\n v= 0.1*di*np.cos(an)\n# adjust the velocity so that the rotation will be perfect \n# on lon-lat plane\n u=u*Grid['coslatc']/np.cos(lac*np.pi/180) \n \n return u,v", "def HasPoint(self, vtkAMRBox, , , p_float_6, p_float_7, p_float_8):\n ...", "def test_get_neighborhood_radius_consistent():\r\n grid_spacing = random.uniform(1e-6, 10.0)\r\n center = numpy.random.random(random.randint(1, 3))\r\n\r\n # Find points with radius neighborhood\r\n radius = random.uniform(_distance_to_nearest(grid_spacing, center), grid_spacing*5)\r\n points = ill.get_neighborhood_radius(grid_spacing, center, radius)\r\n\r\n # Every points found within this radius, should be in the points of a larger radius\r\n outer_points = ill.get_neighborhood_radius(grid_spacing, center,\r\n radius+random.uniform(0.0, grid_spacing*5))\r\n\r\n for point in points:\r\n assert point in outer_points", "def my_constraint_function(candidate):\r\n # In this case, we'll just say that the point has to lie \r\n # within a circle centered at (0, 0) of radius 1.\r\n if candidate[0]**2 + candidate[1]**2 > 1:\r\n return 1\r\n else:\r\n return 0", "def point_in_ellipse(point: Vector, center: Vector, angle: float, length: float, width: float) -> bool:\n c, s = np.cos(angle), np.sin(angle)\n r = np.matrix([[c, -s], [s, c]])\n ru = r.dot(point - center)\n return np.sum(np.square(ru / np.array([length, width]))) < 1", "def within_radius(latlong, radius=5.0, ctype_fields=None):\n\n radius = float(radius) # may come through as str\n\n if type(latlong) == GeoEntry:\n latlong = [latlong.latitude, latlong.longitude]\n\n # These are our 4 points (N/S/E/W) We use this to build a bounding box\n HEADINGS = enumerate([0, math.pi/2, math.pi, 3*math.pi/2])\n\n try:\n geoentry = GeoEntry.objects.get(latitude=latlong[0], longitude=latlong[1])\n except GeoEntry.DoesNotExist:\n # Maybe handle more gracefully?\n raise Exception('No GeoEntry matching query')\n except GeoEntry.MultipleObjectsReturned:\n # In this case lets use the first one instead for now\n geoentry = GeoEntry.objects.filter(latitude=latlong[0], longitude=latlong[1])[0]\n\n source_lat = 
GeoEntry.degrees_to_radians(latlong[0])\n source_long = GeoEntry.degrees_to_radians(latlong[1])\n distance = GeoEntry.miles_to_radians(radius)\n\n boundries = []\n\n for (cnt, heading) in HEADINGS:\n target_lat = GeoEntry.calculate_latitude(source_lat, distance, heading)\n target_long = GeoEntry.calculate_longitude(target_lat, source_long, distance, heading)\n\n boundries.append([GeoEntry.radians_to_degrees(target_lat), \\\n GeoEntry.radians_to_degrees(target_long)])\n\n entries = GeoEntry.objects.all().select_related().filter(latitude__lte=str(boundries[0][0]),\n latitude__gte=str(boundries[2][0]), longitude__gte=str(boundries[1][1]),\n longitude__lte=str(boundries[3][1]))\n\n entry_data = list()\n for entry in entries:\n obj = entry.content_object\n ctype_dict = None,\n if obj and ctype_fields:\n if (len(ctype_fields)==1 and ctype_fields[0]=='all'):\n ctype_list = list()\n for field in obj._meta.fields:\n field_data = getattr(obj, field.name)\n if type(field_data) == unicode:\n ctype_list.append([field.name, field_data.encode('utf-8')])\n else:\n ctype_list.append([field.name, str(field_data)])\n ctype_dict = dict(ctype_list)\n else:\n ctype_dict = dict([[field, getattr(obj, field)] for field in ctype_fields])\n entry_data.append(dict(\n distance=entry.distance_to_latlong((latlong[0], latlong[1])),\n content_type=entry.content_type.pk,\n object_id=entry.object_id,\n object_data=ctype_dict,\n latitude=str(entry.latitude),\n longitude=str(entry.longitude),\n )\n )\n entry_data.sort() # return orderd by distance\n # now fix the bounding box SQL to limit within our radius\n sorted_data = [elem for elem in entry_data if elem['distance']<radius]\n return sorted_data", "def hitTest( a, b ):\n r = a.radius + b.radius\n x = abs( a.x - b.x )\n y = abs( a.y - b.y )\n if x <= r and y <= r and x*x + y*y <= r*r:\n return 1\n return 0", "def lat_lon_box(lat, dist):\n r_earth = 6371.\n d_2r = dist/(2.*r_earth)\n dlat = 2. * (d_2r)\n dlon = 2. 
* np.arcsin((np.sin(d_2r))/(np.cos(lat)))\n dlat *= 180./np.pi\n dlon *= 180./np.pi\n return abs(dlat), abs(dlon)", "def in_box(point, c1, c2):\n c1x, c1y = c1\n c2x, c2y = c2\n x, y = point\n return min(c1x, c2x) <= x <= max(c1x, c2x) and min(c1y, c2y) <= y <= max(c1y, c2y)", "def isinsidearcXY(c,p):\n\n x = c[0]\n r = c[1][0]\n if dist(x,p) > r:\n return False\n if iscircle(c):\n return True\n start = c[1][1]%360.0\n end = c[1][2]%360.0\n if end < start:\n end+= 360.0\n p2 = sub(p,x)\n ang = (atan2(p2[1],p2[0]) % pi2)*360/pi2\n\n if end <= 360.0:\n return (ang >= start and ang <= end)\n else:\n return ang >= start or ang <= (end-360.0)", "def has_intersection(self, obj):\r\n obj_x, obj_y = obj.get_location()\r\n x = self.__x\r\n y = self.__y\r\n # Distance formula\r\n distance = sqrt((obj_x - x) ** 2 + (obj_y - y) ** 2)\r\n if distance <= obj.get_radius() + self.__radius:\r\n return True\r\n return False", "def is_point_in_box(x, y, bbox):\n if x < 200 and y < 200:\n return True\n return False", "def intersects( self, sphere, dim ):\n nearest = self.__findFarestPoint__( sphere.center );\n dist = utility.euclideanDistSqr( nearest, sphere.center );\n if( dist < sphere.radius**2 ):\n return True;\n else:\n return False;", "def _generate_boxcar_volume(x, radius, center):\n\n # Form cubic position array for x, y, z\n X_cube = x.copy()\n\n\n # Find all points inside boxcar inside the cube\n vol = np.sqrt((X_cube - center) ** 2 / radius ** 2)\n vol = vol <= 1\n\n return vol.astype(float)", "def find_channel_neighbors(geom, radius):\n return (squareform(pdist(geom)) <= radius)", "def find_channel_neighbors(geom, radius):\n return (squareform(pdist(geom)) <= radius)", "def calcul_v_sphere(r):\n volume = 4/3 * math.pi * (r ** 3)\n return volume", "def vincenty(lat1, lon1, lat2, lon2,\n r_major=6378.1370, r_minor=6356.752314, r_sphere=None):\n lat1 = m.radians(lat1)\n lat2 = m.radians(lat2)\n lon1 = m.radians(lon1)\n lon2 = m.radians(lon2)\n \n if (r_sphere is not None):\n r_major = r_sphere\n r_minor = r_sphere\n f = 0.0\n else:\n f = (r_major-r_minor)/r_major\n \n U1 = m.atan((1.0-f) * m.tan(lat1))\n U2 = m.atan((1.0-f) * m.tan(lat2))\n L = lon2 - lon1\n \n epsilon = 1E-12 # Accuracy (10E-12 -> ~ 0.06mm)\n max_iter = 500\n lam = L\n \n cU1 = m.cos(U1)\n cU2 = m.cos(U2)\n sU1 = m.sin(U1)\n sU2 = m.sin(U2)\n \n for i in range(max_iter):\n lam_old = lam\n sLam = m.sin(lam)\n cLam = m.cos(lam)\n sin_sig = m.sqrt((cU2*sLam)**2 + (cU1*sU2 - sU1*cU2*cLam)**2)\n cos_sig = sU1*sU2 + cU1*cU2*cLam\n sig = m.atan2(sin_sig,cos_sig)\n sin_alp = (cU1*cU2*sLam) / sin_sig\n cos2_alp = 1.0 - sin_alp**2\n if (cos2_alp == 0.0):\n # equitorial line\n cos_2sigm = 100\n C = 0.0\n else:\n cos_2sigm = cos_sig - (2.0*sU1*sU2)/cos2_alp\n C = f/16.0 * cos2_alp * (4.0 + f*(4.0-3.0*cos2_alp))\n lam = L + (1.0 - C) * f * sin_alp * \\\n (sig + C * sin_sig * (cos_2sigm + C * cos_sig * \\\n (-1.0 + 2.0 * cos_2sigm**2)))\n if ((m.fabs(lam - lam_old)) <= epsilon):\n # Found a solution in i iters...\n break\n elif (i == max_iter):\n # Catch the out of iters case, never seen this.\n raise Exception(\"Failed to solve for distance\")\n \n usq = cos2_alp * ((r_major**2 - r_minor**2) / r_minor**2)\n A = 1 + usq/16384 * (4096 + usq*(-768 + usq*(320 - 175*usq)))\n B = usq/1024 * (256 + usq*(-128 + usq*(74 - 47*usq)))\n del_sig = B * sin_sig * (cos_2sigm + 0.25*B*(cos_sig*( \\\n -1 + 2*cos_2sigm**2) - (1.0/6.0)*B*cos_2sigm * ( \\\n -3 + 4*sin_sig**2) * (-3 + 4 * cos_2sigm**2)))\n s = r_minor * A * (sig - del_sig)\n alp1 = 
m.atan2(cU2*m.sin(lam),(cU1*sU2-sU1*cU2*m.cos(lam)))\n alp2 = m.atan2(cU1*m.sin(lam),(cU1*sU2*m.cos(lam)-sU1*cU2))\n\n return (s, m.degrees(alp1), m.degrees(alp2))", "def in_box(x, y):\n if self.zoom_box and x <= self.zoom_box_max_x and \\\n x >= self.zoom_box_min_x and y >= self.zoom_box_min_y and \\\n y <= self.zoom_box_max_y:\n return True\n else:\n return False", "def inside( self, point ):\n for i in range( 0, len(point) ):\n if math.fabs( self.center[i] - point[i] ) > self.dimLens[i]/2.0:\n return False;\n return True;", "def is_in_lon_lat(coords):\n\n bounds = make_bounding_box_array(coords)\n xbounds = np.array([bounds[0], bounds[2]])\n ybounds = np.array([bounds[1], bounds[3]])\n return np.all(xbounds) < 180.0 and np.all(xbounds > -180.0) and \\\n np.all(ybounds) < 90.0 and np.all(ybounds > -90)", "def hit(bx, by, r, px, py,h):\n if bx >= px:\n distance = bx - px\n else:\n distance = px - bx\n if py<=by and by<=py+h and distance <= r:\n return True\n else:\n return False", "def test_get_kilometers() -> None:\n kilometers = location_util.vincenty(COORDINATES_PARIS, COORDINATES_NEW_YORK)\n assert round(kilometers, 2) == DISTANCE_KM", "def dist_vincenty(lat1, lon1, lat2, lon2, iterations=20):\r\n if lat1 < -90 or lat1 > 90 or lat2 < -90 or lat2 > 90 or lon1 < -180 or lon1 > 180 or lon2 < -180 or lon2 > 180:\r\n raise ValueError(\r\n \"Latitude values shoulds range from (-90,90) and longitude from (-180,180) but one of the input values is out of bounds. Latitude_1: %f, Logitude_1: %f, Latitude_2: %f, Logitude_2: %f\" %\r\n (lat1, lon1, lat2, lon2))\r\n\r\n major, minor, f = 6378137, 6356752.314245, 1 / 298.257223563\r\n\r\n lat1, lng1, lat2, lng2 = radians(\r\n lat1), radians(lon1), radians(lat2), radians(lon2)\r\n delta_lng = lng2 - lng1\r\n reduced_lat1, reduced_lat2 = atan(\r\n (1 - f) * tan(lat1)), atan((1 - f) * tan(lat2))\r\n\r\n sin_reduced1, cos_reduced1 = sin(reduced_lat1), cos(reduced_lat1)\r\n sin_reduced2, cos_reduced2 = sin(reduced_lat2), cos(reduced_lat2)\r\n\r\n lambda_lng = delta_lng\r\n lambda_prime = 2 * pi\r\n while abs(lambda_lng - lambda_prime) > 10e-12 and iterations > 0:\r\n sin_lambda_lng, cos_lambda_lng = sin(lambda_lng), cos(lambda_lng)\r\n\r\n sin_sigma = sqrt(\r\n (cos_reduced2 * sin_lambda_lng) ** 2 +\r\n (cos_reduced1 * sin_reduced2 -\r\n sin_reduced1 * cos_reduced2 * cos_lambda_lng) ** 2\r\n )\r\n if sin_sigma == 0:\r\n return 0 # Coincident points\r\n\r\n cos_sigma = (\r\n sin_reduced1 * sin_reduced2 +\r\n cos_reduced1 * cos_reduced2 * cos_lambda_lng\r\n )\r\n sigma = atan2(sin_sigma, cos_sigma)\r\n\r\n sin_alpha = (cos_reduced1 * cos_reduced2 * sin_lambda_lng / sin_sigma)\r\n cos_sq_alpha = 1 - sin_alpha ** 2\r\n\r\n if cos_sq_alpha != 0:\r\n cos2_sigma_m = cos_sigma - 2 * \\\r\n (sin_reduced1 * sin_reduced2 / cos_sq_alpha)\r\n else:\r\n cos2_sigma_m = 0.0 # Equatorial line\r\n\r\n C = f / 16. * cos_sq_alpha * (4 + f * (4 - 3 * cos_sq_alpha))\r\n\r\n lambda_prime = lambda_lng\r\n lambda_lng = (\r\n delta_lng + (1 - C) * f * sin_alpha * (\r\n sigma + C * sin_sigma * (\r\n cos2_sigma_m + C * cos_sigma * (-1 + 2 * cos2_sigma_m ** 2)\r\n )\r\n )\r\n )\r\n iterations -= 1\r\n\r\n if iterations == 0:\r\n raise ValueError(\"Vincenty formula failed to converge!\")\r\n\r\n u_sq = cos_sq_alpha * (major ** 2 - minor ** 2) / minor ** 2\r\n A = 1 + u_sq / 16384. * (4096 + u_sq * (-768 + u_sq * (320 - 175 * u_sq)))\r\n B = u_sq / 1024. * (256 + u_sq * (-128 + u_sq * (74 - 47 * u_sq)))\r\n delta_sigma = B * sin_sigma * (\r\n cos2_sigma_m + B / 4. 
* (cos_sigma * (-1 + 2 * cos2_sigma_m ** 2) -\r\n B / 6. * cos2_sigma_m * (-3 + 4 * sin_sigma ** 2) *\r\n (-3 + 4 * cos2_sigma_m ** 2))\r\n )\r\n s = minor * A * (sigma - delta_sigma)\r\n\r\n return round(s, 3) # round to 1mm precision\r", "def is_inside(self, x: int, y: int) -> bool:\n pass", "def is_point_within(self, x, y):\n return abs(x - self._x_position) <= self._x_length / 2 and abs(y - self._y_position) <= self._y_length / 2", "def in_woolsey_fire_area(lat, long):\n min_lat = 33.856\n max_lat = 34.211\n min_long = -118.7114\n max_long = -118.2511\n\n return in_area_of_interest(lat, long, min_lat, max_lat, min_long, max_long)" ]
[ "0.64729494", "0.59081954", "0.5859355", "0.58463377", "0.5835103", "0.5793143", "0.5749297", "0.5747117", "0.571432", "0.5694992", "0.5625121", "0.55783933", "0.5532883", "0.5531398", "0.55265725", "0.5519289", "0.54891366", "0.5458684", "0.54542017", "0.5451329", "0.53815925", "0.5368734", "0.53533536", "0.5344653", "0.5337585", "0.5330999", "0.53292596", "0.5327822", "0.5281993", "0.527632", "0.52735966", "0.5269682", "0.52684206", "0.52648", "0.5231576", "0.52148795", "0.5184256", "0.51838636", "0.5178946", "0.5169917", "0.5168901", "0.5161907", "0.5155475", "0.5154727", "0.51539874", "0.51479965", "0.5140828", "0.51128596", "0.5112121", "0.5096806", "0.5091921", "0.5088492", "0.5076165", "0.50644964", "0.5055583", "0.5042755", "0.5042422", "0.5030619", "0.5025518", "0.5024839", "0.50214154", "0.501631", "0.50141555", "0.50037056", "0.50034815", "0.5001366", "0.49843", "0.49586815", "0.49437466", "0.49362272", "0.49278477", "0.4926819", "0.49258405", "0.4924768", "0.49194354", "0.49169534", "0.4915518", "0.49011132", "0.48934844", "0.48847508", "0.4884049", "0.48769063", "0.4876478", "0.4872266", "0.48717093", "0.48668104", "0.48648778", "0.48599103", "0.48599103", "0.48493335", "0.4841431", "0.48378548", "0.48362762", "0.48337093", "0.48309854", "0.48256713", "0.48179892", "0.48160893", "0.48124367", "0.4809426" ]
0.7570688
0
(Product, float, float, radius, list) > boolean Check if the coordinates of a shop are within a radius (in meters) using Vincenty's formulae and if the shop contains any of the tags provided.
def is_matching_product_with_tags(product, lat, lng, radius, tags): return vincenty( (lat, lng), (product.shop.lat, product.shop.lng) ).meters <= radius and any(tag in product.shop.tags for tag in tags)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_matching_product(product, lat, lng, radius):\n return vincenty(\n (lat, lng),\n (product.shop.lat, product.shop.lng)\n ).meters <= radius", "def shops_within_radius(self, lat, lng, radius, tags=None):\n center_point = geoindex.GeoPoint(lat, lng)\n points = self.geoindex.get_nearest_points(center_point, radius, 'km')\n\n def tags_filter(shops):\n for shop in shops:\n for tag in tags:\n if tag in shop['tags']:\n yield shop\n break\n\n def get_shops():\n for point, distance in points:\n point.ref['distance'] = distance\n yield point.ref\n\n if tags:\n return tags_filter(get_shops())\n else:\n return get_shops()", "def __contains__(self, position):\n return sum([(c1 - c2) ** 2 for (c1, c2) in zip(self.position, position)]) <= self.radius", "def get_matching_products(products, lat, lng, radius, tags):\n if tags:\n tag_list = tags.split(',')\n return list([\n product for product in products\n if is_matching_product_with_tags(\n product,\n lat,\n lng,\n radius,\n tag_list\n )\n ])\n else:\n return list([\n product for product in products\n if is_matching_product(\n product,\n lat,\n lng,\n radius\n )\n ])", "def contains(self, position):\n return np.linalg.norm(position - self._center) < self._radius", "def __contains__(self, other):\n x, y = other\n return self.radius >= sqrt((x - self.x) ** 2 + (y - self.y) ** 2)", "def objects_radius(self, centre, radius):", "def contains(self, loc): \n return loc.distance(self.center) <= self.radius", "def FindPointsWithinRadius(self, p_float, , vtkIdList):\n ...", "def point_inside_circle(x,y,center_x,center_y,radius):\n return (x-center_x)**2 + (y - center_y)**2 < radius**2", "def isInside(x1, y1, x2, y2, x3, y3, x, y):\n # Calculate area of triangle ABC\n A = area (x1, y1, x2, y2, x3, y3)\n \n # Calculate area of triangle PBC\n A1 = area (x, y, x2, y2, x3, y3)\n \n # Calculate area of triangle PAC\n A2 = area (x1, y1, x, y, x3, y3)\n \n # Calculate area of triangle PAB\n A3 = area (x1, y1, x2, y2, x, y)\n \n # Check if sum of A1, A2 and A3\n # is same as A\n if(A == A1 + A2 + A3):\n return True\n else:\n return False", "def containsPos(self, obst_pos, aerial_pos):\n dist_to_center = obst_pos.distanceTo(aerial_pos)\n return dist_to_center <= self.sphere_radius", "def checkBounds(x,y,z,center,radius):\n r2 = (x-center[0])**2 + (y-center[1])**2# + (z-center[0])**2\n if r2 < radius**2:\n return True\n else:\n return False", "def isInCircle(self,x1,y1,radius1):\r\n if(distance(self.x,x1,self.y,y1) < (self.radius+radius1)):\r\n return True\r\n return False", "def check_point_in_detector(p, radius=radius, height=height, distance=distance):\r\n if p[0]**2 + p[1]**2 <= radius**2: # Are the x and y coordinates in the circle?\r\n if (p[2] >= distance) and (p[2] <= height+distance): # Is the z coordinate between the distance and the height?\r\n return True\r\n else:\r\n return False\r\n else:\r\n return False", "def inside(x, y, primitive):\n\n # You should implement your inside test here for all shapes\n # for now, it only returns a false test\n\n if primitive[\"shape\"] == \"circle\":\n dist_sqr = ((primitive[\"center\"][0] - x) ** 2 +\n (primitive[\"center\"][1] - y) ** 2)\n\n return dist_sqr <= primitive[\"radius\"] ** 2\n else:\n return winding_number(x, y, primitive)\n\n return False", "def is_point_inside_hypersphere(point: np.array, c: List[float], r: float) -> bool:\n return np.linalg.norm(point - c) < r", "def _intersected(positions, radius):\n P1 = positions[0]\n P2 = positions[1]\n P3 = positions[2]\n temp1 = P2 - P1\n e_x = temp1 / 
np.linalg.norm(temp1)\n temp2 = P3 - P1\n i = np.dot(e_x, temp2)\n temp3 = temp2 - i * e_x\n e_y = temp3 / np.linalg.norm(temp3)\n e_z = np.cross(e_x, e_y)\n d = np.linalg.norm(P2 - P1)\n j = np.dot(e_y, temp2) \n x = d / 2\n y = (-2*i*x + i*i + j*j) / (2*j)\n temp4 = radius**2 - x*x - y*y\n if temp4 < 0:\n return False\n return True", "def isoutside(coords, shape):\n # Label external pores for trimming below\n if len(shape) == 1: # Spherical\n # Find external points\n r = np.sqrt(np.sum(coords**2, axis=1))\n Ps = r > shape[0]\n elif len(shape) == 2: # Cylindrical\n # Find external pores outside radius\n r = np.sqrt(np.sum(coords[:, [0, 1]]**2, axis=1))\n Ps = r > shape[0]\n # Find external pores above and below cylinder\n if shape[1] > 0:\n Ps = Ps + (coords[:, 2] > shape[1])\n Ps = Ps + (coords[:, 2] < 0)\n else:\n pass\n elif len(shape) == 3: # Rectilinear\n shape = np.array(shape, dtype=float)\n try:\n lo_lim = shape[:, 0]\n hi_lim = shape[:, 1]\n except IndexError:\n lo_lim = np.array([0, 0, 0])\n hi_lim = shape\n Ps1 = np.any(coords > hi_lim, axis=1)\n Ps2 = np.any(coords < lo_lim, axis=1)\n Ps = Ps1 + Ps2\n return Ps", "def sphere_isclose(c1, c2, *args, **kwargs):\n return np.isclose(c1.radius, c2.radius, *args, **kwargs) and np.allclose(\n c1.center, c2.center, *args, **kwargs\n )", "def inside_unit_circle(point):\n distance = math.sqrt(point[0] ** 2 + point[1] ** 2)\n return distance < 1", "def get_radius(self):", "def incircle(self,xpos,ypos,cellx,celly):\n xcell, ycell = self.getcellcenter(cellx,celly)\n if ((xpos - xcell)**2 + (ypos - ycell)**2) < self.crad2:\n return True\n return False\n\n return cellx, celly", "def __contains__(self, point): \n corners = self.corners\n\n if isinstance(point, tuple):\n from pyresample.spherical_geometry import Coordinate\n retval = planar_point_inside(Coordinate(*point), corners)\n else:\n retval = planar_point_inside(point, corners)\n\n #print ' retval from FALSE CORNERS contains '+str(retval)\n\n return retval", "def is_point_inside_hypermoon(point: np.array, c: Tuple[List[float]], r: Tuple[float]) -> bool:\n return is_point_inside_hypersphere(point, c[0], r[0]) and not is_point_inside_hypersphere(point, c[1], r[1])", "def __contains__(self, point):\n #### Original \n from pyresample.spherical_geometry import point_inside, Coordinate\n corners = self.corners\n\n if isinstance(point, tuple):\n return point_inside(Coordinate(*point), corners)\n else:\n return point_inside(point, corners)\n #### End Original\n #from .spherical import SphPolygon\n #log.info('RUNNING SPHERICAL in __contains__')\n #sphpoly = SphPolygon(corners)\n #return sphpoly.intersection(SphPolygon(point), sphpoly)", "def containsPos(self, aerial_pos):\n # Check altitude of position\n aerial_alt = aerial_pos.altitude_msl\n if (aerial_alt < 0 or aerial_alt > self.cylinder_height):\n return False\n # Check lat/lon of position\n dist_to_center = self.gps_position.distanceTo(aerial_pos.gps_position)\n if dist_to_center > self.cylinder_radius:\n return False\n # Both within altitude and radius bounds, inside cylinder\n return True", "def within_radius(self, radius=5.0):\n\n return GeoEntry.within_radius(self.primary_geocode, radius, ['all',])", "def is_clicked(vtx_x, vtx_y, mouse_x, mouse_y, radius):\n return math.sqrt(((mouse_x - vtx_x) ** 2) + ((mouse_y - vtx_y) ** 2)) < radius", "def HasPoint(self, vtkAMRBox, , , p_float_6, p_float_7, p_float_8):\n ...", "def intersects( self, sphere, dim ):\n nearest = self.__findFarestPoint__( sphere.center );\n dist = 
utility.euclideanDistSqr( nearest, sphere.center );\n if( dist < sphere.radius**2 ):\n return True;\n else:\n return False;", "def isInside(self, position, maxDimLens):\n dim = len(position);\n deltas = [0] * dim;\n distSqr = 0;\n for i in range(0, dim):\n deltas[i] = math.fabs(position[i] - self.mSample[i]);\n if math.fabs(maxDimLens[i] - deltas[i]) < deltas[i]:\n deltas[i] = math.fabs(maxDimLens[i] - deltas[i]);\n distSqr += deltas[i]**2;\n\n if distSqr < ( self.mRadius**2 ):\n return True;\n else:\n return False;", "def within(self, right: GeoSpatialValue) -> ir.BooleanValue:\n return ops.GeoWithin(self, right).to_expr()", "def test_get_radius():\n center = Coordinates(7, 3)\n radius = 12\n\n returned_rad = get_radius(center, radius, 30)\n\n assert returned_rad == radius\n assert returned_rad != center.get_x()\n assert returned_rad != center.get_y()", "def contains_pt(self, pt):\n x, y = pt\n if not self.x - self.radius < x < self.x + self.radius:\n return False\n if not self.y - self.radius < y < self.y + self.radius:\n return False\n return True", "def is_point_inside_hypercube(point: List[float], c: List[float], r: float) -> bool:\n diff = np.subtract(point, c)\n return np.all(np.absolute(diff) <= r)", "def IsValid(self, *args):\n return _Bnd.Bnd_Sphere_IsValid(self, *args)", "def intersects(self, right: GeoSpatialValue) -> ir.BooleanValue:\n return ops.GeoIntersects(self, right).to_expr()", "def validate_pos(game: TowerDefenceSolver, position: Tuple[int, int], purchases_list: Purchases) -> bool:\n if (\n position[0] < 0\n or position[1] < 0\n or position[0] >= game.map_height\n or position[1] >= game.map_width\n or position in game.path\n ):\n return False\n\n for purchase in purchases_list:\n if purchase[\"coords\"] == position:\n return False\n\n return True", "def test_get_neighborhood_radius_consistent():\r\n grid_spacing = random.uniform(1e-6, 10.0)\r\n center = numpy.random.random(random.randint(1, 3))\r\n\r\n # Find points with radius neighborhood\r\n radius = random.uniform(_distance_to_nearest(grid_spacing, center), grid_spacing*5)\r\n points = ill.get_neighborhood_radius(grid_spacing, center, radius)\r\n\r\n # Every points found within this radius, should be in the points of a larger radius\r\n outer_points = ill.get_neighborhood_radius(grid_spacing, center,\r\n radius+random.uniform(0.0, grid_spacing*5))\r\n\r\n for point in points:\r\n assert point in outer_points", "def is_bound(pos1, el1, pos2, el2):\n threshold = 0.1\n if el1 == 'H' or el2 == 'H':\n threshold = 0.2\n if np.linalg.norm(np.array(pos1) - np.array(pos2)) < covalence_radius[el1] + covalence_radius[el2] + threshold:\n return True\n return False", "def within_radius(latlong, radius=5.0, ctype_fields=None):\n\n radius = float(radius) # may come through as str\n\n if type(latlong) == GeoEntry:\n latlong = [latlong.latitude, latlong.longitude]\n\n # These are our 4 points (N/S/E/W) We use this to build a bounding box\n HEADINGS = enumerate([0, math.pi/2, math.pi, 3*math.pi/2])\n\n try:\n geoentry = GeoEntry.objects.get(latitude=latlong[0], longitude=latlong[1])\n except GeoEntry.DoesNotExist:\n # Maybe handle more gracefully?\n raise Exception('No GeoEntry matching query')\n except GeoEntry.MultipleObjectsReturned:\n # In this case lets use the first one instead for now\n geoentry = GeoEntry.objects.filter(latitude=latlong[0], longitude=latlong[1])[0]\n\n source_lat = GeoEntry.degrees_to_radians(latlong[0])\n source_long = GeoEntry.degrees_to_radians(latlong[1])\n distance = 
GeoEntry.miles_to_radians(radius)\n\n boundries = []\n\n for (cnt, heading) in HEADINGS:\n target_lat = GeoEntry.calculate_latitude(source_lat, distance, heading)\n target_long = GeoEntry.calculate_longitude(target_lat, source_long, distance, heading)\n\n boundries.append([GeoEntry.radians_to_degrees(target_lat), \\\n GeoEntry.radians_to_degrees(target_long)])\n\n entries = GeoEntry.objects.all().select_related().filter(latitude__lte=str(boundries[0][0]),\n latitude__gte=str(boundries[2][0]), longitude__gte=str(boundries[1][1]),\n longitude__lte=str(boundries[3][1]))\n\n entry_data = list()\n for entry in entries:\n obj = entry.content_object\n ctype_dict = None,\n if obj and ctype_fields:\n if (len(ctype_fields)==1 and ctype_fields[0]=='all'):\n ctype_list = list()\n for field in obj._meta.fields:\n field_data = getattr(obj, field.name)\n if type(field_data) == unicode:\n ctype_list.append([field.name, field_data.encode('utf-8')])\n else:\n ctype_list.append([field.name, str(field_data)])\n ctype_dict = dict(ctype_list)\n else:\n ctype_dict = dict([[field, getattr(obj, field)] for field in ctype_fields])\n entry_data.append(dict(\n distance=entry.distance_to_latlong((latlong[0], latlong[1])),\n content_type=entry.content_type.pk,\n object_id=entry.object_id,\n object_data=ctype_dict,\n latitude=str(entry.latitude),\n longitude=str(entry.longitude),\n )\n )\n entry_data.sort() # return orderd by distance\n # now fix the bounding box SQL to limit within our radius\n sorted_data = [elem for elem in entry_data if elem['distance']<radius]\n return sorted_data", "def has_intersection(self, obj):\r\n obj_x, obj_y = obj.get_location()\r\n x = self.__x\r\n y = self.__y\r\n # Distance formula\r\n distance = sqrt((obj_x - x) ** 2 + (obj_y - y) ** 2)\r\n if distance <= obj.get_radius() + self.__radius:\r\n return True\r\n return False", "def hit(bx, by, r, px, py,h):\n if bx >= px:\n distance = bx - px\n else:\n distance = px - bx\n if py<=by and by<=py+h and distance <= r:\n return True\n else:\n return False", "def in_area_of_interest (lat, long, min_lat, max_lat, min_long, max_long):\n\n if lat is None or long is None:\n return False\n\n lat = float(lat)\n long = float(long)\n\n if ((lat >= min_lat and lat <= max_lat) and\n (long >= min_long and long <= max_long)):\n return True\n\n return False", "def isContainedWithin(self,other):\n retVal = True\n bounds = self.points\n\n if( isinstance(other,Feature) ): # another feature do the containment test\n retVal = other.contains(self)\n elif( isinstance(other,tuple) and len(other)==3 ): # a circle\n #assume we are in x,y, r format\n rr = other[2]*other[2] # radius squared\n x = other[0]\n y = other[1]\n for p in bounds:\n test = ((x-p[0])*(x-p[0]))+((y-p[1])*(y-p[1]))\n if( test > rr ):\n retVal = False\n break\n elif( isinstance(other,tuple) and len(other)==4 and # a bounding box\n ( isinstance(other[0],float) or isinstance(other[0],int))): # we assume a tuple of four is (x,y,w,h)\n retVal = ( self.maxX() <= other[0]+other[2] and\n self.minX() >= other[0] and\n self.maxY() <= other[1]+other[3] and\n self.minY() >= other[1] )\n elif(isinstance(other,list) and len(other) > 2 ): # an arbitrary polygon\n #everything else ....\n retVal = True\n for p in bounds:\n test = self._pointInsidePolygon(p,other)\n if(not test):\n retVal = False\n break\n\n else:\n logger.warning(\"SimpleCV did not recognize the input type to features.contains. 
This method only takes another blob, an (x,y) tuple, or a ndarray type.\")\n retVal = False\n return retVal", "def in_circle(x0, y0, x, y, r):\n return ((x - x0) ** 2 + (y - y0) ** 2) <= (r ** 2)", "def WhereAreYou(CurLongitude,CurLatitude,LocationLongitude,LocationLatitude,LocationRadius):\n # Calculate the great circle distance between two points\n # on the earth (specified in decimal degrees)\n # 将十进制度数转化为弧度\n CurLongitude,CurLatitude,LocationLongitude,LocationLatitude = map(radians, [float(CurLongitude),float(CurLatitude),float(LocationLongitude),float(LocationLatitude)])\n\n # haversine公式\n dlon = LocationLongitude - CurLongitude\n dlat = LocationLatitude - CurLatitude\n a = sin(dlat / 2) ** 2 + cos(CurLatitude) * cos(LocationLatitude) * sin(dlon / 2) ** 2\n c = 2 * asin(sqrt(a))\n r = 6371 # 地球平均半径,单位为公里\n distance = c * r * 1000\n if(distance < float(LocationRadius)):\n return True\n else:\n return False", "def check_me(triplet, list_of_coords):\n c = True\n for element in list_of_coords:\n if (float(triplet[0])*0.99 <= float(element[0]) <= float(triplet[0])*1.01):\n if (float(triplet[1])*0.99 <= float(element[1]) <= float(triplet[1])*1.01):\n if (float(triplet[2])*0.99 <= float(element[2]) <= float(triplet[2])*1.01):\n c = False\n return c", "def contains(self,other):\n retVal = False\n\n bounds = self.points\n if( isinstance(other,Feature) ):# A feature\n retVal = True\n for p in other.points: # this isn't completely correct - only tests if points lie in poly, not edges.\n p2 = (int(p[0]),int(p[1]))\n retVal = self._pointInsidePolygon(p2,bounds)\n if( not retVal ):\n break\n # a single point\n elif( (isinstance(other,tuple) and len(other)==2) or ( isinstance(other,np.ndarray) and other.shape[0]==2) ):\n retVal = self._pointInsidePolygon(other,bounds)\n\n elif( isinstance(other,tuple) and len(other)==3 ): # A circle\n #assume we are in x,y, r format\n retVal = True\n rr = other[2]*other[2]\n x = other[0]\n y = other[1]\n for p in bounds:\n test = ((x-p[0])*(x-p[0]))+((y-p[1])*(y-p[1]))\n if( test < rr ):\n retVal = False\n break\n\n elif( isinstance(other,tuple) and len(other)==4 and ( isinstance(other[0],float) or isinstance(other[0],int))):\n retVal = ( self.maxX() <= other[0]+other[2] and\n self.minX() >= other[0] and\n self.maxY() <= other[1]+other[3] and\n self.minY() >= other[1] )\n elif(isinstance(other,list) and len(other) >= 4): # an arbitrary polygon\n #everything else ....\n retVal = True\n for p in other:\n test = self._pointInsidePolygon(p,bounds)\n if(not test):\n retVal = False\n break\n else:\n logger.warning(\"SimpleCV did not recognize the input type to features.contains. 
This method only takes another blob, an (x,y) tuple, or a ndarray type.\")\n return False\n\n return retVal", "def within(point: tuple, box: tuple) -> bool:\r\n \r\n return box[0] < point[0] < box[2] and box[1] < point[1] < box[3]", "def are_close(coord1, coord2, tolerance=10):\n return vincenty(coord1, coord2).meters < tolerance", "def covers(self, right: GeoSpatialValue) -> ir.BooleanValue:\n return ops.GeoCovers(self, right).to_expr()", "def stores_in_given_radius_of_given_postcode(request):\n\n radius = request.POST.get('radius')\n if radius:\n radius = Decimal(radius)\n else:\n return Response('Radius is required', status=status.HTTP_404_NOT_FOUND)\n\n postcode = request.POST.get('postcode')\n if postcode:\n postcode = postcode.replace(' ', '')\n else:\n return Response('Postcode is required', status=status.HTTP_404_NOT_FOUND)\n\n stores_in_given_radius = []\n source_latitude, source_longitude = get_lat_and_lon_of_given_postcode(postcode)\n\n for store in Store.objects.order_by('-latitude'):\n if store.latitude and store.longitude:\n if check_if_in_radius(store, source_latitude, source_longitude, radius):\n stores_in_given_radius.append(store)\n\n serializer = StoreSerializer(stores_in_given_radius, many=True)\n return Response(serializer.data)", "def brute_force(savedPnts, unitRadius, point):\n for pnt in savedPnts:\n d = distance(pnt, point)\n if d < unitRadius: return False\n return True", "def test_is_on_intersection():\n center = Coordinates(1, 1)\n radius = 10\n\n i = Intersection(center, radius, 20)\n\n in_circle = Coordinates(2, 2)\n not_in_circle = Coordinates(100, 150)\n before_circumference = Coordinates(1, 10.9)\n on_circumference = Coordinates(1, 11)\n after_circumference = Coordinates(1, 11.1)\n\n assert is_on_intersection(i, in_circle)\n assert is_on_intersection(i, on_circumference)\n assert is_on_intersection(i, before_circumference)\n assert not is_on_intersection(i, not_in_circle)\n assert not is_on_intersection(i, after_circumference)", "def _check_overlap(self, points, radius):\n dists = distance.cdist(points, points, 'euclidean')\n dists = dists[np.nonzero(dists)]\n\n return np.any(dists < 2.0 * radius)", "def circles_collide(x1: float, y1: float, r1: float, x2: float, y2: float, r2: float) -> bool:\n return distance_between_sq(x1, y1, x2, y2) <= (r1 + r2)**2", "def is_inside(self, p):\n s, t = self.get_barycentric_coord(p)\n if 0 <= s <= 1 and 0 <= t <= 1 and s + t <= 1:\n return True\n else:\n return False", "def test_update_radius():\n center = Coordinates(1, 1)\n rad1 = 20.3\n speed = 30\n\n i = Intersection(center, rad1, speed)\n\n assert i.get_radius() == 20.3\n\n i.update_radius(56.5)\n\n assert i.get_radius() == 56.5", "def radius(x) :\r\n return Feature(x, \"radius\")", "def contains(self, Union, QPointF=None, QPoint=None): # real signature unknown; restored from __doc__\n return False", "def is_inside(self, points):\n points = np.atleast_2d(points) - self.centroid\n return np.logical_and(\n np.linalg.norm(points, axis=-1) <= self.radius,\n # At present circles are not orientable, so the z position must\n # match exactly.\n np.isclose(points[:, 2], 0),\n )", "def contains ( self, pos ):\n dr2 = (pos[0, :]-self.x)**2 + (pos[1, :]-self.y)**2\n # which points are in the circle?\n if self.include_border:\n inds = (dr2 - self.r**2) < self.abs_tol\n else:\n inds = (dr2 - self.r**2) < -self.abs_tol\n \n \n # if there's no poit inside\n if ~inds.any() and self.default_nearest: \n inds[argmin(dr2)] = True\n \n return inds", "def is_inside(self, x: int, y: int) -> 
bool:\n pass", "def check_in(x, y, R=Re):\n r = np.sqrt(x ** 2 + y ** 2)\n return r <= R", "def testSphereRadius(self):\n sp = nigel.SphereSelection(nb, radius=10)\n self.assertEqual(sp.n, 9)", "def dans_cercle(self, r, x, y):\r\n self.r_num(r)\r\n valid = (isinstance(x, int) or isinstance(x, float)) and \\\r\n (isinstance(y, int) or isinstance(y, float))\r\n if valid:\r\n if sqrt(x**2+y**2)<self.r:\r\n return True\r\n else:\r\n return False\r\n else:\r\n raise TypeError", "def hitTest( a, b ):\n r = a.radius + b.radius\n x = abs( a.x - b.x )\n y = abs( a.y - b.y )\n if x <= r and y <= r and x*x + y*y <= r*r:\n return 1\n return 0", "def is_inside(inner_path, outer_path):\r\n if not hasattr(inner_path, 'bounding_box'):\r\n inner_path.bounding_box = CutPlanner.bounding_box(inner_path)\r\n if not hasattr(outer_path, 'bounding_box'):\r\n outer_path.bounding_box = CutPlanner.bounding_box(outer_path)\r\n if outer_path.bounding_box[0] > inner_path.bounding_box[0]:\r\n # outer minx > inner minx (is not contained)\r\n return False\r\n if outer_path.bounding_box[1] > inner_path.bounding_box[1]:\r\n # outer miny > inner miny (is not contained)\r\n return False\r\n if outer_path.bounding_box[2] < inner_path.bounding_box[2]:\r\n # outer maxx < inner maxx (is not contained)\r\n return False\r\n if outer_path.bounding_box[3] < inner_path.bounding_box[3]:\r\n # outer maxy < inner maxy (is not contained)\r\n return False\r\n if outer_path.bounding_box == inner_path.bounding_box:\r\n if outer_path == inner_path: # This is the same object.\r\n return False\r\n if not hasattr(outer_path, 'vm'):\r\n outer_path = Polygon([outer_path.point(i / 100.0, error=1e4) for i in range(101)])\r\n vm = VectorMontonizer()\r\n vm.add_cluster(outer_path)\r\n outer_path.vm = vm\r\n for i in range(101):\r\n p = inner_path.point(i / 100.0, error=1e4)\r\n if not outer_path.vm.is_point_inside(p.x, p.y):\r\n return False\r\n return True", "def is_point_in_box(x, y, bbox):\n if x < 200 and y < 200:\n return True\n return False", "def inside(self, p: PointType, q: PointType) -> bool:\n\n # XXX re-implement with ccw and a list of points instead of a pair\n\n i = min(p.x, q.x) < self.x < max(p.x, q.x)\n j = min(p.y, q.y) < self.y < max(p.y, q.y)\n\n return i and j", "def get_in_radius(radius):\n def _return_values(user):\n \"\"\"\n :user Dict containing the users information, by keeping this in a nested function if more or less information\n is required to be returned it can be modified in one location rather than multiple locations.\n Returns a predefined dict of values to minimise duplicated code.\n :return: dictionary of user values\n \"\"\"\n return {\"id\":user[\"id\"],\n \"first_name\":user[\"first_name\"],\n \"last_name\": user[\"last_name\"]\n }\n\n users_in_range = [] # dictionary to store user information as we only want first name and last name.\n london = (51.30, 0.5) # Create a position point for london using its latitude and longitude\n\n # ** Note: 'City' is not included in the data returned by the users end point ut it is if you call the\n # users individually i could do this using for loop but that would cause 1000 calls to the API each time\n # the end point is called so instead i've opted to do 2 calls and parsing the data in the API.\n # This should minimise the number of requests being sent to the API.\n\n # First i will get all the users and compute their current distance from london and checking if that is within\n # the radius specified by the end user (radius component of the url), Then i will get all users 
listed as being\n # in the city of london and checking if those customers are already in the list by creating a list of ids.\n\n # If they are in the list they are discarded if they are not then their first name, last name and id are added\n # to the array, since the requirements did not specify what information was to be returned only those three values\n # are returned (This minimises the data protection implications)\n\n url = \"/users\"\n response = requests.get(f\"{host}{url}\")\n\n for user in json.loads(response.text):\n # Creation location point for the current user and use haversine to compute the distance between the user and\n # london in miles\n user_location = (float(user[\"latitude\"]), float(user[\"longitude\"]))# (lat, lon)\n distance = haversine(london, user_location, unit='mi')\n\n # if the distance is 50 miles or less then add the users first and last name to the users_in_range dict using\n if distance <= float(radius):\n users_in_range.append(_return_values(user))\n\n # Get the used defined as 'living in london' this is not clear in the instructions so i have made the 'assumption'\n # That the city value corresponds to their current city of residence.\n url = \"/city/London/users\"\n response = requests.get(f\"{host}{url}\")\n\n # Parse through the list or returned users and filter entries which already exist and append ones that dont to the\n # list to be returned\n for user in json.loads(response.text):\n if not user[\"id\"] in [user[\"id\"] for user in users_in_range]:\n users_in_range.append(_return_values(user))\n\n # convert the list into a json payload and return using\n return json.dumps(users_in_range)", "def containsManyPos(self, aerial_pos_list):\n # Get boundary points\n ordered_pts = self.boundary_pts.order_by('order')\n path_pts = [[wpt.position.gps_position.latitude,\n wpt.position.gps_position.longitude]\n for wpt in ordered_pts]\n # First check enough points to define a polygon\n if len(path_pts) < 3:\n return [False] * len(aerial_pos_list)\n\n # Create path to use for testing polygon inclusion\n path_pts.append(path_pts[0])\n path = mplpath.Path(np.array(path_pts))\n\n # Test each aerial position for altitude\n results = list()\n for aerial_pos in aerial_pos_list:\n # Check altitude bounds\n alt = aerial_pos.altitude_msl\n altitude_check = (alt <= self.altitude_msl_max\n and alt >= self.altitude_msl_min)\n results.append(altitude_check)\n\n # Create a list of positions to test whether inside polygon\n polygon_test_point_ids = [cur_id\n for cur_id in range(len(aerial_pos_list))\n if results[cur_id]]\n if len(polygon_test_point_ids) == 0:\n return results\n polygon_test_points = [[aerial_pos_list[cur_id].gps_position.latitude,\n aerial_pos_list[cur_id].gps_position.longitude]\n for cur_id in polygon_test_point_ids]\n\n # Test each point for inside polygon\n polygon_test_results = path.contains_points(\n np.array(polygon_test_points))\n for test_id in range(len(polygon_test_point_ids)):\n cur_id = polygon_test_point_ids[test_id]\n results[cur_id] = (polygon_test_results[test_id] == True)\n\n return results", "def isinsidepointXY(x,p):\n \n return dist(x,p) < epsilon", "def at_loc((x, y), (cx, cy), eps=0.000035):\n\treturn (x - cx)**2 + (y - cy)**2 <= eps**2", "def inCircleFast(self, tri, p):\n center, radius = self.circles[tri]\n return np.sum(np.square(center - p)) <= radius", "def check_coordinates(X, Y):\n\n # Accounting for elliptical Jupiter disk\n Y *= 1.071374\n\n return sqrt(X ** 2 + Y ** 2)", "def in_water(latitude: float, longitude: float) -> 
bool:\n path = os.path.abspath('water_polygons.shp')\n with fiona.open(path) as fiona_collection:\n box_detail = 0.0001\n point = Point(longitude, latitude)\n # here we filter to only scan results near the point in question.\n for record in fiona_collection.filter(bbox=(\n longitude+box_detail, latitude+box_detail,\n longitude-box_detail, latitude-box_detail)):\n if record['geometry']:\n shape = asShape(record['geometry'])\n if shape.contains(point):\n return True\n return False", "def isInArea(self, width, height, depth):\n if self.position[0] + self.radius > width / -2 and self.position[0] + self.radius < width / 2:\n if self.position[1] + self.radius > height / -2 and self.position[1] + self.radius < height / 2:\n if self.position[2] + self.radius > -depth and self.position[2] + self.radius < depth:\n return True\n else:\n return False", "def contains_properly(self, right: GeoSpatialValue) -> ir.BooleanValue:\n return ops.GeoContainsProperly(self, right).to_expr()", "def in_ellipse(x,y,a,b):\n return ellipse(x,y,a,b) <= 1", "def conf_test(self, trial_coords: np.ndarray) -> bool:\n #N = len(trial_coords)\n r2: float = 0\n i =0\n r2 = np.inner(trial_coords, trial_coords)\n if (r2> self.m_radius2):\n return False\n return True", "def vincenty(p1, p2):\n # Note: GeoPy expects (latitude, longitude) pairs.\n return geopy.distance.vincenty(\n (p1.y, p1.x),\n (p2.y, p2.x)\n ).miles", "def area_of_circle(radius):\n return radius", "def sphereArea(radius):\n area = 4 * math.pi * radius ** 2\n return area", "def point_in_ellipse(point: Vector, center: Vector, angle: float, length: float, width: float) -> bool:\n c, s = np.cos(angle), np.sin(angle)\n r = np.matrix([[c, -s], [s, c]])\n ru = r.dot(point - center)\n return np.sum(np.square(ru / np.array([length, width]))) < 1", "def in_geofence(self, coordinates):\n\t\tcoords_transformed = ogr.Geometry(ogr.wkbPoint)\n\t\tcoords_transformed.AddPoint(*coordinates)\n\t\treturn self.polygon.Contains(coords_transformed)", "def is_inside(self, mX, mY, point):\n return (math.sqrt((point[0] - mX) * (point[0] - mX)\n + (point[1] - mY) * (point[1] - mY)) <= 2)", "def Check(self, vtkGenericCell, p_float):\n ...", "def isPrice(price, high, low):\n return (price >= low) and (price <= high)", "def check_inside(self, pos):\n x,y = pos\n return x >= self.posx and x <= self.posx + self.sizex and y >= self.posy and y <= self.posy + self.sizey", "def sphere_area(radius : number) -> number:\n area = 4*pi*radius*radius\n return area", "def calculate(self, real_estate, min_length, min_area, length_unit, area_unit):\n geometry_types = Config.get('geometry_types')\n line_types = geometry_types.get('line').get('types')\n polygon_types = geometry_types.get('polygon').get('types')\n point_types = geometry_types.get('point').get('types')\n if self.published:\n intersection = self.geom.intersection(real_estate.limit)\n # TODO upon update to Shapely 1.7, a check for result.is_emtpy will be needed (see PR#1037)\n # differentiate between Points and MultiPoint\n if not intersection.is_empty:\n result = self._extract_collection(intersection)\n if self.geom.type not in point_types + line_types + polygon_types:\n supported_types = ', '.join(point_types + line_types + polygon_types)\n raise AttributeError(\n u'The passed geometry is not supported: {type}. 
It should be one of: {types}'.format(\n type=self.geom.type, types=supported_types\n )\n )\n elif self.geom.type in point_types:\n if result.type == point_types[1]:\n # If it is a multipoint make a list and count the number of elements in the list\n self._nr_of_points = len(list(result.geoms))\n self._test_passed = True\n elif result.type == point_types[0]:\n # If it is a single point the number of points is one\n self._nr_of_points = 1\n self._test_passed = True\n elif self.geom.type in line_types and result.type in line_types:\n self._units = length_unit\n length_share = result.length\n if length_share >= min_length:\n self._length_share = length_share\n self._test_passed = True\n elif self.geom.type in polygon_types and result.type in polygon_types:\n self._units = area_unit\n area_share = result.area\n compensated_area = area_share / real_estate.areas_ratio\n if compensated_area >= min_area:\n self._area_share = compensated_area\n self._test_passed = True\n else:\n # This intersection result should not be used for the OEREB extract:\n # for example, if two polygons are touching each other, the intersection geometry will be\n # the point or linestring representing the touching part.\n log.debug(\n u'Intersection result changed geometry type. '\n u'Original geometry was {0} and result is {1}'.format(\n self.geom.type,\n result.type\n )\n )\n self.calculated = True\n return self._test_passed", "def match(self, pos, radius):\n\n nodes = self._data[:, 2:5]\n distlist = np.squeeze(cdist(pos.reshape(1, 3), nodes))\n if distlist.size == 0:\n return False, -2\n minidx = distlist.argmin()\n minnode = self._data[minidx, 2:5]\n\n # See if either of them can cover each other with a ball of their own\n # radius\n mindist = np.linalg.norm(pos - minnode)\n return radius > mindist or self._data[minidx, 5] > mindist, minidx", "def intersects(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads\r\n return False", "def find_channel_neighbors(geom, radius):\n return (squareform(pdist(geom)) <= radius)", "def find_channel_neighbors(geom, radius):\n return (squareform(pdist(geom)) <= radius)", "def is_on(self, obj1_loc, obj1_dims, obj2_loc, obj2_dims):\n VERT_MEASUREMENT_TOLERANCE = self.VERT_MEASUREMENT_TOLERANCE\n result = None\n obj1_x = obj1_loc[0]\n obj1_y = obj1_loc[1]\n obj1_zmin = obj1_loc[2] - (.5 * obj1_dims[2])\n obj2_xmin, obj2_xmax, obj2_ymin, obj2_ymax, obj2_zmin, obj2_zmax = self.get_corners(obj2_loc, obj2_dims)\n if obj1_x >= obj2_xmin and obj1_x <= obj2_xmax:\n if obj1_y >= obj2_ymin and obj1_y <= obj2_ymax:\n if obj1_zmin >= obj2_zmax-VERT_MEASUREMENT_TOLERANCE and obj1_zmin <= obj2_zmax+VERT_MEASUREMENT_TOLERANCE:\n result = 'on'\n return result", "def test_intersect_volume(self):\n\n intersect_shape = ExtrudeCircleShape(points=[(30, 0)], radius=5, distance=50)\n\n intersected_shape = ExtrudeCircleShape(\n points=[(30, 0)],\n radius=10,\n distance=50,\n intersect=[self.test_shape, intersect_shape],\n )\n\n assert intersected_shape.volume() == pytest.approx(math.pi * 5**2 * 30)" ]
[ "0.7254775", "0.61709523", "0.5765121", "0.57588995", "0.5667875", "0.5657872", "0.5615331", "0.5498272", "0.54953516", "0.5392794", "0.5387623", "0.5384196", "0.53837055", "0.5313938", "0.52996296", "0.5263196", "0.5251948", "0.5240129", "0.5167428", "0.51551384", "0.5082203", "0.5078475", "0.5062163", "0.5049635", "0.50441986", "0.50374484", "0.5028136", "0.50127864", "0.5002548", "0.49691728", "0.49354702", "0.4932225", "0.49271417", "0.49271253", "0.4917652", "0.48700935", "0.48695645", "0.48687702", "0.48685458", "0.4866482", "0.48596692", "0.4849615", "0.48470345", "0.48411787", "0.48253793", "0.48222393", "0.4817893", "0.4797892", "0.4796884", "0.47844997", "0.47828957", "0.47802", "0.47730115", "0.47672617", "0.47622877", "0.47617942", "0.47549403", "0.4748241", "0.47443804", "0.47426128", "0.47390378", "0.473851", "0.47309265", "0.4709521", "0.47035718", "0.4702084", "0.46869373", "0.4681313", "0.468008", "0.46780866", "0.46778128", "0.46771586", "0.46737546", "0.46657267", "0.46582806", "0.46519396", "0.46302435", "0.46300393", "0.46289733", "0.4627504", "0.4606285", "0.4605647", "0.46009275", "0.45958617", "0.4593825", "0.45851424", "0.45813522", "0.45762405", "0.45730323", "0.45709354", "0.45644346", "0.45636636", "0.4562364", "0.45606655", "0.4545459", "0.45420238", "0.45402333", "0.45402333", "0.4539838", "0.45396462" ]
0.78412956
0
Create a copy of a facemap proc file, but pointing to a new video. By default, the new proc file is created in the same folder as the new videofile and named videofile_proc.npy.
def copy_facemap_roi(procfile, videofile, outputfile=None):
    videodata = np.load(procfile, allow_pickle=True).item()
    videodata['filenames'] = [[videofile]]
    if outputfile is None:
        outputfile = os.path.splitext(videofile)[0]+'_proc.npy'
    if os.path.isfile(outputfile):
        print(f'File {outputfile} exists. It will not be overwritten.')
        return None
    np.save(outputfile, videodata)
    return outputfile
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_video(input_file, output_file):\n input_video = VideoFileClip(input_file)\n output_video = input_video.fl_image(detect_lane.fit_and_plot)\n output_video.write_videofile(output_file, audio=False)", "def process_video(lane, fname, output):\n\tclip = VideoFileClip(fname)\n\toutput_name = output\n\toutput_clip = clip.fl_image(lane.pipeline)\n\toutput_clip.write_videofile(output_name, audio=False)\n\tprint ('Video processed successfully')", "def process_video(input_file, output_file):\n # video = VideoFileClip(input_file).subclip(40,44) # from 38s to 46s\n video = VideoFileClip(input_file)\n annotated_video = video.fl_image(process_pipeline)\n annotated_video.write_videofile(output_file, audio=False)", "def process_video(input_file, output_file):\n # video = VideoFileClip(input_file).subclip(40,44) # from 38s to 46s\n video = VideoFileClip(input_file)\n annotated_video = video.fl_image(process_pipeline)\n annotated_video.write_videofile(output_file, audio=False)", "def process_video(proc_state):\n entry = proc_state.entry\n workbench = proc_state.workbench\n video_config = mgg.global_config['media_type:mediagoblin.media_types.video']\n\n queued_filepath = entry.queued_media_file\n queued_filename = proc_state.get_queued_filename()\n name_builder = FilenameBuilder(queued_filename)\n\n medium_filepath = create_pub_filepath(\n entry, name_builder.fill('{basename}-640p.webm'))\n\n thumbnail_filepath = create_pub_filepath(\n entry, name_builder.fill('{basename}.thumbnail.jpg'))\n\n # Create a temporary file for the video destination (cleaned up with workbench)\n tmp_dst = NamedTemporaryFile(dir=workbench.dir, delete=False)\n with tmp_dst:\n # Transcode queued file to a VP8/vorbis file that fits in a 640x640 square\n progress_callback = ProgressCallback(entry)\n\n dimensions = (\n mgg.global_config['media:medium']['max_width'],\n mgg.global_config['media:medium']['max_height'])\n\n # Extract metadata and keep a record of it\n metadata = transcoders.VideoTranscoder().discover(queued_filename)\n store_metadata(entry, metadata)\n\n # Figure out whether or not we need to transcode this video or\n # if we can skip it\n if skip_transcode(metadata):\n _log.debug('Skipping transcoding')\n\n dst_dimensions = metadata['videowidth'], metadata['videoheight']\n\n # Push original file to public storage\n _log.debug('Saving original...')\n proc_state.copy_original(queued_filepath[-1])\n\n did_transcode = False\n else:\n transcoder = transcoders.VideoTranscoder()\n\n transcoder.transcode(queued_filename, tmp_dst.name,\n vp8_quality=video_config['vp8_quality'],\n vp8_threads=video_config['vp8_threads'],\n vorbis_quality=video_config['vorbis_quality'],\n progress_callback=progress_callback,\n dimensions=dimensions)\n\n dst_dimensions = transcoder.dst_data.videowidth,\\\n transcoder.dst_data.videoheight\n\n # Push transcoded video to public storage\n _log.debug('Saving medium...')\n mgg.public_store.copy_local_to_storage(tmp_dst.name, medium_filepath)\n _log.debug('Saved medium')\n\n entry.media_files['webm_640'] = medium_filepath\n\n did_transcode = True\n\n # Save the width and height of the transcoded video\n entry.media_data_init(\n width=dst_dimensions[0],\n height=dst_dimensions[1])\n\n # Temporary file for the video thumbnail (cleaned up with workbench)\n tmp_thumb = NamedTemporaryFile(dir=workbench.dir, suffix='.jpg', delete=False)\n\n with tmp_thumb:\n # Create a thumbnail.jpg that fits in a 180x180 square\n transcoders.VideoThumbnailerMarkII(\n queued_filename,\n tmp_thumb.name,\n 180)\n\n # 
Push the thumbnail to public storage\n _log.debug('Saving thumbnail...')\n mgg.public_store.copy_local_to_storage(tmp_thumb.name, thumbnail_filepath)\n entry.media_files['thumb'] = thumbnail_filepath\n\n # save the original... but only if we did a transcoding\n # (if we skipped transcoding and just kept the original anyway as the main\n # media, then why would we save the original twice?)\n if video_config['keep_original'] and did_transcode:\n # Push original file to public storage\n _log.debug('Saving original...')\n proc_state.copy_original(queued_filepath[-1])\n\n # Remove queued media file from storage and database\n proc_state.delete_queue_file()", "def process_video(self, tmp_output_folder, video_name, video_num, total_videos):\n vidcap = cv2.VideoCapture(join(tmp_output_folder, video_name))\n print(f\"Processing video {video_num}/{total_videos} with name {video_name} \\n\")\n\n input_length = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))\n frame_width = int(vidcap.get(cv2.CAP_PROP_FRAME_WIDTH))\n frame_height = int(vidcap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n fps = int(vidcap.get(cv2.CAP_PROP_FPS))\n\n metadata = []\n faces_all_frames = []\n success, image = vidcap.read()\n count = 0\n frame = 0\n while success:\n if count % self.sample_every == 0:\n height, width = image.shape[:2]\n image = cv2.resize(image, (self.width, self.height), interpolation=cv2.INTER_CUBIC)\n\n # Convert from BGR color (OpenCV) to RGB color (face_recognition)\n rgb_image = image[:, :, ::-1]\n\n # Find all the faces in the current frame of video\n face_locations = face_recognition.face_locations(rgb_image)\n faces = []\n face_num = 0\n # Display the results\n for top, right, bottom, left in face_locations:\n # Draw a box around the face\n faces.append(image[top:bottom, left:right, :].copy())\n metadata.append(\n f\"{video_name},frame-{count}.face-{face_num}.jpg,{count},{face_num},{input_length},{fps},{frame_width},{frame_height},{top},{right},{bottom},{left}\\n\")\n face_num += 1\n faces_all_frames.append(faces)\n\n frame += 1\n success, image = vidcap.read()\n count += 1\n video_num += 1\n vidcap.release()\n\n with open(f\"{self.output_folder}/faces-pickle/{video_name}.pkl\", \"wb\") as f_out:\n pickle.dump(faces_all_frames, f_out)\n return metadata", "def process_video(input_file, output_file):\n with open('all-features-rbf-svm.p', 'rb') as svm_fd:\n clf = pickle.load(svm_fd)\n with open('all-features-scaler.p', 'rb') as scaler_fd:\n hog_scaler = pickle.load(scaler_fd)\n hog_parameters = HogParameters(orientations=18, pixels_per_cell=8, cells_per_block=2)\n clip = VideoFileClip(input_file)\n test_clip = clip.fl_image(\n lambda frame: process_frame(frame, clf=clf, norm_scaler=hog_scaler, hog_parameters=hog_parameters, spatial_size=(16, 16), hist_bins=32))\n test_clip.write_videofile(output_file, audio=False)", "def convert_video(video_file, output_file_name):\n video_stream = cv2.VideoCapture(video_file)\n total_frames = video_stream.get(cv2.CAP_PROP_FRAME_COUNT)\n background = get_median_frame(video_stream)\n video_stream.release()\n #reopen for processing:\n video_stream = cv2.VideoCapture(video_file)\n #ready an output writer\n writer = cv2.VideoWriter(output_file_name, \n cv2.VideoWriter_fourcc(*\"MP4V\"), fps,(1080,1920)) #(1920,1080))\n frameCnt=0\n pos = [] #Array for the coordinates\n while(frameCnt < total_frames-1):\n frameCnt+=1\n ret, frame = video_stream.read()\n dframe = background_subtraction(frame,background)\n cnts = find_contours(dframe)\n x,y = find_lowest_contour(cnts)\n pos.append([x,y])\n if 
len(pos): \n cv2.polylines(frame,np.int32([pos]),False,(0, 255, 0),2)\n writer.write(cv2.resize(frame, (1080,1920))) ## size probably shoudn't be fixed.\n writer.release()\n video_stream.release()\n return pos", "def create_movie(name, folder):\n cmd = [\"ffmpeg\", \"-framerate\", \"1\", \"-i\", folder + \"/pic%04d.png\", \"-c:v\",\n \"libx264\", \"-r\", \"30\", \"-pix_fmt\", \"yuv420p\", name]\n return subprocess.call(cmd)", "def local_video(**kwargs):\n output_dir = run_video_preprocess(\n video_file=input_video,\n roi_locations=kwargs[\"roi_locations\"],\n preprocess_analysis=kwargs[\"preprocess_analysis\"],\n database=False\n )\n\n run_analysis_pipeline(\n preprocess_analysis=kwargs[\"preprocess_analysis\"],\n json_filepath=output_dir,\n )", "def make_video(queue, video_file, destination, face_locations, face_encodings, match_encodings, settings):\n trackers = [] # list of tracker objects, one for each matched face\n # get video\n video = cv2.VideoCapture(video_file) # input VideoCapture object\n frame_rate = video.get(cv2.CAP_PROP_FPS) # frames per second in input video\n width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH)) # width of input video frame\n height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT)) # height of input video frame\n frame_count = int(video.get(cv2.CAP_PROP_FRAME_COUNT)) # number of frames in input video\n # get settings\n track_period = settings[\"track_period\"] # track period from settings\n tolerance = settings[\"tolerance\"] # face matching tolerance from settings\n blur_method = settings[\"blur_method\"] # type of blurring from settings\n blur_intensity = settings[\"blur_intensity\"] # blurring filter size from settings\n display_output = settings[\"display_output\"] # flag indicating whether to display output video from settings\n # initialize writer\n out = video_utils.initialize_writer(destination, (width, height), frame_rate) # VideoWriter object\n for i in range(frame_count):\n ret, img = video.read() # ret indicates if frame was read correctly, img is last read frame\n if i % track_period == 0: # frame for detection\n current_frame_encodings = np.array(face_encodings[i // track_period]) # array of encodings for faces in current frame\n matched_indices, matched_encodings = recognition.match_faces(current_frame_encodings, np.array(match_encodings), tolerance) # indices of matched faces from current frame and their encodings\n matched_locations = [face_locations[i // track_period][k] for k in matched_indices] # locations of matched faces from current frame\n trackers = tracking.start_trackers(img, matched_locations) # list of tracker objects, one for each matched face\n else: # frame for tracking\n matched_locations = tracking.update_locations(trackers, img) # updated locations of matched faces from current frame\n # generate blurred image\n blurred = None # object holding image with blurred faces\n if blur_method == \"pixelate\":\n blurred = blur_methods.pixelated(img, matched_locations, blur_intensity)\n elif blur_method == \"blur\":\n blurred = blur_methods.blurred(img, matched_locations, blur_intensity)\n elif blur_method == \"blacken\":\n blurred = blur_methods.blackened(img, matched_locations)\n out.write(blurred)\n\n out.release()\n queue.put(0)\n if display_output:\n video_utils.display_video(destination)", "def process_video(self, input_path, output_path, debug=False):\n clip = VideoFileClip(input_path)\n if debug:\n test_clip = clip.fl_image(self.process_image_debug)\n else:\n test_clip = clip.fl_image(self.process_image)\n 
test_clip.write_videofile(output_path)", "def preprocess_sample(file, params):\n\n videoFile = file + \".mp4\"\n audioFile = file + \".wav\"\n roiFile = file + \".png\"\n visualFeaturesFile = file + \".npy\"\n\n roiSize = params[\"roiSize\"]\n normMean = params[\"normMean\"]\n normStd = params[\"normStd\"]\n vf = params[\"vf\"]\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\n #Extract the audio from the video file using the FFmpeg utility and save it to a wav file.\n v2aCommand = \"ffmpeg -y -v quiet -i \" + videoFile + \" -ac 1 -ar 16000 -vn \" + audioFile\n os.system(v2aCommand)\n\n\n #for each frame, resize to 224x224 and crop the central 112x112 region\n captureObj = cv.VideoCapture(videoFile)\n roiSequence = list()\n while (captureObj.isOpened()):\n ret, frame = captureObj.read()\n if ret == True:\n grayed = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)\n grayed = grayed/255\n grayed = cv.resize(grayed, (224,224))\n roi = grayed[int(112-(roiSize/2)):int(112+(roiSize/2)), int(112-(roiSize/2)):int(112+(roiSize/2))]\n roiSequence.append(roi)\n else:\n break\n captureObj.release()\n cv.imwrite(roiFile, np.floor(255*np.concatenate(roiSequence, axis=1)).astype(np.int))\n\n\n #normalise the frames and extract features for each frame using the visual frontend\n #save the visual features to a .npy file\n inp = np.stack(roiSequence, axis=0)\n inp = np.expand_dims(inp, axis=[1,2])\n inp = (inp - normMean)/normStd\n inputBatch = torch.from_numpy(inp)\n inputBatch = (inputBatch.float()).to(device)\n vf.eval()\n with torch.no_grad():\n outputBatch = vf(inputBatch)\n out = torch.squeeze(outputBatch, dim=1)\n out = out.cpu().numpy()\n np.save(visualFeaturesFile, out)\n return", "def analyze_video(vidNum_iter, config, pointInds_toUse, pts_spaced, session): # function needed for multiprocessing\n\n optic = config['Optic']\n\n numVids = session['num_vids']\n path_vid_allFiles = session['videos']\n lk_names = [key for key in optic.keys() if 'lk_' in key]\n lk_params = {k.split('lk_')[1]: (tuple(optic[k]) if type(optic[k]) is list else optic[k]) \\\n for k in lk_names}\n\n vid = imageio.get_reader(path_vid_allFiles[vidNum_iter], 'ffmpeg')\n # metadata = vid.get_meta_data()\n\n path_vid = path_vid_allFiles[vidNum_iter] # get path of the current vid\n video = cv2.VideoCapture(path_vid) # open the video object with openCV\n numFrames = int(video.get(\n cv2.CAP_PROP_FRAME_COUNT)) # get frame count of this vid GENERALLY INACCURATE. OFF BY AROUND -25 frames\n\n frameToSet = 0\n frame = vid.get_data(\n frameToSet) # Get a single frame to use as the first 'previous frame' in calculating optic flow\n new_frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n old_frame = new_frame_gray\n\n displacements_tmp = np.zeros((pts_spaced.shape[0], 2, np.uint64(numFrames + (numVids * 1000)))) * np.nan\n\n print(' ', end='', flush=True)\n text = \"progresser #{}\".format(vidNum_iter)\n print(f'\\n Calculating displacement field: video # {vidNum_iter + 1}/{numVids}')\n\n for iter_frame, new_frame in enumerate(tqdm(vid, total=numFrames, desc=text, position=vidNum_iter)):\n new_frame_gray = cv2.cvtColor(new_frame, cv2.COLOR_BGR2GRAY) # convert to grayscale\n\n ##calculate optical flow\n pointInds_new, status, error = cv2.calcOpticalFlowPyrLK(old_frame, new_frame_gray, pointInds_toUse, None,\n **lk_params) # Calculate displacement distance between STATIC/ANCHORED points and the calculated new points. Also note the excluded 'NextPts' parameter. 
Could be used for fancier tracking\n\n ## Calculate displacement and place into variable 'displacements' (changes in size every iter)\n if iter_frame == 0:\n displacements_tmp[:, :, iter_frame] = np.zeros((pts_spaced.shape[0], 2))\n else:\n displacements_tmp[:, :, iter_frame] = np.single(np.squeeze((\n pointInds_new - pointInds_toUse))) # this is the important variable. Simply the difference in the estimate\n\n old_frame = new_frame_gray # make current frame the 'old_frame' for the next iteration\n\n return displacements_tmp", "def run_video(self, video_path):\n file, ext = os.path.splitext(video_path)\n video_name = file.split('/')[-1]\n out_filename = video_name + '_out' + '.avi'\n\n cap = cv2.VideoCapture(video_path)\n wi = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n he = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n print(wi, he)\n\n vwriter = cv2.VideoWriter(out_filename, cv2.VideoWriter_fourcc(*'MJPG'), 10, (wi, he))\n counter = 0\n fac = 2\n start = time.time()\n while True:\n ret, image = cap.read()\n\n if ret:\n counter += 1\n\n ## resize image\n\n height, width, channels = image.shape\n resize_ratio = 1.0 * self.INPUT_SIZE / max(width, height)\n target_size = (int(resize_ratio * width), int(resize_ratio * height))\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n resized_image = cv2.resize(image, target_size, interpolation=cv2.INTER_AREA)\n output = resized_image.copy()\n\n ## get segmentation map\n batch_seg_map = self.sess.run(\n self.OUTPUT_TENSOR_NAME,\n feed_dict={self.INPUT_TENSOR_NAME: [np.asarray(resized_image)]})\n seg_map = batch_seg_map[0]\n\n ## visualize\n seg_image = label_to_color_image(seg_map).astype(np.uint8)\n\n ## overlay on image\n alpha = 0.7\n cv2.addWeighted(seg_image, alpha, output, 1 - alpha, 0, output)\n\n output = cv2.resize(output, (wi, he), interpolation=cv2.INTER_AREA)\n # outimg = 'image_' + str(counter) + '.jpg'\n # cv2.imwrite(os.path.join(os.getcwd(), 'test_out', outimg),output)\n vwriter.write(output)\n else:\n break\n\n end = time.time()\n print(\"Frames and Time Taken: \", counter, end - start)\n cap.release()\n vwriter.release()", "def process_video(filename, args, cfg, net):\n # Split video into frames\n images = split_video(filename)\n # Set output dir\n output_dir = args.output\n # Add brackets and extension to filename\n output_path = create_video_output_path(output_dir, cfg)\n # Get height and width of 1st image\n height, width, _ = check_img_size(images[0]).shape\n # Create VideoWriter object\n video = cv2.VideoWriter(output_path, \n cv2.VideoWriter_fourcc(*'FMP4'), \n cfg['video']['fps'], \n (width, height))\n for image in images:\n # Process frames\n img_steps = process_image(image, cfg, net)\n # Check for --show-detections flag\n output_img = check_if_adding_bboxes(args, img_steps) \n # Write to video\n video.write(output_img) \n # Release video writer object\n video.release()", "def video(perspective_matrix_path, source=\"cam\", save=False, save_path=None, file_name=\"out\", cam_cal=None):\n if not os.path.isfile(perspective_matrix_path):\n raise FileNotFoundError(\"Path to perspective matrix file not exist!\")\n\n with open(perspective_matrix_path, \"rb\") as p:\n perspective_matrix = pickle.load(p)\n M = perspective_matrix[\"M\"]\n Minv = perspective_matrix[\"Minv\"]\n\n if source == \"cam\":\n cap = cv2.VideoCapture(0)\n else:\n if not os.path.isfile(source):\n raise FileNotFoundError(source, \" not Exist!\")\n cap = cv2.VideoCapture(source)\n\n # camera calibration parameters [ mtx , dist]\n mtx = None\n dist = None\n\n out = None\n 
if save:\n if not os.path.isdir(save_path):\n raise FileNotFoundError(save_path, \" Not Exist!\")\n file_name += \".mp4\"\n out = cv2.VideoWriter(save_path + file_name, -1, 20, (int(cap.get(3)), int(cap.get(4))))\n\n if cam_cal:\n if not os.path.isfile(cam_cal):\n raise FileNotFoundError(cam_cal, \" Not Exist!\")\n\n with open(cam_cal, \"rb\") as p:\n calibration = pickle.load(p)\n mtx = calibration[\"mtx\"]\n dist = calibration[\"dist\"]\n\n left_line = Line(5)\n right_line = Line(5)\n\n while True:\n # Capture frame-by-frame\n ret, frame = cap.read()\n if not ret:\n print(\"Finished..\")\n sys.exit(0)\n\n # cv2 read frame as BGR, convert it to RGB\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n\n # camera calibration\n if not (mtx is None or dist is None):\n frame = cv2.undistort(frame, mtx, dist, None, mtx)\n\n # get edges in image\n edges = apply_edge_detection(frame)\n\n # transform image to bird view\n warped = warped_img(edges, M)\n\n # init out image which will draw lane line on it then weight it with original frame\n out_img = np.zeros_like(warped)\n if len(warped.shape) == 3 and warped.shape[2] == 3:\n pass\n else:\n out_img = np.dstack((out_img, out_img, out_img))\n\n # if line not detected, apply sliding window\n if not left_line.detected or not right_line.detected:\n leftx, lefty, rightx, righty = sliding_window(warped, 9, 200)\n\n # if already detected apply search around detected line\n else:\n leftx, lefty = search_around_poly(left_line, warped)\n rightx, righty = search_around_poly(right_line, warped)\n\n # will used for plotting line, find x fitted\n ploty = np.linspace(warped.shape[0] // 4, warped.shape[0] - 1, warped.shape[0])\n\n # check if at least 100 pixels detected as line\n if len(leftx) > 100 and len(rightx) > 100:\n\n # make detected flag true\n left_line.detected = True\n right_line.detected = True\n\n left_line.current_x = leftx\n left_line.current_y = lefty\n\n right_line.current_x = rightx\n right_line.current_y = righty\n\n left_line.fit_polynomial(ploty)\n right_line.fit_polynomial(ploty)\n\n else:\n print(\"Line not detected in this frame \")\n # we just draw line form previous frame\n\n # make detected flag true\n left_line.detected = False\n right_line.detected = False\n\n # update Lane line radius\n left_line.radius()\n right_line.radius()\n\n # avg radius of to lines, and plot it\n radius = (left_line.radius_of_curvature + right_line.radius_of_curvature) // 2\n frame = write_text(frame, \"Radius of Curvature = \" + str(radius) + \" M\", pos=(20, 50))\n\n # calculate Alignment ( how much car away from center between Lane lines\n dir = \"Left\" # car far from left or right\n\n left_line.car_offset(frame.shape) # distance from left line\n right_line.car_offset(frame.shape) # distance from right line\n\n distance = round(right_line.line_base_pos - left_line.line_base_pos, 2)\n\n if distance < 0: # car far away from left line not right line\n distance = -distance\n dir = \"Right\"\n frame = write_text(frame, \"Vehicle is {}m {} of center\".format(distance, dir), pos=(20, 80))\n\n # ** plot lane lines on image **\n # left_line.draw_line(out_img, ploty)\n # right_line.draw_line(out_img, ploty)\n\n # color pixel which belong to lane lines\n left_line.color_pixel(out_img, (255, 0, 0))\n right_line.color_pixel(out_img, (255, 100, 0))\n\n # fit green triangle in area between lane lines\n pts_left = np.array([np.transpose(np.vstack([left_line.bestx, ploty]))])\n pts_right = np.array([np.flipud(np.transpose(np.vstack([right_line.bestx, ploty])))])\n pts = 
np.hstack((pts_left, pts_right))\n\n # Draw the lane onto the warped blank image\n cv2.fillPoly(out_img, np.int_([pts]), (0, 255, 0))\n\n # return image to normal view from bird view\n out_img_undit = warped_img(out_img, Minv)\n\n # weight out_image_undit with original frame\n frame = cv2.addWeighted(out_img_undit, 0.5, frame, 1, 0)\n frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)\n\n cv2.imshow(\"frame\", frame)\n\n # write video\n if save:\n out.write(frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n # When everything done, release the capture\n cap.release()\n cv2.destroyAllWindows()", "def emv(inputVideoPath, outputVideoPath, maxLevel, freqLow, freqHigh, alpha, chromAttenuation, startFrameNumber, endFrameNumber, lambdaC=-1, app=\"color\", method=\"ideal\", roi=None): \n fps, frames = getVideoFrames(inputVideoPath, startFrameNumber, endFrameNumber)\n if app==\"color\":\n recreateFrames=emvCoreColor(frames, fps, maxLevel, freqLow, freqHigh, alpha, chromAttenuation, method)\n elif app==\"motion\":\n recreateFrames=emvCoreMotion(frames, fps, maxLevel, freqLow, freqHigh, alpha, lambdaC, chromAttenuation, method)\n saveFramesToVideoROI(frames, recreateFrames, outputVideoPath, roi)\n return", "def displacements_monothread(config, pointInds_toUse, pointInds_tracked, pointInds_tracked_tuple, displacements,\n pts_spaced, color_tuples, session):\n\n ## Main loop to pull out displacements in each video\n ind_concat = 0\n fps = 0\n tic_fps = time.time()\n tic_all = time.time()\n\n optic = config['Optic']\n video = config['Video']\n\n vidNums_toUse = optic['vidNums_toUse']\n showVideo_pref = optic['showVideo_pref']\n fps_counterPeriod = video['fps_counterPeriod']\n printFPS_pref = video['printFPS_pref']\n remote = config['General']['remote']\n save_vid = video['save_demo']\n\n Fs = video['Fs']\n vid_width = video['width']\n vid_height = video['height']\n test_len = video['demo_len']\n save_pathFull = str(Path(config['Paths']['viz']) / 'optic_test.avi')\n\n numVids = session['num_vids']\n path_vid_allFiles = session['videos']\n lk_names = [key for key in optic.keys() if 'lk_' in key]\n lk_params = {k.split('lk_')[1]: (tuple(optic[k]) if type(optic[k]) is list else optic[k]) \\\n for k in lk_names}\n\n # Define the codec and create VideoWriter object\n if showVideo_pref and (save_vid or remote):\n fourcc = cv2.VideoWriter_fourcc(*'MJPG')\n print(f'saving to file {save_pathFull}')\n out = cv2.VideoWriter(save_pathFull, fourcc, Fs, (np.int64(vid_width), np.int64(vid_height)))\n else:\n out = None\n vid_lens = []\n for vidNum_iter in vidNums_toUse:\n vid = imageio.get_reader(path_vid_allFiles[vidNum_iter], 'ffmpeg')\n # metadata = vid.get_meta_data()\n\n path_vid = path_vid_allFiles[vidNum_iter] # get path of the current vid\n video = cv2.VideoCapture(path_vid) # open the video object with openCV\n numFrames_rough = int(video.get(\n cv2.CAP_PROP_FRAME_COUNT)) # get frame count of this vid GENERALLY INACCURATE. 
OFF BY AROUND -25 frames\n\n frameToSet = 0\n frame = vid.get_data(0) # Get a single frame to use as the first 'previous frame' in calculating optic flow\n new_frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n old_frame = new_frame_gray\n\n print(f'\\n Calculating displacement field: video # {vidNum_iter + 1}/{numVids}')\n # while True:\n for iter_frame, new_frame in enumerate(tqdm(vid, total=numFrames_rough)):\n new_frame_gray = cv2.cvtColor(new_frame, cv2.COLOR_BGR2GRAY) # convert to grayscale\n\n ##calculate optical flow\n pointInds_new, status, error = cv2.calcOpticalFlowPyrLK(old_frame, new_frame_gray, pointInds_toUse, None,\n **lk_params) # Calculate displacement distance between STATIC/ANCHORED points and the calculated new points. Also note the excluded 'NextPts' parameter. Could be used for fancier tracking\n\n ## Calculate displacement and place into variable 'displacements' (changes in size every iter) \n if iter_frame == 0:\n displacements[:, :, ind_concat] = np.zeros((pts_spaced.shape[0], 2))\n else:\n displacements[:, :, ind_concat] = np.single(np.squeeze((\n pointInds_new - pointInds_toUse))) # this is the important variable. Simply the difference in the estimate\n\n old_frame = new_frame_gray # make current frame the 'old_frame' for the next iteration\n\n ## below is just for visualization. Nothing calculated is maintained\n if showVideo_pref:\n pointInds_tracked = pointInds_tracked + (\n pointInds_new - pointInds_toUse) # calculate integrated position\n pointInds_tracked = pointInds_tracked - (\n pointInds_tracked - pointInds_toUse) * 0.01 # multiplied constant is the relaxation term. this is just for display purposes. Relaxation term chosen during cleanup will be real\n pointInds = [pointInds_tracked, pointInds_tracked_tuple]\n counters = [iter_frame, vidNum_iter, ind_concat, fps]\n if (remote and iter_frame < test_len) or not remote:\n videos.visualize_progress(config, session, new_frame, pointInds, color_tuples, counters, out)\n\n if (save_vid or remote) and iter_frame == test_len:\n out.release()\n\n k = cv2.waitKey(1) & 0xff\n if k == 27: break\n\n ind_concat = ind_concat + 1\n\n if ind_concat % fps_counterPeriod == 0:\n elapsed = time.time() - tic_fps\n fps = fps_counterPeriod / elapsed\n if printFPS_pref:\n print(fps)\n tic_fps = time.time()\n vid_lens.append(iter_frame+1)\n ## Calculate how long calculation took\n elapsed = time.time() - tic_all\n helpers.print_time('video time elapsed:', elapsed)\n print(f'Capture rate: {round(ind_concat / elapsed, 3)} fps')\n\n numFrames_total = ind_concat - 1\n cv2.destroyAllWindows()\n\n displacements = displacements[:, :, ~np.isnan(displacements[0, 0, :])]\n\n return displacements, numFrames_total, vid_lens", "def write_face_samples(model, output_path, invid):\n \n if not os.path.isdir(output_path) :\n os.mkdir(output_path)\n \n video = mmcv.VideoReader(invid)\n for frame_ix, frame in enumerate(video):\n frame_name = f\"{output_path}webcam_{frame_ix}_0.jpg\"\n if os.path.isfile(frame_name): continue\n \n frame_img = PIL.Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))\n _ = model(frame_img,frame_name)", "def makeVideo():\n os.system(\"cd video && ffmpeg -r 10 -i img%05d.jpg -vcodec mpeg4 -y caronthehill_clip.mp4\")", "def __call__(self, video_path, per_frames = 1 , offset = None):\n \n cap = cv2.VideoCapture(video_path)\n \n if not cap.isOpened():\n raise Exception(\"Video file does not exist or is invalid\")\n\n \n if offset:\n cap.set(cv2.CAP_PROP_POS_MSEC, offset)\n \n \n info = []\n\n while cap.isOpened():\n 
ret, frame = cap.read()\n if ret:\n if cap.get(cv2.CAP_PROP_POS_FRAMES) % per_frames == 0:\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n faces_info = self.detect_faces_from_image(frame,\n desired_width=224, desired_height=224) \n if faces_info:\n for element in faces_info:\n face_img = image.img_to_array(element[1])\n\n face_img = utils.preprocess_input(face_img, version=1)\n face_img = np.expand_dims(face_img, axis=0)\n\n features = self.vgg_feature_extractor.predict(face_img)\n label = self.gender_svm.predict(features)[0]\n decision_value = round(self.gender_svm.decision_function(features)[0], 3)\n\n bounding_box = element[0][0]\n detection_score = round(element[5], 3)\n bbox_length = bounding_box.bottom() - bounding_box.top()\n\n info.append([\n cap.get(cv2.CAP_PROP_POS_FRAMES), bounding_box, (bbox_length, bbox_length), label,\n decision_value, detection_score\n ])\n\n else:\n break\n cap.release()\n info = pd.DataFrame.from_records(info, columns = ['frame', 'bb', 'size','label', 'decision', 'conf'])\n return info", "def run(self):\n\n for file_cnt, file_path in enumerate(self.files_found):\n video_timer = SimbaTimer()\n video_timer.start_timer()\n _, self.video_name, _ = get_fn_ext(file_path)\n self.video_info, self.px_per_mm, self.fps = self.read_video_info(\n video_name=self.video_name\n )\n self.width, self.height = int(\n self.video_info[\"Resolution_width\"].values[0]\n ), int(self.video_info[\"Resolution_height\"].values[0])\n if self.video_setting:\n self.fourcc = cv2.VideoWriter_fourcc(*Formats.MP4_CODEC.value)\n self.video_save_path = os.path.join(\n self.heatmap_clf_location_dir, self.video_name + \".mp4\"\n )\n self.writer = cv2.VideoWriter(\n self.video_save_path,\n self.fourcc,\n self.fps,\n (self.width, self.height),\n )\n if self.frame_setting:\n self.save_video_folder = os.path.join(\n self.heatmap_clf_location_dir, self.video_name\n )\n if not os.path.exists(self.save_video_folder):\n os.makedirs(self.save_video_folder)\n self.data_df = read_df(file_path=file_path, file_type=self.file_type)\n clf_array, aspect_ratio = self.__calculate_bin_attr(\n data_df=self.data_df,\n clf_name=self.clf_name,\n bp_lst=self.bp_lst,\n px_per_mm=self.px_per_mm,\n img_width=self.width,\n img_height=self.height,\n bin_size=self.bin_size,\n fps=self.fps,\n )\n\n if self.max_scale == \"auto\":\n self.max_scale = self.__calculate_max_scale(clf_array=clf_array)\n if self.max_scale == 0:\n self.max_scale = 1\n\n if self.final_img_setting:\n self.make_clf_heatmap_plot(\n frm_data=clf_array[-1, :, :],\n max_scale=self.max_scale,\n palette=self.palette,\n aspect_ratio=aspect_ratio,\n file_name=os.path.join(\n self.heatmap_clf_location_dir,\n self.video_name + \"_final_frm.png\",\n ),\n shading=self.shading,\n clf_name=self.clf_name,\n img_size=(self.width, self.height),\n final_img=True,\n )\n\n if self.video_setting or self.frame_setting:\n for frm_cnt, cumulative_frm_idx in enumerate(range(clf_array.shape[0])):\n frm_data = clf_array[cumulative_frm_idx, :, :]\n cum_df = pd.DataFrame(frm_data).reset_index()\n cum_df = cum_df.melt(\n id_vars=\"index\",\n value_vars=None,\n var_name=None,\n value_name=\"seconds\",\n col_level=None,\n ).rename(\n columns={\"index\": \"vertical_idx\", \"variable\": \"horizontal_idx\"}\n )\n cum_df[\"color\"] = (\n (cum_df[\"seconds\"].astype(float) / float(self.max_scale))\n .round(2)\n .clip(upper=100)\n )\n color_array = np.zeros(\n (\n len(cum_df[\"vertical_idx\"].unique()),\n len(cum_df[\"horizontal_idx\"].unique()),\n )\n )\n for i in 
range(color_array.shape[0]):\n for j in range(color_array.shape[1]):\n value = cum_df[\"color\"][\n (cum_df[\"horizontal_idx\"] == j)\n & (cum_df[\"vertical_idx\"] == i)\n ].values[0]\n color_array[i, j] = value\n\n fig = plt.figure()\n im_ratio = color_array.shape[0] / color_array.shape[1]\n plt.pcolormesh(\n color_array,\n shading=self.shading,\n cmap=self.palette,\n rasterized=True,\n alpha=1,\n vmin=0.0,\n vmax=float(self.max_scale),\n )\n plt.gca().invert_yaxis()\n plt.xticks([])\n plt.yticks([])\n plt.axis(\"off\")\n plt.tick_params(axis=\"both\", which=\"both\", length=0)\n cb = plt.colorbar(pad=0.0, fraction=0.023 * im_ratio)\n cb.ax.tick_params(size=0)\n cb.outline.set_visible(False)\n cb.set_label(\n \"{} (seconds)\".format(self.clf_name), rotation=270, labelpad=10\n )\n plt.tight_layout()\n plt.gca().set_aspect(aspect_ratio)\n canvas = FigureCanvas(fig)\n canvas.draw()\n mat = np.array(canvas.renderer._renderer)\n image = cv2.cvtColor(mat, cv2.COLOR_RGB2BGR)\n image = cv2.resize(image, (self.width, self.height))\n image = np.uint8(image)\n plt.close()\n\n if self.video_setting:\n self.writer.write(image)\n if self.frame_setting:\n frame_save_path = os.path.join(\n self.save_video_folder, str(frm_cnt) + \".png\"\n )\n cv2.imwrite(frame_save_path, image)\n print(\n \"Created heatmap frame: {} / {}. Video: {} ({}/{})\".format(\n str(frm_cnt + 1),\n str(len(self.data_df)),\n self.video_name,\n str(file_cnt + 1),\n len(self.files_found),\n )\n )\n\n if self.video_setting:\n self.writer.release()\n\n video_timer.stop_timer()\n print(\n \"Heatmap plot for video {} saved (elapsed time: {}s) ... \".format(\n self.video_name, video_timer.elapsed_time_str\n )\n )\n\n self.timer.stop_timer()\n stdout_success(\n msg=\"All heatmap visualizations created in project_folder/frames/output/heatmaps_classifier_locations directory\",\n elapsed_time=\"self.timer.elapsed_time_str\",\n )", "def _spawn_ffmpeg(self) -> None:\r\n if self.ffmpeg_proc is not None:\r\n raise RuntimeError('_spawn_ffmpeg called when ffmpeg_proc is '\r\n + f'{self.ffmpeg_proc} (not None)')\r\n\r\n args = ['ffmpeg', '-f', 'rawvideo', '-vcodec', 'rawvideo',\r\n '-s', f'{self.frame_size[0]}x{self.frame_size[1]}',\r\n '-pix_fmt', 'rgba', '-r', str(self.fps),\r\n '-loglevel', 'quiet',\r\n '-i', 'pipe:0',\r\n '-vcodec', 'h264', '-pix_fmt', 'yuv420p',\r\n '-movflags', '+faststart']\r\n\r\n if self.bitrate > 0:\r\n args.extend(['-b', f'{self.bitrate}k'])\r\n args.extend(['-y', self.outfile])\r\n\r\n create_flags = sp.CREATE_NO_WINDOW if 'nt' in os.name else 0\r\n self.ffmpeg_proc = sp.Popen(args, shell=False, stdout=None, stderr=None,\r\n stdin=sp.PIPE, creationflags=create_flags)", "def write_video(project_video_output, output_folder, fps=20):\n print(\"Creating video {}, FPS={}\".format(project_video_output, fps))\n clip = ImageSequenceClip(output_folder, fps)\n clip.write_videofile(project_video_output)", "def process_video(path, method):\n # TODO: Obtener nombre de video a partir del path\n video_name = ''\n # TODO: Crear carpeta si es que no existe (Deteccion/{method}/{video_name})\n folder_path = f\"Deteccion/{method}/{video_name}\"\n try:\n cap = cv2.VideoCapture(path)\n # Check if camera opened successfully\n if (cap.isOpened() is False):\n print(\"Error opening video stream or file\")\n\n frame_counter = 0\n # Read until video is completed\n while(cap.isOpened()):\n # Capture frame-by-frame\n ret, frame = cap.read()\n if ret is True:\n # TODO: Crear carpeta del frame si es que no existe 
(Deteccion/{method}/{video_name}/{frame_name})\n frame_name = f\"frame_{frame_counter}\"\n\n faces = process_frame(frame, method)\n # TODO: Guardar bounding boxes\n np.save(f\"{folder_path}/{frame_name}/bounding_boxes.npy\", faces)\n\n # TODO: Guardar imagenes recortadas\n for bb in faces:\n pass\n frame_counter += 1\n\n # Break the loop\n else:\n break\n\n finally:\n # When everything done, release the video capture object\n cap.release()", "def mapBackToSurface(array,filename):\n #### Map back to surface\n if array.shape[0]==360:\n out_array = np.zeros((glasser2.shape[0],3))\n\n roicount = 0\n for roi in range(360):\n for col in range(array.shape[1]):\n vertex_ind = np.where(glasser2==roi+1)[0]\n out_array[vertex_ind,col] = array[roicount,col]\n\n roicount += 1\n\n else:\n out_array = array\n\n #### \n # Write file to csv and run wb_command\n np.savetxt(filename + '.csv', out_array,fmt='%s')\n wb_file = filename + '.dscalar.nii'\n wb_command = 'wb_command -cifti-convert -from-text ' + filename + '.csv ' + glasserfile2 + ' ' + wb_file + ' -reset-scalars'\n os.system(wb_command)\n os.remove(filename + '.csv')", "def tagVideo(modelpath, videopath, outputPath=None): \n model = get_model_instance_segmentation(3)\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n # model.load_state_dict(torch.load(modelpath, map_location=device), strict=False)\n model.load_state_dict(torch.load(modelpath, map_location=device))\n model = model.to(device)\n model.eval()\n\n \n data_transform = transforms.Compose([\n ToPILImage(),\n transforms.ToTensor(), \n ])\n\n\n if outputPath:\n writer = FFmpegWriter(str(outputPath))\n \n font = cv2.FONT_HERSHEY_SIMPLEX\n cv2.namedWindow('main', cv2.WINDOW_NORMAL)\n labels = ['No mask', 'Mask']\n labelColor = [(10, 0, 255), (10, 255, 0)]\n img_count = 0\n outputDir = os.path.dirname(os.path.realpath(outputPath))\n frame_count = 0\n boundingBoxes = []\n for frame in vreader(str(videopath)):\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n print('Frame:', frame_count)\n\n if frame_count%30==0:\n frameTensor = data_transform(frame)\n frameTensor = torch.unsqueeze(frameTensor, 0).to(device)\n output = model(frameTensor)\n boundingBoxes = plot_image_new(frame, frameTensor[0], output[0]) \n \n if len(boundingBoxes)>0:\n for bb in boundingBoxes:\n cv2.rectangle(frame,\n (bb[0], bb[1]),\n (bb[2], bb[3]),\n (54, 66, 227),\n thickness=2)\n\n cv2.imshow('main', frame)\n if outputPath:\n writer.writeFrame(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n frame_count += 1\n if outputPath:\n writer.close()\n cv2.destroyAllWindows()", "def generateVideo(textFilePath,extractedPath,createdVideoPath):\r\n\t\t#Check if the save directory exists, If not create directory\r\n\t\tif not os.path.exists(createdVideoPath):\r\n\t\t\tos.mkdir(createdVideoPath)\r\n\t\t#Open the text file\r\n\t\tfile = open(textFilePath)\r\n\t\tvideoPath = createdVideoPath\r\n\r\n\t\tfor hashedData in file:\r\n\t\t\thashedData = hashedData.split(\"\\n\")[0]\r\n\t\t\timage_folder = extractedPath + \"/\" + \"data\" + \"/\" + hashedData\r\n\t\t\tvideo_name = hashedData + \".avi\"\r\n\t\t\timages = os.listdir(image_folder)\r\n\t\t\tframe = cv2.imread(os.path.join(image_folder, images[0]))\r\n\t\t\theight, width, layers = frame.shape\r\n\t\t\t#declare the video writter\r\n\t\t\tvideo = cv2.VideoWriter(videoPath + \"/\" +video_name, 0, 1, (width,height))\r\n\t\t\t#Write all images to a single video\r\n\t\t\tfor image in 
images:\r\n\t\t\t\tvideo.write(cv2.imread(os.path.join(image_folder, image)))\r\n\r\n\t\t\tcv2.destroyAllWindows()\r\n\t\t\tvideo.release()", "def create_video():\n print(\"Generating output video\")\n frame_array = []\n files = [f for f in os.listdir(MODIFIED_FRAMES_DIR) if isfile(join(MODIFIED_FRAMES_DIR, f))]\n #for sorting the file names properly\n # files.sort(key = lambda x: x[3:-4])\n files = sorted(files,key=lambda x: int(os.path.splitext(x)[0]))\n for i in range(len(files)):\n filename= MODIFIED_FRAMES_DIR + files[i]\n # print(filename)\n #reading each files\n img = cv2.imread(filename)\n height, width, layers = img.shape\n size = (width,height)\n \n #inserting the frames into an image array\n frame_array.append(img)\n \n out = cv2.VideoWriter(OUTPUT_FILE,cv2.VideoWriter_fourcc(*'DIVX'), FRAME_RATE, size)\n for i in range(len(frame_array)):\n # writing to a image array\n out.write(frame_array[i])\n out.release()\n print(\"Output video generated successfully...\")\n\n # img_array = []\n # for filename in glob.glob(MODIFIED_FRAMES_DIR+'/*.jpg'):\n # img = cv2.imread(filename)\n # height, width, layers = img.shape\n # size = (width,height)\n # img_array.append(img)\n\n # height, width, layers = img_array[0].shape\n # size = (width,height)\n # out = cv2.VideoWriter('output.mov',cv2.VideoWriter_fourcc(*'DIVX'), 15, size) \n # for i in range(len(img_array)):\n # out.write(img_array[i])\n # out.release()", "def generate_frame(video_path, video_name, second, label, dest_path):\n print \"video_path\", video_path\n print 'video_name',video_name\n print 'second',second\n print 'label',label\n print 'dest_path',dest_path\n\n vidcap = cv2.VideoCapture(os.path.join(video_path, video_name))\n vidcap.set(0, int(second*1000))\n success, image = vidcap.read()\n if success:\n cv2.imwrite(os.path.join(dest_path, video_name+\"_\"+str(second)+\"_\"+str(label)+\".jpg\"), image)", "def generateDataFromVideo(path):\n video = cv2.VideoCapture(path)\n success, frame = video.read()\n cnt = 1\n wiperExist = 0\n file = open(file='annotation.txt', mode='w')\n\n while success:\n cv2.imwrite(filename='./data/{0}.jpg'.format(cnt), img=frame)\n cnt += 1\n success, frame = video.read()\n if (cnt - 4) % 37 == 0 or (wiperExist > 0):\n wiperExist = (wiperExist + 1) % 21\n file.write('./Dataset/data/{0}.jpg 1\\n'.format(cnt))\n else:\n file.write('./Dataset/data/{0}.jpg 0\\n'.format(cnt))", "def reconstruction(args):\n\n print('Loading 2D keypoints ...')\n keypoints, scores, _, _ = load_json(args.keypoints_file)\n\n # Loading only one person's keypoints\n if len(keypoints.shape) == 4:\n keypoints = keypoints[0]\n assert len(keypoints.shape) == 3\n\n # Transform the keypoints format from different dataset (MSCOCO, MPII) to h36m format\n if args.kpts_format == 'coco':\n keypoints, valid_frames = coco_h36m(keypoints)\n elif args.kpts_format == 'mpii':\n keypoints, valid_frames = mpii_h36m(keypoints)\n elif args.kpts_format == 'openpose':\n # Convert 'Openpose' format to MSCOCO\n order_coco = [i for i in range(18) if i != 1]\n keypoints = keypoints[:, order_coco]\n keypoints, valid_frames = coco_h36m(keypoints)\n else:\n valid_frames = np.where(np.sum(keypoints.reshape(-1, 34), axis=1) != 0)[0]\n assert args.kpts_format == 'h36m'\n\n # Get the width and height of video\n cap = cv2.VideoCapture(args.video_path)\n width = int(round(cap.get(cv2.CAP_PROP_FRAME_WIDTH)))\n height = int(round(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))\n\n # normalize keypoints\n input_keypoints = normalize_screen_coordinates(keypoints[..., :2], w=width, 
h=height)\n\n if args.frames == 27:\n filter_widths = [3, 3, 3]\n channels = 128\n elif args.frames == 81:\n filter_widths = [3, 3, 3, 3]\n channels = 64\n else:\n filter_widths = [3, 3, 3, 3, 3]\n channels = 32\n\n model_pos = SpatioTemporalModel(adj, 17, 2, 17, filter_widths=filter_widths, channels=channels, dropout=0.05)\n\n if torch.cuda.is_available():\n model_pos = model_pos.cuda()\n\n # load trained model\n print('Loading checkpoint', args.weight)\n chk_file = os.path.join('./checkpoint/gastnet', args.weight)\n checkpoint = torch.load(chk_file, map_location=lambda storage, loc: storage)\n model_pos.load_state_dict(checkpoint['model_pos'])\n\n receptive_field = model_pos.receptive_field()\n pad = (receptive_field - 1) // 2 # Padding on each side\n causal_shift = 0\n\n print('Reconstructing ...')\n gen = UnchunkedGenerator(None, None, [input_keypoints[valid_frames]],\n pad=pad, causal_shift=causal_shift, augment=True,\n kps_left=kps_left, kps_right=kps_right, joints_left=joints_left, joints_right=joints_right)\n prediction = evaluate(gen, model_pos, return_predictions=True)\n prediction = camera_to_world(prediction, R=rot, t=0)\n\n # We don't have the trajectory, but at least we can rebase the height\n prediction[:, :, 2] -= np.min(prediction[:, :, 2])\n\n prediction_new = np.zeros((*input_keypoints.shape[:-1], 3), dtype=np.float32)\n prediction_new[valid_frames] = prediction\n\n print('Rendering ...')\n anim_output = {'Reconstruction': prediction_new}\n render_animation(keypoints, keypoints_metadata, anim_output, h36m_skeleton, 25, 3000,\n np.array(70., dtype=np.float32), args.viz_output, limit=-1, downsample=1, size=5,\n input_video_path=args.video_path, viewport=(width, height), input_video_skip=0)", "def convert(processed_dir: str, video_file: str):\n\n video_name = osp.splitext(osp.basename(video_file))[0]\n out_dir = processed_dir + video_name\n\n # create img dir\n if not osp.exists(processed_dir):\n os.mkdir(processed_dir)\n\n # Create dir for video file if not existent\n # this is where we save our images\n if not osp.exists(out_dir):\n os.mkdir(out_dir)\n\n if osp.exists(out_dir):\n os.mkdir(out_dir + \"/kermit/\")\n os.mkdir(out_dir + \"/not_kermit/\")\n\n # open video file for processing\n cap = cv.VideoCapture(video_file)\n frame_rate = cap.get(5) # frame rate\n\n sec = 0\n total_count = (60*25)+50 # just an approximation\n pbar = tqdm.tqdm(total=total_count, leave=False)\n\n count = 0\n while (cap.isOpened()):\n frame_id = cap.get(1) # current frame number\n frame_exists, curr_frame = cap.read()\n\n if not frame_exists:\n break\n else:\n if (frame_id % math.floor(frame_rate) == 0):\n # output is : video_file/<video_file>_frameNr.jpg\n cv.imwrite(osp.join(out_dir, '{}_{}.jpg'.format(video_name,count)), curr_frame)\n count = count + 1\n pbar.update(1)\n\n pbar.close()\n # release resources\n cap.release()", "def __init__(self, video_folder, output_folder, output_file=None, height=320, width=480,\n sample_every=10, max_workers=32):\n self.video_folder = video_folder\n self.output_folder = output_folder\n self.output_file = output_file\n print(\n f\"Video Preprocessor created with video_folder = {video_folder} , output_folder = {output_folder}, output_file = {output_file}\")\n\n self.height = height\n self.width = width\n self.sample_every = sample_every\n self.max_workers = max_workers\n print(f\"Frames will be created with height = {height} , width = {width} , sample_every = {sample_every}\")", "def let_camera_update_parameters(path_to_images, name_image, 
video_source=\"/dev/video0\"):\n subprocess_cmd(\"ffmpeg -f video4linux2 -s 1280x720 -i {} -ss 00:00:02 -frames 1 ./{}/{} -loglevel error -nostats\".format(video_source, path_to_images, name_image))", "def make_video(pattern, plotdir, moviedir, movienametag):\n images_list = glob('%s/%s'%(plotdir, pattern))\n images_list.sort()\n # save all required files into tmp_moviedir, with simple filenames: %.4d.png\n tmp_moviedir = '%s/tmp_movie_%s'%(plotdir, movienametag)\n os.system('mkdir -p %s'%tmp_moviedir)\n for i in range(len(images_list)):\n fname = images_list[i].split('%s/'%plotdir)[-1].split('.png')[0]\n os.system('cp %s/%s.png %s/%.4d.png'%(plotdir, fname, tmp_moviedir, i))\n\n os.system('avconv -i %s'%tmp_moviedir +'/%04d.png ' \\\n +' -y -c:v libx264 -pix_fmt yuv420p %s/%s.mp4'%(moviedir, movienametag))", "def precompute_numpy_video_files(self):\n videos = self.get_videos(self.not_collisions, 1) \\\n | chain_with(self.get_videos(self.collisions, 0)) \\\n | where(lambda f: not isfile(self.get_numpy_filename(f[1])))\n\n for v in videos:\n path = self.get_numpy_filename(v[1])\n\n video_to_npy(v[1],\n # note weird thing here, width doesn't work they appear to be inverted\n height=self.video_size,\n squarecrop=self.squarecrop,\n fps=self.framerate,\n maxlength=self.max_length,\n # save a npy replacement\n outfile=path)\n\n print('%s written' % (path))", "def recordVideo(args, env, model, filename):\n # env = model.get_env()\n images = []\n images = images + runAGame(model, env, args.method == 'centralized')\n images = images + runAGame(model, env, args.method == 'centralized')\n images = images + runAGame(model, env, args.method == 'centralized')\n images[0].save(filename + '.gif',\n format='GIF',\n append_images=images[1:],\n save_all=True,\n duration=500,\n loop=0)\n print('Video saved:', filename)", "def __init__(self, input_file_path, convert_to_bgr=False):\n self.__yuv_video = YuvDecoder(input_file_path, convert_to_bgr=True)\n print('After INSTANTIATION')\n self.__yuv_video.start()", "def run(input_video_file, output_video_file):\n print(\"Debut de la transformation du format de la video\")\n #récupération de la vidéo\n video = cv2.VideoCapture(input_video_file)\n #fps de la vidéo\n fps = video.get(cv2.CAP_PROP_FPS)\n #largeur des images de la vidéo\n width_video = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))\n #hauteur des images de la vidéo\n height_video = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))\n #nombre d'images dans la vidéo\n frame_count = int(video.get(cv2.CAP_PROP_FRAME_COUNT))\n #durée de la vidéo\n duration = frame_count/fps\n #nouvelle durée de la vidéo (on arrondi)\n new_duration = math.floor(duration)\n #nouveau fps de la vidéo\n new_fps = float(round(fps))\n #appliquer le nouveau fps\n video.set(cv2.CAP_PROP_FPS,new_fps)\n #appliquer la nouvelle durée\n print(new_duration)\n print(new_fps)\n print(new_duration*new_fps)\n new_frame_count = new_duration*new_fps\n video.set(cv2.CAP_PROP_FRAME_COUNT,new_duration*new_fps)\n #déffinition du format de la vidéo en sortie\n video_out = cv2.VideoWriter(output_video_file,0x7634706d,new_fps,(width_video,height_video),True)\n \n count = 0\n #ouverture de la vidéo\n while(video.isOpened()):\n #lecture image par image\n ret, frame = video.read()\n if ret==True:\n\n #ecriture de l'image dans la vidéo en sortie\n video_out.write(frame)\n count = count + 1\n \n if (count > (new_frame_count-1)):\n # Libérer la vidéo\n video.release()\n break\n else:\n break\n\n print(\"fin de la transformation\")\n #fermer les vidéos\n video.release()\n 
video_out.release()", "def main(data_dir):\n\n face2face_dir = '{}/manipulated_sequences/Face2Face/c0/videos'.format(data_dir)\n orig_dir = '{}/original_sequences/c0/videos'.format(data_dir)\n base_dir = '{}/manipulated_sequences/GANnotation'.format(data_dir)\n output_enc_dir = '{}/encodings'.format(base_dir)\n output_vid_dir = '{}/{}/videos'.format(base_dir, COMPRESSION_LEVEL)\n\n pairs = get_seq_combos(face2face_dir)\n\n # Compute all video encodings and save them to disk.\n # We precompute these because they take roughly 10 times as long to compute\n # as the reenactments, and we may want to recompute the reenactments with\n # different images later.\n print('Computing video encodings...')\n if not os.path.exists(output_enc_dir):\n os.makedirs(output_enc_dir)\n enc_count = 0\n for source_id, _ in pairs:\n encoding_path = get_encoding_path(output_enc_dir, source_id)\n if os.path.exists(encoding_path):\n continue # Encoding already calculated for this video sequence.\n print('Computing encoding for sequence {}...'.format(source_id))\n video_path = '{}/{}.mp4'.format(orig_dir, source_id)\n cap = cv2.VideoCapture(video_path)\n points = compute_video_encoding(cap)\n cap.release()\n try:\n np.savetxt(encoding_path, points.reshape((132,-1)).transpose())\n except KeyboardInterrupt as e:\n # Safely handle premature termination.\n # Remove unfinished file.\n if os.exists(encoding_path):\n os.remove(encoding_path)\n raise e\n enc_count += 1\n\n if enc_count == 0:\n print('No encodings were calculated')\n else:\n print('{} video sequences encoded'.format(enc_count))\n\n print()\n print('Computing reenactments...')\n\n # Load pre-trained model.\n gann_path = os.path.join(dirname, 'models/myGEN.pth')\n my_gann = GANnotation.GANnotation(path_to_model=gann_path)\n\n image_dir = '{}/original_sequences_images/{}/images'.format(data_dir, COMPRESSION_LEVEL)\n if not os.path.exists(output_vid_dir):\n os.makedirs(output_vid_dir)\n reenact_count = 0\n for source_id, driver_id in pairs:\n output_path = '{}/{}_{}.mp4'.format(output_vid_dir, source_id, driver_id)\n if os.path.exists(output_path):\n # Do not recreate a video if it already exists.\n # If the user wants to recreated a video\n # the existing video must be deleted first.\n continue\n\n print('Computing reenactment for {} onto {}...'.format(driver_id, source_id))\n # Validate that input files exist.\n encoding_path = get_encoding_path(output_enc_dir, driver_id)\n if not os.path.isfile(encoding_path):\n print('Failed to find encoding for video sequence {}'.format(driver_id),\n file=stderr)\n continue\n image_path = '{}/{}.png'.format(image_dir, source_id)\n if not os.path.isfile(image_path):\n print('Failed to find image for sequence {}'.format(source_id),\n file=stderr)\n continue\n\n points = np.loadtxt(encoding_path).transpose().reshape(66, 2, -1)\n\n # Load and transform image for inputting.\n image = cv2.imread(image_path)\n cropped = get_gann_cropped_face(image)\n\n # Compute reenactment.\n frames, _ = my_gann.reenactment(cropped, points)\n\n output_path = os.path.abspath(output_path)\n print('Writing video to \"{}\"'.format(output_path))\n try:\n write_video(frames, FPS, (128, 128), output_path)\n except KeyboardInterrupt as e:\n # Safely handle premature termination.\n # Remove unfinished file.\n if os.exists(output_path):\n os.remove(output_path)\n raise e\n reenact_count += 1\n\n if reenact_count == 0:\n print('No reenactments were created')\n else:\n print('{} reenactments created'.format(reenact_count))", "def generate_video(sign, issue, 
output):\n\n videos = {\n \"Climate Change\": \"ClimateChange.mp4\",\n \"Green Jobs\": \"GreenJobs.mp4\",\n \"Tourism\": \"Tourism.mp4\",\n \"Small Business\": \"SmallBusiness.mp4\",\n \"Public health\": \"PublicHealth.mp4\",\n \"Education Funding\": \"EducationFunding.mp4\"\n }\n\n video_path = CWD(f\"Assets/{videos[issue]}\")\n\n frame = cv2.imread(sign)\n frame = cv2.resize(frame, (1920, 1080))\n height, width, layers = frame.shape\n fourcc = cv2.VideoWriter_fourcc(*'mp4v')\n video = cv2.VideoWriter(CWD(\"temp.mp4\"), fourcc, 1, (width, height))\n for i in range(5):\n video.write(frame)\n video.release()\n\n image_clip = VideoFileClip(CWD(\"temp.mp4\"))\n original_video = VideoFileClip(video_path)\n final_video = concatenate_videoclips([original_video, image_clip], method=\"compose\")\n\n final_video.write_videofile(output)\n os.remove(CWD(\"temp.mp4\"))", "def recreateVideoFromLapPyr(pyrVideo): \n maxLevel=len(pyrVideo)\n fNumber, H, W, chNum=pyrVideo[0].shape\n videoResult=np.zeros(pyrVideo[0].shape, dtype=np.float32)\n for fn in range(videoResult.shape[0]):\n framePyr=[pyrVideo[i][fn] for i in range(maxLevel)]\n videoResult[fn]=recreateImgsFromLapPyr(framePyr)\n \n return videoResult", "def recreateAnalysis(path,filename, dataset = 'POST',save = False, color = (0,255,0),location = (0,0)):\n\tprint('going to recreate the recording, press anything when ready')\n\tcv2.namedWindow('Video'); cv2.moveWindow('Video',location[0],location[1])\n\tcv2.waitKey(0)\n\tR = cf.OBJECT_RADIUS\n\tmeta,data = loadData(path,filename)\n\tfps = meta['FPS']\n\tfiletype = meta['Format']\n\tvid = []\n\twhile True: #replay loop\n\t\tVidCap = cv2.VideoCapture(path+filename+filetype)\n\t\ti = 0\n\t\twhile True: #video loop\n\t\t\tre,img = VidCap.read()\n\t\t\t#print(len(vid))\n\t\t\t#cv2.imshow('tmp',img)\n\t\t\tif re:\n\t\t\t\tfor k in range(0,meta['Num Objects']):\n\t\t\t\t\tkey = 'object{}'.format(k)\n\t\t\t\t\tcx = data[key+'_x_'+dataset][i]; cy = data[key+'_y_'+dataset][i]; ang = data[key+'_theta_'+dataset][i]\n\t\t\t\t\timg = cv2.circle(img,(cx,cy),4,(200,200,255),-1)\n\t\t\t\t\timg = cv2.circle(img,(cx,cy),R,color)\n\t\t\t\t\timg = cv2.putText(img,key+': '+data[key+'_ID'],(cx,cy-R),cv2.FONT_HERSHEY_SIMPLEX,0.5,(color))\n\t\t\t\t\tpnt1 = (int(cx+R*math.cos(ang)),int(cy-R*math.sin(ang)))\n\t\t\t\t\timg = cv2.arrowedLine(img,(cx,cy),pnt1,color)\n\t\t\t\tcv2.imshow('Video',img)\n\t\t\t\tvid.append(img)\n\t\t\t\ti += 1\n\t\t\t\tif cv2.waitKey(int(1000/fps))== ord('q'):\n\t\t\t\t\tVidCap.release()\n\t\t\t\t\tprint('ending early, not saving')\n\t\t\t\t\tsave = False\n\t\t\t\t\tbreak\n\t\t\telse:\n\t\t\t break\n\t\tprint('press r to replay, or anything else to quit')\n\t\tif cv2.waitKey(0) == ord('r'):\n\t\t print('playing processed video again')\n\t\telse:\n\t\t break\n\tVidCap.release()\n\tif save:\n\t\tim = vid[-1]\n\t\t(frame_height,frame_width) = im.shape[0:2]\n\t\tvideoPath = path+filename + '_EVAL_' + dataset + filetype\n\t\tout = cv2.VideoWriter(videoPath, cf.FOURCC, fps, (frame_width,frame_height))\n\t\tfor i,frame in enumerate(vid):\n\t\t\tout.write(frame)\n\t\tprint('processed video file saved')\n\treturn", "def check_video_timestamps(movie_file, desired_format='.mp4', desired_framerate=30):\n\n check_video_format(movie_file, desired_format='.mp4', original_format='.avi')\n\n new_movie_file = movie_file+'_tt'+desired_format\n if not os.path.isfile(new_movie_file):\n #Convert file to 30 fps\n cmd = ['ffmpeg', '-i', movie_file+desired_format]\n cmd += ['-r', str(desired_framerate)]\n cmd += ['-y', 
movie_file+'_t'+desired_format]\n cmd_string = ''.join([\"%s \" % el for el in cmd]) \n #print '-->Running: ', cmd_string\n p = subprocess.Popen(cmd, shell=False)\n p.wait()\n\n #Add timecode text to video\n cmd = 'ffmpeg -i '+movie_file+'_t'+desired_format+' -vf drawtext=\\\"fontfile=/opt/X11/share/fonts/TTF/VeraMoBd.ttf: timecode=\\'00\\:00\\:00\\:00\\':rate=30: fontcolor=white@0.8: x=7: y=460\\\" -an -y '+movie_file+'_tt'+desired_format\n args = shlex.split(cmd)\n #print args\n p = subprocess.Popen(args, shell=False)\n p.wait()\n\n os.remove(movie_file+'_t'+desired_format)\n\n return new_movie_file", "def process_video(weights_path,video_path,output_path,margins=40,facenet_threshold=.985,euclidean_distance_threshold = 120.0):\n with torch.no_grad():\n mtcnn = MTCNN(image_size= 256, margin = 0)\n model = Model.VGGFace_Extractor().to(device)\n model.load_state_dict(torch.load(weights_path))\n model.eval()\n cap = cv2.VideoCapture(video_path)\n rotateCode = check_rotation(video_path)\n fourcc = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')\n out = cv2.VideoWriter(output_path, fourcc, 20.0, (int(cap.get(3)), int(cap.get(4))))\n ret, frame1 = cap.read()\n hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n i = 0\n while (cap.isOpened()):\n i += 1\n ret, frame2 = cap.read()\n if not (ret): break\n if rotateCode is not None:\n frame2 = correct_rotation(frame2, rotateCode)\n\n boxes, probs = mtcnn.detect(frame2)\n img_draw = frame2.copy()\n img_draw = Image.fromarray(img_draw)\n draw = ImageDraw.Draw(img_draw)\n if boxes is not None:\n names = []\n distances_difference = []\n for (box, point) in zip(boxes, probs):\n \"\"\" Loop from the extract_face method from facenet_pytorch\"\"\"\n\n if point < facenet_threshold: continue\n margin = margins\n image_size = 256\n margin = [\n margin * (box[2] - box[0]) / (image_size - margin),\n margin * (box[3] - box[1]) / (image_size - margin),\n ]\n raw_image_size = get_size(img_draw)\n box = [\n int(max(box[0] - margin[0] / 2, 0)),\n int(max(box[1] - margin[1] / 2, 0)),\n int(min(box[2] + margin[0] / 2, raw_image_size[0])),\n int(min(box[3] + margin[1] / 2, raw_image_size[1])),\n ]\n\n face = img_draw.crop(box).copy().resize((image_size, image_size), Image.BILINEAR).convert(\"RGB\")\n features_1 = model(utils.preprocess(face,device).reshape(-1, 3, 224, 224))\n images_path = \"individuals_extracted/\"\n data_path = os.path.join(images_path, '*pt')\n files = glob.glob(data_path)\n name = \"Unknown\"\n best_distance = euclidean_distance_threshold + 5\n for k,f1 in enumerate(files):\n features = torch.load(f1)\n distance = utils.euclidean_distance(features,features_1)\n if distance < euclidean_distance_threshold and distance < best_distance:\n best_distance = distance\n name = re.sub('_[1-9]*[.]*[a-zA-Z]*', '', f1.replace(images_path,\"\"))\n\n names.append(name)\n distances_difference.append(best_distance)\n\n for (box, point,name,distances) in zip(boxes, probs,names,distances_difference):\n if point < facenet_threshold or name == \"Unknown\": continue\n draw.rectangle(box.tolist(), width=4)\n draw.text(box.tolist(), name, font=ImageFont.truetype(\"Keyboard.ttf\",40))\n\n k = cv2.waitKey(3) & 0xff\n if k == 27:\n break\n out.write(np.asarray(img_draw))\n\n out.release()\n cap.release()\n cv2.destroyAllWindows()", "def run(self, vid_input_path='project_video.mp4'):\n vid_output_path = self.g.output_movie_path + vid_input_path\n print('Finding lanes for:', vid_input_path) \n\n # Load the Video\n # video_clip = VideoFileClip(video_filename).subclip(10, 11)\n clip1 = 
VideoFileClip(vid_input_path)\n\n # Feed the video, clip by clip into the pipeline.\n test_clip = clip1.fl_image(self.__image_pipeline) \n test_clip.write_videofile(vid_output_path, audio=False)\n\n return True", "def __init__(self,vid_path:str,num_frames:int=None,vid_flow_direction:str='left'):\n \n self.num_frames=num_frames\n if vid_path.split('.')[-1]=='cine' or vid_flow_direction!='left':\n #This is a cine file or needs to be rotated, convert to mp4\n print('Converting .cine file to mp4 (lossless)')\n #detect platform so we can correct file paths for ffmpeg\n is_win=re.compile('.*[Ww]in.*')\n if is_win.match(sys.platform):\n corrected_vid_path='\"'+vid_path+'\"'\n else:\n #Put escape characters in front of spaces in file name\n corrected_vid_path=[]\n for c in vid_path:\n if c==' ':\n corrected_vid_path.append('\\\\')\n corrected_vid_path.append(c)\n corrected_vid_path=''.join(corrected_vid_path)\n if vid_flow_direction=='up':\n rotate='-vf \"transpose=2\" '\n elif vid_flow_direction=='left':\n rotate=''\n elif vid_flow_direction=='right':\n rotate='-vf \"transpose=2,transpose=2\" '\n else:\n raise Exception(\"vid_flow_direction must be 'up', 'left' or 'right'\")\n if num_frames!=None:\n frames='-frames:v {0} '.format(num_frames)\n else:\n frames=''\n os_handle,new_file_path=tempfile.mkstemp(suffix='.mp4')\n #close file, we don't work with it directly\n os.close(os_handle)\n ffmpeg_command='ffmpeg -y -i {orig_file} {frames}{rotate}-f mp4 -crf 0 {new_file}'.format(orig_file=corrected_vid_path,rotate=rotate,new_file=new_file_path,frames=frames)\n print(ffmpeg_command)\n list(os.popen(ffmpeg_command))\n self.vid_path=new_file_path\n self.delete_file=True\n stats=os.stat(new_file_path)\n if stats.st_size==0:\n raise Exception('File conversion failed, check that ffmpeg is on PATH')\n else:\n #Not a cine\n self.vid_path=vid_path\n self.delete_file=False", "def generate_heatmap_video(img_list,size,video_filename): \n out = cv2.VideoWriter(video_filename,cv2.VideoWriter_fourcc(*'DIVX'), 25, size)\n for i in range(len(img_list)):\n out.write(img_list[i])\n out.release()\n print('Heatmap video generated at: ', video_filename)", "def make_video(data,\n xdim, ydim, sample_read_rows, sample_read_cols, image_write_rows, image_write_cols,\n directory, filename, fps = 24.0, start_frame = 1, end_frame = None, timestamp = False, fontsize = 30, ts_pos = (0,0), save_raw = False):\n\n #Command to send via the command prompt which specifies the pipe parameters\n # command = ['ffmpeg',\n # '-y', # (optional) overwrite output file if it exists\n # '-f', 'image2pipe',\n # '-vcodec', 'mjpeg', #'mjpeg',\n # '-r', '1',\n # '-r', str(fps), # frames per second\n # '-i', '-', # The input comes from a pipe\n # '-an', # Tells FFMPEG not to expect any audio\n # '-vcodec', 'mpeg4',\n # '-b:v', '5000k',\n # directory + filename + \"/\"+filename+\".mp4\",\n # '-hide_banner',\n # '-loglevel', 'panic']\n\n # Create directories if they don't exist\n if not os.path.exists(os.path.join(directory, filename, 'frames/')):\n os.makedirs(os.path.join(directory, filename, 'frames/'))\n if save_raw and not os.path.exists(os.path.join(directory, filename, 'frames-raw/')):\n os.makedirs(os.path.join(directory, filename, 'frames-raw/'))\n\n if end_frame == None:\n end_frame = data.FrameCount\n\n cm = colormap.get_cmap('viridis')\n\n for i, frame_offset in enumerate(tqdm.tqdm(range(start_frame, end_frame))):\n frame = FrameRead(data, frame_offset)\n frame_image = np.zeros([ydim, xdim], dtype=np.uint8)\n frame_image[image_write_rows, 
image_write_cols] = frame.frame_data[sample_read_rows, sample_read_cols]\n\n rgb_im = Image.fromarray(cm(frame_image, bytes=True)).convert('RGB')\n rgb_im.save(os.path.join(directory, filename, 'frames/', f'{i}.jpg'), 'JPEG')\n\n if save_raw:\n Image.fromarray(np.uint8(frame.frame_data), mode='L').save(os.path.join(directory, filename, 'frames-raw/', f'{i}.jpg'), 'JPEG')", "def process_video(self):\n if os.path.isfile(self.source):\n self.cap = cv2.VideoCapture(self.source)\n else:\n try:\n file_name = \"input.mp4\"\n self.source = self.source.replace('open', 'uc')\n print( \"\\nDownloading video file from drive link to %s\\n\"%file_name)\n gdown.download(self.source, file_name, quiet=False)\n print( \"%s downloaded!\\n\"%file_name )\n self.cap = cv2.VideoCapture(file_name)\n except Exception:\n raise RuntimeError(\"Invalid source input, please specify a Google drive link or a downloaded local file as input \\n\")\n\n\n assert self.cap.isOpened(), \"Failed to open %s\" % self.source\n\n self.w = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n self.h = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n self.fps = self.cap.get(cv2.CAP_PROP_FPS) \n self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))\n return", "def __init__(self, videoPath=\"\", cacheString=\"-cache\", faceDetector=None):\n self.videoPath = videoPath\n self.cacheString = cacheString\n self.video = None \n self.currentFrame = []\n self.currentFrameNumber = 0\n self.frameCount = 0 #total frame number\n self.fps = 25\n self.ret = True #if the video is over\n self.isPlaying = False\n self.ifDrawAxis = False\n self.ifDrawSquare = False\n self.cacheData = {}\n \n if videoPath != \"\":\n self.load()", "def mmap2Channel(self):\n self.memmap = np.memmap(self.inputFilenames['ofd'],\n dtype='uint16',\n mode='r',\n offset=0,\n shape=(\n 2, self.reconstructionSettings['numSamples'], self.scanSettings['numAlinesPerRawFrame'],\n self.scanSettings['numFrames']), order='F')", "def video(ctx, video_file, analytic_addr):\n if not analytic_addr:\n analytic_addr = [\"localhost:50051\"]\n db = ctx.obj.db\n client = aceclient.AnalyticMultiClient()\n classes = {}\n cap = cv2.VideoCapture(video_file)\n window_names = []\n f_req = analytic_pb2.FrameRequest()\n for a in analytic_addr:\n analytic = analytic_pb2.AnalyticData()\n analytic.addr = a\n f_req.analytics.append(analytic)\n # Load all frames into a queue buffer\n buf = Queue()\n while (cap.isOpened()):\n ret, frame = cap.read()\n if not ret:\n break\n buf.put(frame)\n try:\n while not buf.empty():\n frame = buf.get(block=False)\n resp = analytic_pb2.CompositeResults()\n resp = client.process_frame(frame, f_req, resp)\n render(resp, window_names, classes, frame, db)\n finally:\n cv2.destroyAllWindows()\n print(\"Shutting down\")", "def gen_frame_feature_resNet152(rgb_fpath, rgb_spath, rgb_spath_flip, u_fpath, u_spath, u_spath_flip, v_fpath, v_spath,\n v_spath_flip):\n rgb_fl = Frameloader(rgb_fpath)\n u_fl = Frameloader(u_fpath)\n v_fl = Frameloader(v_fpath)\n\n # construct the model\n input_layer, resNet152, end_points, saver = model()\n\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n # config.log_device_placement = True\n config.allow_soft_placement = True\n with tf.Session(config=config) as sess:\n sess.run(tf.global_variables_initializer())\n saver.restore(sess, \"/home/boy2/UCF101/src/resNet-152/resnet_v1_152.ckpt\")\n # read all video frames and generate frame features by resNet152\n while len(rgb_fl.frame_parent_paths) != 0:\n # save start time\n start = 
time.time()\n\n # prepare the input frames\n video_name = rgb_fl.get_current_video_name()\n print(\"Current working on: \", video_name)\n rgb_frames = rgb_fl.load_frames()\n u_frames = u_fl.load_frames()\n v_frames = v_fl.load_frames()\n if len(rgb_frames) > len(u_frames) and len(rgb_frames) - 1 == len(u_frames):\n rgb_frames = rgb_frames[:-1]\n rgb_frames_flip, u_frames_flip, v_frames_flip = flip(rgb_frames, u_frames, v_frames)\n\n # resize the frames to (224, 224)\n rgb_frames = resize(rgb_frames, (width, height))\n u_frames = resize(u_frames, (width, height))\n v_frames = resize(v_frames, (width, height))\n rgb_frames_flip = resize(rgb_frames_flip, (width, height))\n u_frames_flip = resize(u_frames_flip, (width, height))\n v_frames_flip = resize(v_frames_flip, (width, height))\n\n if rgb_frames != [] and u_frames != [] and v_frames != [] and rgb_frames_flip != [] \\\n and u_frames_flip != [] and v_frames_flip != []:\n\n rgb_feature = cal_features(rgb_frames, sess, input_layer, resNet152)\n u_feature = cal_features(u_frames, sess, input_layer, resNet152)\n v_feature = cal_features(v_frames, sess, input_layer, resNet152)\n rgb_feature_flip = cal_features(rgb_frames_flip, sess, input_layer, resNet152)\n u_feature_flip = cal_features(u_frames_flip, sess, input_layer, resNet152)\n v_feature_flip = cal_features(v_frames_flip, sess, input_layer, resNet152)\n\n np.save(os.path.join(rgb_spath, video_name), rgb_feature)\n np.save(os.path.join(rgb_spath_flip, video_name), rgb_feature_flip)\n np.save(os.path.join(u_spath, video_name), u_feature)\n np.save(os.path.join(u_spath_flip, video_name), u_feature_flip)\n np.save(os.path.join(v_spath, video_name), v_feature)\n np.save(os.path.join(v_spath_flip, video_name), v_feature_flip)\n\n print(time.time() - start)", "def process_video(data_info, name, mode, is_training=True):\r\n data = Action_Dataset(name, mode, [data_info])\r\n if is_training:\r\n clip_seq, label_seq = data.next_batch(1, _CLIP_SIZE)\r\n else:\r\n clip_seq, label_seq = data.next_batch(\r\n 1, _EACH_VIDEO_TEST_SIZE+1, shuffle=False, data_augment=False)\r\n clip_seq = 2*(clip_seq/255) - 1\r\n clip_seq = np.array(clip_seq, dtype='float32')\r\n return clip_seq, label_seq", "def process_vid_file(vid_file):\n # dict that holds params for matching\n params = {}\n # scale for motion estimation\n params['fScale'] = 1.0\n # number of descriptors\n params['nFeat'] = 2000\n # minimal number of descriptors\n params['nMinKp'] = 50\n # minimal number of matched descriptors\n params['nMinMatch'] = 30\n # maximum offset in flow \n params['fVhThresh'] = 32.0\n # params for image quality (motion blur)\n params['fQualityThresh'] = 5.0\n params['fQualityRatio'] = 0.2\n\n # get video stats\n video_name = os.path.basename(vid_file[:-4])\n video_name.replace(' ', '')\n output_frame_list = []\n output_homography = []\n frame_index = 0\n\n # open video file\n cap = cv2.VideoCapture(vid_file)\n if not cap.isOpened():\n print \"Can not open video file: {:s}\".format(vid_file)\n return frame_pairs\n \n # fetch the first frame\n ret, prev_frame = cap.read()\n if (not ret) or (prev_frame is None):\n print \"Can not read video file: {:s}\".format(vid_file)\n\n start = time.time()\n\n # loop over all frames\n while(True):\n\n # increase the counter\n frame_index += 1\n\n # read current frame\n ret, frame = cap.read()\n\n # end of file?\n if (not ret) or (prev_frame is None):\n break\n\n # match the frames\n M = match_frames(prev_frame, frame, params)\n\n # copy to previous frame\n prev_frame = frame.copy()\n\n # save 
the frame number and the homography\n if len(M) > 0:\n output_frame_list.append(frame_index)\n output_homography.append(M)\n if len(output_homography) == 1:\n output_homography.append(M)\n # timing\n end = time.time()\n print \"Averge time per frame: {:2f} s. \".format(float(end-start)/frame_index)\n print \"{:2d} valid frame pairs out of {:2d} total pairs\".format(len(output_frame_list), frame_index-1)\n \n return output_frame_list, output_homography", "def startVideo(self,fname):\n\n\n try:\n fourcc = cv2.cv.CV_FOURCC(*'DIVX')\n\n except Exception as e:\n #print \"Exception \",e.args\n fourcc = cv2.VideoWriter_fourcc(*'DIVX')\n\n self.video = cv2.VideoWriter(fname, fourcc, 10, (self.screenWidth, self.screenHeight))\n if self.video is None:\n print \"VideoWriter failed to start.\"\n else:\n print \"VideoWriter started ok\"", "def start_ffmpeg_record(stream, stream_url, formatted_date):\n filename = stream + '_' + formatted_date\n save_video_dir = 'rover_stream/' + stream\n subprocess.Popen(['mkdir rover_stream'], shell=True)\n subprocess.Popen(['mkdir ' + save_video_dir], shell=True)\n proc_video[stream] = subprocess.Popen(['ffmpeg -i ' + stream_url + ' -acodec copy -vcodec copy ' + save_video_dir + '/' + filename + '.mp4'], stdin=PIPE, shell=True)", "def find_cars_video(input_path, output_path, clf, hyperparams, box_color=None, debug=False):\n v = VideoProcessor(clf, hyperparams, box_color)\n v.process_video(input_path, output_path, debug)", "def extract_faces(frame_path, out_path, face_path, processes=1):\n if os.path.exists(out_path):\n msg = '[extract_faces] Skipping extraction since faces already exist at {}'\n print(msg.format(out_path))\n return\n\n from faceoff.faceswap_api import FaceSwapInterface\n\n os.makedirs(out_path)\n print('[extract_faces] Starting on {}'.format(frame_path))\n start_time = time.time()\n\n api = FaceSwapInterface()\n api.extract(frame_path, out_path, face_path, processes)", "def video2img(video, csv, output_path, match):\n with open(csv, 'r') as file:\n lines = file.readlines()[1:]\n\n csv_content = []\n for line in lines:\n frame, vis, x, y = line.strip().split(',')\n csv_content.append((int(frame), int(vis), float(x), float(y)))\n\n name_split = os.path.split(video)\n name = \"match%d\"%(match) + '_' + name_split[-1][:-4]\n\n count = 0\n num_data = len(csv_content)\n cap = cv2.VideoCapture(video)\n success, image = cap.read()\n ratio = image.shape[0]/HEIGHT\n while success:\n if count >= num_data:\n break\n label = csv_content[count]\n if label[1] == 0:\n heat_map = genHeatMap(WIDTH, HEIGHT, -1, -1, sigma, mag)\n else:\n heat_map = genHeatMap(WIDTH, HEIGHT, int(label[2]/ratio), int(label[3]/ratio), sigma, mag)\n \n image = cv2.resize(image, (WIDTH, HEIGHT))\n heat_map = (heat_map*255).astype('uint8')\n cv2.imwrite(os.sep.join([output_path, 'x_data', name+'_%d.jpg' %(count)]), image)\n cv2.imwrite(os.sep.join([output_path, 'y_data', name+'_%d.jpg' %(count)]), heat_map)\n success, image = cap.read()\n count += 1", "def make_seret(processed_files_directory='files/',fps=5):\r\n # Sort files in processed images directory\r\n files = sort_files(processed_files_directory)\r\n # Create list as container for the movie.\r\n img_array = []\r\n # For each file\r\n for file in files:\r\n file_format = file.split(\".\")\r\n if file_format[-1] == 'jpg': # verify that we will include jpg files only in the movie\r\n # Read the file\r\n img = cv2.imread(file)\r\n # Extract height, width, channels from image\r\n height, width, layers = img.shape\r\n # size = (width, 
height)\r\n size = (width, height)\r\n # Append image to movie container\r\n img_array.append(img)\r\n # Create a video writer for the movie\r\n out = cv2.VideoWriter(processed_files_directory+'initial.avi', cv2.VideoWriter_fourcc(*'DIVX'), fps, size)\r\n # For each image in container\r\n for image in img_array:\r\n # Write image by video writer\r\n out.write(image)\r\n # Release video writer.\r\n out.release()", "def write_proposals_to_video(self, vdir, frms_per_sec=1.0):\n # Input video\n vid_name = self.props['vname']\n vfpath = fdops.get_files_with_kws(vdir, [vid_name, \".mp4\"])\n if len(vfpath) > 1:\n raise Exception(f\"More than one video found\\n\\t{vfpath}\")\n vin = VidReader(vfpath[0])\n\n # Output video\n ovid_path = f\"{self.props['loc']}/{self.props['name']}.mp4\"\n vw = skvideo.io.FFmpegWriter(\n ovid_path,\n outputdict={'-vcodec': 'libx264','-r':'30'}\n )\n\n # Calculate frame numbers(POC) that we will use.\n f0_start = 0 # starting frame poc\n f0_end = vin.props['num_frames'] - 1 # ending frame poc\n f0_skip = vin.props['frame_rate']*(1/frms_per_sec)\n f0s = list(range(f0_start, f0_end, int(f0_skip)))\n\n # Loop over each frame number and draw proposal regions\n # over them\n for f0 in tqdm(f0s):\n frm = vin.get_frame(f0, c='bgr')\n\n # Get proposals for frame f0\n props = self._get_proposals_for_frame(f0)\n\n # Proposals looop\n for p in props:\n if len(p) > 0:\n w0, h0, w, h = p\n frame = cv2.rectangle(\n frm, (w0, h0), (w0+w, h0+h), (0, 256, 0), 1\n )\n # Write frame to output\n vw.writeFrame(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))\n\n vw.close()\n vin.release()\n import sys; sys.exit()", "def reencode(filepath, loglevel='panic'):\n try:\n import ffmpeg\n except ImportError:\n logger.error(\n 'Import Error! Cant import ffmpeg. '\n 'Annotations operations will be limited. import manually and fix errors')\n raise\n if not os.path.isfile(filepath):\n raise IOError('File doesnt exists: {}'.format(filepath))\n # re encode video without b frame and as mp4\n basename, ext = os.path.splitext(filepath)\n output_filepath = os.path.join(basename, os.path.basename(filepath).replace(ext, '.mp4'))\n if not os.path.isdir(os.path.dirname(output_filepath)):\n os.makedirs(os.path.dirname(output_filepath))\n try:\n stream = ffmpeg.input(filepath, **{'loglevel': loglevel}).output(output_filepath,\n **{'x264opts': 'bframes=0',\n 'f': 'mp4'})\n ffmpeg.overwrite_output(stream).run()\n except Exception as e:\n logger.exception('ffmpeg error in disassemble:')\n raise\n\n output_probe = Videos.get_info(output_filepath)\n start_time = eval(output_probe['streams'][0]['start_time'])\n fps = eval(output_probe['streams'][0]['avg_frame_rate'])\n has_b_frames = output_probe['streams'][0]['has_b_frames']\n start_frame = fps * start_time\n if start_time != 0:\n logger.warning('Video start_time is not 0!')\n if has_b_frames != 0:\n logger.warning('Video still has b frames!')\n return output_filepath", "def make_movie(field='uu1', datadir='data/', proc=-1, extension='xz',\n format='native', tmin=0., tmax=1.e38, amin=0., amax=1.,\n transform='', oldfile=False):\n\n import pylab as plt\n\n datadir = os.path.expanduser(datadir)\n if proc < 0:\n filename = datadir + '/slice_' + field + '.' + extension\n else:\n filename = datadir + '/proc' + \\\n str(proc) + '/slice_' + field + '.' 
+ extension\n\n # Read the global dimensions.\n dim = read_dim(datadir, proc)\n if dim.precision == 'D':\n precision = 'd'\n else:\n precision = 'f'\n\n # Set up slice plane.\n if extension == 'xy' or extension == 'Xy':\n hsize = dim.nx\n vsize = dim.ny\n if extension == 'xz':\n hsize = dim.nx\n vsize = dim.nz\n if extension == 'yz':\n hsize = dim.ny\n vsize = dim.nz\n plane = np.zeros((vsize, hsize), dtype=precision)\n\n infile = npfile(filename, endian=format)\n\n files = []\n fig = plt.figure(figsize=(5, 10))\n ax = fig.add_subplot(111)\n\n ifirst = True\n islice = 0\n while True:\n try:\n raw_data = infile.fort_read(precision)\n except ValueError:\n break\n except TypeError:\n break\n\n if oldfile:\n t = raw_data[-1]\n plane = raw_data[:-1].reshape(vsize, hsize)\n else:\n t = raw_data[-2]\n plane = raw_data[:-2].reshape(vsize, hsize)\n\n if transform:\n exec('plane = plane' + transform)\n\n if t > tmin and t < tmax:\n ax.cla()\n ax.imshow(plane, vmin=amin, vmax=amax)\n fname = '_tmp%03d.png' % islice\n print('Saving frame' + fname)\n fig.savefig(fname)\n files.append(fname)\n\n if ifirst:\n #print \"----islice----------t---------min-------max-------delta\" # Python 2\n print(\"----islice----------t---------min-------max-------delta\")\n #print \"%10i %10.3e %10.3e %10.3e %10.3e\" % \\ # Python 2\n #(islice, t, plane.min(), plane.max(), plane.max() - plane.min()) # Python 2\n print(\"{0:10} {1:10.3e} {2:10.3e} {3:10.3e} {4:10.3e}\".format(islice, t, plane.min(), plane.max(), plane.max() - plane.min()))\n\n ifirst = False\n islice += 1\n\n #print 'Making movie animation.mpg - this make take a while'\n print('Making movie animation.mpg - this make take a while')\n # SC: Not all systems use mencoder. Need to change this into ffmpeg.\n os.system(\"mencoder 'mf://_tmp*.png' -mf type=png:fps=24 -ovc lavc -lavcopts vcodec=wmv2 -oac copy -o animation.mpg\")\n os.system(\"rm _tmp*.png\")\n infile.close()", "def process_videos(chapter_info):\n\n print(\"Processing chapter_info:\", chapter_info)\n\n # getting creation time of the first chapter\n # TODO update when adding multiple directory proccessing\n os.chdir(DIR_VIDEO_FILES)\n print(\"1st chapter\", chapter_info[1][0])\n chap1_time = time.strftime(\n r\"%Y-%m-%d_%H-%M\", time.localtime(os.path.getctime(chapter_info[1][0])))\n print(\"1st chapter creation\", chap1_time)\n\n # output_file = f\"M_GH00{chapter_info[0]}_{chap1_time}.MP4\"\n output_file = f\"{chap1_time}_GH00{chapter_info[0]}_MRG.MP4\"\n if os.path.isfile(output_file):\n print(f\"Chapter already processed, found file: {output_file}\")\n return\n\n # preparing text file containing file list for merging (for ffmpeg)\n video_list_file = chapter_info[0] + \"_merge.txt\"\n with open(video_list_file, \"w\") as f:\n for video_chapter in chapter_info[1]:\n f.write(f\"file {video_chapter}\\n\")\n\n command = f\"{FFMPEG_EXE} -f concat -i {video_list_file} -c copy {DIR_OUTPUT}{output_file}\"\n print(\"command =\", command)\n # p = subprocess.run(\"dir\", shell=True, capture_output=True)\n # p = subprocess.run(\"dir\", shell=True, stdout=subprocess.PIPE, text=True)\n p = subprocess.run(command, stdout=subprocess.PIPE, text=True)\n print(\"returncode =\", p.returncode)\n # print(\"stdout =\", p.stdout)\n os.remove(video_list_file) # remove file list after merging\n # rename original chapters after processing\n for video_chapter in chapter_info[1]:\n os.rename(video_chapter, f\"OK_{video_chapter}\")", "def video_files():\n p = parse_cmdline(get_parser=get_parser_files)\n log.setup_main_handler(\n 
mods=(\"fogtools\", \"typhon\", \"fogpy\", \"sattools\", \"fcitools\", \"satpy\",\n \"pyresample\"),\n level=logging.INFO)\n vis.show_video_abi_glm(\n files=p.files,\n img_out=p.filename_pattern_image,\n vid_out=p.filename_pattern_video,\n out_dir=p.outdir)\n print(\"Files written to:\", p.outdir)", "def __merge_ts_by_ffmepg(self, local_m3u8_path, video_name):\n try:\n command = 'ffmpeg -allowed_extensions ALL -i {} -c copy -y {}'.format(local_m3u8_path, video_name)\n print(command)\n os.system(command)\n print('merge succeed.')\n except:\n print('merge failed.')", "def process_data(self, clip_name) -> Preprocessor:\n\n config: Config = Config.get_config()\n\n folder_name = config.video_data\n video_data_file = ''.join(clip_name.split('.')[:-1]) + '.json'\n video = Video.from_json(os.path.join(folder_name, video_data_file))\n\n # Convert to usable data type period_running_person division, alle fragment soorten\n preprocessor = Preprocessor(video)\n\n return preprocessor", "def prepare_video(path_to_video: str, number_of_images=87) -> None:\n\n temp_video = path.join(path_to_video, 'temp_outpy.mp4')\n video = path.join(path_to_video, 'outpy.h264')\n\n # create mp4 video for metadata and compute video duration\n subprocess.run(['ffmpeg', '-i', video, '-c', 'copy', temp_video])\n result = subprocess.run([\"ffprobe\", \"-v\", \"error\", \"-show_entries\",\n \"format=duration\", \"-of\",\n \"default=noprint_wrappers=1:nokey=1\", temp_video],\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n video_duration = float(result.stdout)\n\n # create images folder\n path_to_images = path.join(path_to_video, 'images')\n if path.exists(path_to_images) and path.isdir(path_to_images):\n shutil.rmtree(path_to_images)\n makedirs(path_to_images)\n\n # split the given video into images\n subprocess.run(['ffmpeg', '-i', temp_video, '-r', str(number_of_images / video_duration), '-f', 'image2',\n path.join(path_to_images, 'image%d.jpg')])\n\n # remove extra files\n remove_extra_images(path_to_images, number_of_images)\n remove(temp_video)", "def on_movie(self):\n path = os.path.normcase(self.pMov)\n os.system('\"%s\"' % path)", "def make_video(self, mp4=True, gif=True):\n fn = self.get_output_filename(\".mp4\")\n command = (\n (get_ffmpeg_path() + f\" -loglevel panic -framerate {self.framerate} -i \")\n + os.path.join(self.frame_directory, FRAME_FN_TEMPLATE)\n + \" -s:v \"\n + str(self.width)\n + \"x\"\n + str(self.height)\n + \" -c:v libx264 -profile:v high -crf 1 -pix_fmt yuv420p -y \"\n + fn\n )\n\n os.system(command)\n\n if gif:\n mp4_to_gif(\n self.get_output_filename(\".mp4\"),\n self.get_output_filename(\".gif\"),\n self.framerate,\n )\n\n if not mp4:\n os.remove(fn)", "def __init__(self, pn_output=\"./\"):\n # Initialize the video stream, then allow the camera sensor to warm up\n print(\"[INFO] starting video stream...\")\n self.vs = cv2.VideoCapture(0) # Capture video frames, 0 is default video camera\n time.sleep(2.0)\n\n # Load config\n config = configparser.ConfigParser()\n config.read(fn_config)\n self.pn_guest_images = config['DEFAULT']['pn_guest_images_archive']\n self.guest_archive = p7zip(self.pn_guest_images)\n self.camera_rot = int(config['DEFAULT']['camera_rot'])\n self.image_width = int(config['DEFAULT']['image_width'])\n self.max_capture_interval = float(config['DEFAULT']['capture_interval'])\n self.max_capture_length = int(config['DEFAULT']['max_capture_length'])\n self.max_images = int(config['DEFAULT']['max_images'])\n\n # Capture Vars\n self.curr_pic = None # Current image from the 
camera\n self.gst_capture = None\n self.start_time = time.time()\n self.save_time = time.time()\n self.pic_num = None\n self.pn_gstcap_out = None\n\n # Face Detection Model\n self.min_detec_conf = float(config['DEFAULT']['min_detec_conf'])\n self.min_face_px = make_tuple(config['DEFAULT']['min_face_px'])\n pn_detector_model = config['DEFAULT']['pn_detector_model']\n self.trainRBGavg = make_tuple(config['DEFAULT']['detector_trainrgbavg'])\n print(\"[INFO] loading face detector and embedding model...\")\n protoPath = os.path.sep.join([pn_detector_model, \"deploy.prototxt\"])\n modelPath = os.path.sep.join([pn_detector_model,\n \"res10_300x300_ssd_iter_140000.caffemodel\"])\n self.detector = cv2.dnn.readNetFromCaffe(protoPath, modelPath)\n self.detector.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)\n\n # Face Recognition (extract/recognize embeddings) Model\n self.min_recog_prob = float(config['DEFAULT']['min_recog_prob'])\n fn_embedding_model = config['DEFAULT']['fn_embedding_model']\n self.embedder = cv2.dnn.readNetFromTorch(fn_embedding_model)\n self.embedder.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)\n self.gst_identify = False\n self.guest_ids = {}\n\n # Guest Info (update outside of function)\n self.known_guest_meta = None", "def create_video_output_path(output_dir, cfg):\n filename = os.path.join(output_dir, cfg['video']['output']) + '{}.mp4'\n # If a file of this name exists increase the counter by 1\n counter = 0\n while os.path.isfile(filename.format(counter)):\n counter += 1\n # Apply counter to filename\n return filename.format(counter)", "def take_one_shot(path_to_images, name_image, video_source=\"/dev/video0\"):\n subprocess_cmd(\"ffmpeg -f video4linux2 -s 1280x720 -i {} -frames 1 ./{}/{} -loglevel error -nostats\".format(video_source, path_to_images, name_image))", "def _map_fb_memory(fbfid, fix_info):\n return mmap.mmap(fbfid, fix_info.smem_len, mmap.MAP_SHARED, mmap.PROT_READ | mmap.PROT_WRITE, offset=0)", "def save_video(foldername, songname, songlen, num_steps, output):\n num_steps_by_len = num_steps / songlen\n p = subprocess.Popen(['ffmpeg', '-f', 'image2', '-r', str(num_steps_by_len), '-i', '%d.png', '-c:v', 'libx264', '-pix_fmt', 'yuv420p', '-vf', 'pad=ceil(iw/2)*2:ceil(ih/2)*2', 'movie.mp4'], cwd=foldername)\n p.wait()\n\n p = subprocess.Popen(['ffmpeg', '-i', 'movie.mp4', '-i', '../audio_files/' + songname + '.mp3', '-map', '0:v', '-map', '1:a', '-c', 'copy', output], cwd=foldername)\n p.wait()", "def do_add_ink():\n clip = mpy.VideoClip(mix_video_ink, duration=13.0)\n clip.write_videofile(\"test_edited.mp4\", fps=24)", "def get_output_file(self, path, fps=30):\n return cv2.VideoWriter(\n filename=path,\n fourcc=cv2.VideoWriter_fourcc(*\"mp4v\"),\n fps=float(fps),\n frameSize=(self.display_width, self.display_height),\n isColor=True,\n )", "def process(in_path, out_path, mouth_height=50, mouth_width=50, frame_dur=1, capture_frame=None, out_img=None, show_boxes=True):\n # Get video capture from in_path.\n vc = cv2.VideoCapture(in_path) \n\n rval, frame = vc.read() if vc.isOpened() else (False, None)\n\n mouth_images = []\n\n #import pdb; pdb.set_trace()\n\n if rval:\n mouths = np.empty((0, mouth_height, mouth_width, frame.shape[2]))\n else:\n return # Skip this video since CV2 can't open it\n\n frame_no = 0\n while rval:\n if DEBUG:\n # Copy of original frame, for annotating.\n image = frame.copy()\n\n try:\n face_rect = locate_face(frame)\n except ValueError:\n print \"No face found for %s at frame %d. 
Skipping.\" % (in_path, frame_no)\n vc.release()\n return # Skip this video.\n\n if DEBUG:\n highlight_rect(image, face_rect, color=(255,255,255), thickness=2)\n\n mouth_rects = locate_mouth(frame)\n mouth = uniform_rect(select_mouth_candidate(mouth_rects, face_rect), face_rect, 50, 50)\n if not mouth:\n print \"No face found for %s at frame %d. Skipping.\" % (in_path, frame_no)\n vc.release()\n return # Skip this video.\n\n mouth_image = frame[mouth[1]:(mouth[1] + mouth[3]), mouth[0]:(mouth[0] + mouth[2]), :]\n mouth_images.append(mouth_image) \n\n if DEBUG:\n highlight_rect(image, mouth, color=(0,0,0), thickness=2) \n #cv2.imshow('Frame', mouth_image)\n cv2.imshow('Frame', image if show_boxes else frame)\n\n if frame_no == capture_frame:\n cv2.imwrite(out_img, image if show_boxes else frame)\n return\n\n cv2.waitKey(frame_dur)\n\n rval, frame = vc.read()\n frame_no += 1\n\n vc.release()\n\n mouths = np.asarray(mouth_images)\n\n savemat(out_path, {\"mouths\": mouths})", "def _post_proc_para_wrapper(pred_map_mmap_path, tile_info, func, func_kwargs):\n idx, tile_tl, tile_br = tile_info\n wsi_pred_map_ptr = np.load(pred_map_mmap_path, mmap_mode=\"r\")\n tile_pred_map = wsi_pred_map_ptr[tile_tl[0] : tile_br[0], tile_tl[1] : tile_br[1]]\n tile_pred_map = np.array(tile_pred_map) # from mmap to ram\n return func(tile_pred_map, **func_kwargs), tile_info", "def extract_vob(in_vob, guid):\n\t#Detect interlacing.\n\tmediainfo_command = \"mediainfo --Inform='Video;%ScanType%,%ScanOrder%' \" + in_vob\n\tprint(mediainfo_command)\n\tprocess = subprocess.Popen(mediainfo_command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\t(cout, cerr) = process.communicate()\n\texit_code = process.wait()\n\tif exit_code != 0:\n\t\traise Exception(\"Calling Mediainfo on {in_vob} failed with exit code {exit_code}.\".format(in_vob=in_vob, exit_code=exit_code))\n\tmediainfo_parts = cout.decode(\"utf-8\").split(\",\")\n\tis_interlaced = mediainfo_parts[0] == \"Interlaced\"\n\tfield_order = mediainfo_parts[1].lower().strip()\n\tprint(\"Interlace detection:\", is_interlaced, field_order, \"(\", mediainfo_parts, \")\")\n\n\tffmpeg_command = [\"ffmpeg\", \"-i\", in_vob]\n\tprint(ffmpeg_command)\n\tprocess = subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\t(cout, cerr) = process.communicate()\n\tprocess.wait() #Ignore the exit code. 
It always fails.\n\tvobinfo = cerr.decode(\"utf-8\")\n\ttracks = []\n\tfor match in re.finditer(r\" Stream #0:(\\d+)\\[0x[0-9a-f]+\\]: (\\w+): ([^\\n]+)\", vobinfo):\n\t\ttrack_nr = match.group(1)\n\t\ttrack_type = match.group(2)\n\t\ttrack_codec = match.group(3)\n\t\tnew_track = track.Track()\n\t\tnew_track.from_vob(track_nr, track_type, track_codec, is_interlaced, field_order)\n\t\tnew_track.file_name = guid + \"-T\" + str(new_track.track_nr) + \".\" + new_track.codec\n\t\tif new_track.type != \"unknown\":\n\t\t\ttracks.append(new_track)\n\n\t#Generate the parameters to pass to ffmpeg.\n\ttrack_params = [\"-i\", in_vob]\n\tfor track_metadata in tracks:\n\t\ttrack_params.append(\"-map\")\n\t\ttrack_params.append(\"0:\" + str(track_metadata.track_nr))\n\t\ttrack_params.append(\"-c\")\n\t\ttrack_params.append(\"copy\")\n\t\ttrack_params.append(track_metadata.file_name)\n\n\t#Extract all tracks.\n\tprint(\"---- Extracting tracks...\")\n\tffmpeg(*track_params)\n\n\treturn tracks", "def heatmap_video(path_in, path_out, frames_sec = 2, thresh = 2, maxValue = 3):\n\n cap = cv2.VideoCapture(path_in)\n fgbg = cv2.bgsegm.createBackgroundSubtractorMOG()\n\n num_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))\n fps = cap.get(cv2.CAP_PROP_FPS) # frames per second\n duration = round(num_frames / fps, 2) # duration of the video in seconds\n print('Total number of frames to process: {}'.format(num_frames))\n print('Duration of the video in seconds: {}'.format(duration))\n step = round(fps / frames_sec)\n\n first_iteration_indicator = 1\n for i in range(0, num_frames, step):\n\n if (first_iteration_indicator == 1):\n ret, frame = cap.read()\n first_frame = copy.deepcopy(frame)\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n height, width = gray.shape[:2]\n accum_image = np.zeros((height, width), np.uint8)\n first_iteration_indicator = 0\n\n else:\n cap.set(cv2.CAP_PROP_POS_FRAMES, i)\n print('Frame process... ' + str(i) + ' of ' + str(num_frames))\n print('Second process... 
' + str(int(cap.get(cv2.CAP_PROP_POS_MSEC)) / 1000) + ' of ' + str(duration))\n print('...')\n ret, frame = cap.read() # read a frame\n\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # convert to grayscale\n\n fgmask = fgbg.apply(gray) # remove the background\n ret, th1 = cv2.threshold(fgmask, thresh, maxValue, cv2.THRESH_BINARY)\n\n # add to the accumulated image\n accum_image = cv2.add(accum_image, th1)\n\n # apply a color map\n # COLORMAP_PINK also works well, COLORMAP_BONE is acceptable if the background is dark\n color_image = cv2.applyColorMap(accum_image, cv2.COLORMAP_HOT)\n\n # overlay the color mapped image to the first frame\n result_overlay = cv2.addWeighted(first_frame, 0.7, color_image, 0.7, 0)\n\n # save the final overlay image\n cv2.imwrite(path_out, result_overlay)\n\n # cleanup\n cap.release()\n cv2.destroyAllWindows()", "def main(path):\n logger.info(f'Processing video file {path}')\n # Extract audio\n audio_file = extract_audio(path, pipeline_config.audio_target_dir)\n\n # Generate sound classification results and speech recogniser results\n sound_results = SoundRecogniser().process_file(audio_file)\n sound_results = process_overlap(sound_results)\n speech_results = SpeechRecogniser().process_file(audio_file)\n\n # NLP\n wrds = get_words(speech_results)\n nlp = SpaCyNaturalLanguageProcessor(pipeline_config.spacy_model)\n custom_nlp = SpaCyNaturalLanguageProcessor(pipeline_config.custom_spacy_model)\n processor = nlp.get_spacy_results_processor(wrds, speech_results)\n custom_processor = custom_nlp.get_spacy_results_processor(wrds, speech_results)\n chunk_results = processor.process_speech_results_chunk()\n ner_results = processor.process_speech_results_ner()\n ner_results.extend(custom_processor.process_speech_results_ner())\n match_results = processor.process_speech_results_match()\n speech_results = nlp.process_spurious_words(speech_results, chunk_results)\n\n # Add Speech recogniser results, sound classification results and NLP results to a subtitle file\n subs_1 = save_to_subtitles(speech_results,\n lambda speech_result: speech_result['word'])\n subs_1 = compress_subs(subs_1)\n subs_2 = save_to_subtitles(sound_results,\n lambda sound_result: sound_result['class'])\n subs_2 = flatten_subs(subs_2)\n subs_3 = save_to_subtitles(chunk_results,\n lambda chunk_result: f'{chunk_result[\"word\"]} ({chunk_result[\"head\"]})')\n subs_4 = save_to_subtitles(ner_results,\n lambda ner_result: f'{ner_result[\"type\"]} {ner_result[\"word\"]}')\n subs_5 = save_to_subtitles(match_results,\n lambda match_result: match_result[\"word\"])\n\n combined_subs = append_subs(None, subs_1, style='bottom')\n combined_subs = append_subs(combined_subs, subs_2, exclude=['bottom'], style='top', formatter=lambda x: f'({x})')\n combined_subs = append_subs(combined_subs, subs_3, style='left')\n combined_subs = append_subs(combined_subs, subs_4, style='right')\n combined_subs = append_subs(combined_subs, subs_5, style='bottom_left_pred')\n combined_subs = remove_tiny_subs(combined_subs, duration_millis=1000, left_millis=None,\n right_millis=None, style='top')\n subtitle_file_name = os.path.splitext(path)[0] + '.ass'\n create_styles(combined_subs)\n combined_subs.save(subtitle_file_name)\n\n # Burn to a video\n burn_subtitles_into_video(path, subtitle_file_name, pipeline_config.audio_target_dir)\n logger.info(f'Done processing {audio_file}')", "def load(fidfile, procfile):\r\n dic, data = _ng.varian.read_fid(fidfile)\r\n procs = _ng.varian.read_procpar(procfile)\r\n\r\n offset = [float(i) for i in 
procs['tof']['values']][0]\r\n magfreq = [float(i) for i in procs['sfrq']['values']][0]\r\n rangeHz = [float(i) for i in procs['sw']['values']][0]\r\n\r\n rangeppm = rangeHz / magfreq\r\n offsetppm = offset / magfreq\r\n\r\n # Fourier transform\r\n data = _ng.proc_base.fft(data)\r\n data = data / _np.max(data)\r\n\r\n u = data.real.sum(axis=0)\r\n v = data.imag.sum(axis=0)\r\n\r\n w = _np.linspace(rangeppm - offsetppm, -offsetppm, u.size)\r\n\r\n result = _containers.Data(w[::-1], u[::-1], v[::-1])\r\n return result", "def main():\n global tmpl_ball, pocket_templates, pocket_markers, last_best_cueball, last_confirmed_cueball, CURRENT_FRAME, out_file\n\n start_time = datetime.datetime.now()\n\n # Open output files\n\n out_file = open(here(\"confirmed_cueballs.txt\"), 'w')\n\n # Initialize pygame\n\n pygame.init()\n pygame.display.set_caption(\"Snooker - main info window\")\n window = pygame.display.set_mode((704, 400))\n\n # Load and initialize template images\n\n tmpl_ball = cvLoadImage(BALL_TMPL, CV_LOAD_IMAGE_COLOR)\n if not tmpl_ball:\n raise Exception(\"Failed to load ball template image\")\n\n # Load pocket templates, find marker pixel in template and remember it\n\n for pocket in ['tl', 'tr', 'bl', 'br']: \n pocket_tmpl = cvLoadImage(globals()['POCKET_%s_TMPL' % pocket.upper()], CV_LOAD_IMAGE_COLOR)\n if not pocket_tmpl:\n raise Exception(\"Failed to load pocket '%s' template\" % pocket)\n pocket_templates[pocket] = pocket_tmpl\n pocket_markers[pocket] = findpixel(pocket_tmpl, cvScalar(255, 0, 255))\n if pocket_markers[pocket]:\n if DEBUG: print(\"Found marker for pocket %s at %d:%d\" % \n (pocket.upper(), pocket_markers[pocket].x, pocket_markers[pocket].y))\n\n stream = pyffmpeg.VideoStream()\n\n stream.open(VIDEO_FILE)\n\n fps = stream.tv.get_fps()\n duration_f = stream.tv.duration()\n\n start_frame = int(START*fps)\n end_frame = int(END*fps)\n interval = int(INTERVAL*fps)\n\n length = float(end_frame-start_frame)/float(fps) # Seconds\n\n i = start_frame\n\n while i < end_frame:\n CURRENT_FRAME = i\n\n # Calculate info about the total progress and display it\n\n percentage = float(i-start_frame)/float(end_frame-start_frame)*100.0\n estimated_length = 0\n if i > start_frame:\n estimated_length = float(total_seconds((datetime.datetime.now()-start_time))) / percentage * 100.0\n\n print(\"Frame #%d (%d-%d) %.1f%%\" % (i, start_frame, end_frame, percentage))\n pygame.display.set_caption(\"Snooker - main info window - Frame %d - %.1f%% (est. total %.1fmin)\" % (i, percentage, estimated_length/60.0))\n\n image = stream.GetFrameNo(i)\n image_ipl = adaptors.PIL2Ipl(image)\n\n table = get_table(image_ipl)\n cueballs = None\n best_ball = None\n\n if table:\n cueballs = find_cueballs(image_ipl, table)\n\n if cueballs:\n\n # See if the best cueball we found now, is the best cueball found in the previous run,\n # which would mean that it's standing still. 
Only consider the best matches.\n\n if not last_best_cueball:\n last_best_cueball = cueballs[0]\n else:\n\n distance_to_last = math.sqrt( (last_best_cueball.x-cueballs[0].x)**2 + (last_best_cueball.y-cueballs[0].y)**2 )\n distance_to_last_confirmed = math.sqrt( (last_confirmed_cueball.x-cueballs[0].x)**2 + \n (last_confirmed_cueball.y-cueballs[0].y)**2 )\n\n if distance_to_last < CUEBALL_CONFIRM_DISTANCE and distance_to_last_confirmed > CUEBALL_NOMOVEMENT_DISTANCE:\n if cueballs[0].confidence >= 0.72: # Extra check to remove most of the false positives\n # CONFIRMED\n last_confirmed_cueball = cueballs[0]\n cueballs[0].confirmed = True\n if DEBUG: print(\"CONFIRMED cueball %s\" % cueballs[0])\n else:\n if DEBUG: print(\"Dropped the ball because distance condition failed: %.2f<%d and %.2f>%d\" % \n (distance_to_last, CUEBALL_CONFIRM_DISTANCE, \n distance_to_last_confirmed, CUEBALL_NOMOVEMENT_DISTANCE))\n\n # If confirmation or sequence fail, reset and try the whole thing again\n last_best_cueball = False\n\n image_interesting = draw_interesting_stuff(image, table, cueballs)\n\n window.blit(image_interesting, (0,0))\n pygame.display.flip()\n\n store_results(image_interesting, table, cueballs)\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n out_file.close()\n sys.exit(0)\n else:\n pass\n #print event\n\n # Skip <interval> frames\n i += interval\n\n out_file.close()", "def make_video(input_files, width=0, height=0, frame_rate=24, crf=20, output_path=\"video.mp4\"):\n if isinstance(input_files, list):\n from PIL import Image # pylint: disable=C0415\n\n with Image.open(input_files[0]) as img:\n width, height = img.size\n tmp_dir = \"tmp_ffmpeg_dir\"\n os.mkdir(tmp_dir)\n if width % 2 != 0:\n print(f\"Width ({width}) not divisible by 2\")\n width -= 1\n if height % 2 != 0:\n print(f\"Height ({width}) not divisible by 2\")\n height -= 1\n for i, inp in enumerate(input_files):\n shutil.copy(inp, os.path.join(tmp_dir, f\"{i:06d}.png\"))\n inputs = f\"{tmp_dir}/%06d.png\"\n command = ffmpeg_common_args(frame_rate, inputs, width, height, crf, output_path)\n ret = os.system(command)\n assert ret == 0, \"ffmpeg failed to generate video file.\"\n for i in range(len(input_files)):\n os.remove(os.path.join(tmp_dir, f\"{i:06d}.png\"))\n os.rmdir(tmp_dir)\n elif isinstance(input_files, str):\n assert width != 0 and height != 0\n command = ffmpeg_common_args(frame_rate, input_files, width, height, crf, output_path)\n ret = os.system(command)\n assert ret == 0, \"ffmpeg failed to generate video file.\"\n else:\n assert (\n False\n ), f'input_files should be list (of files) or str (of file template, e.g., \"%04d.png\") instead of {type(input_files)}'", "def new_proc(start_gdb=False, val=None):\n p = process(binary.path)\n if start_gdb is True:\n attach_gdb(p)\n return p", "def rescale_video(video_fn, w, h, fps, dir, new_dir, common_suffix, dict_video_length, ffmpeg, crf=17):\n\n # Output video_name\n video_id = video_fn.replace(dir, '').replace(common_suffix, '')\n video_fn_rescaled = video_fn.replace(dir, new_dir)\n video_fn_rescaled = video_fn_rescaled.replace(common_suffix, common_suffix.lower())\n\n # Create the dir\n video_dir_to_create = '/'.join(video_fn_rescaled.split('/')[:-1])\n os.makedirs(video_dir_to_create, exist_ok=True)\n\n # Check if the file already exists\n if os.path.isfile(video_fn_rescaled):\n print(\"{} already exists\".format(video_fn_rescaled))\n else:\n subprocess.call(\n '{ffmpeg} -i {video_input} -vf scale={w}:{h} -crf {crf} -r {fps} -y {video_output} -loglevel 
panic'.format(\n ffmpeg=ffmpeg,\n video_input=video_fn,\n h=h,\n w=w,\n fps=fps,\n video_output=video_fn_rescaled,\n crf=crf\n ), shell=True)\n\n # Get the duration of the new super_video (in sec)\n duration_sec = get_duration(video_fn_rescaled)\n duration_frames = int(duration_sec * fps)\n\n # update the dict id -> length\n dict_video_length[video_id] = duration_frames\n\n return video_fn_rescaled", "def detect_from_video(config: Dict):\n video = config['inference']['video_input']['video_input_path']\n vp = VideoProcessing(video=video)\n vp.generate_frames(export_path=config['inference']['video_input']['video_to_frames_export_path'])\n if config['inference']['video_input']['video_to_frames_export_path'] == config['inference']['predicted_frames_export_path']:\n print(\"[Warning]... You have given Video to frame path same as prediction output path /nPredicted output will overwrite video to frame\")\n img_height = config['inference']['img_height']\n img_width = config['inference']['img_width']\n model = ssd_300(image_size=(img_height, img_width, 3),\n n_classes=config['inference']['n_classes'],\n mode='inference',\n l2_regularization=0.0005,\n scales=[0.1, 0.2, 0.37, 0.54, 0.71, 0.88, 1.05], # The scales for MS COCO are [0.07, 0.15, 0.33, 0.51, 0.69, 0.87, 1.05]\n aspect_ratios_per_layer=[[1.0, 2.0, 0.5],\n [1.0, 2.0, 0.5, 3.0, 1.0/3.0],\n [1.0, 2.0, 0.5, 3.0, 1.0/3.0],\n [1.0, 2.0, 0.5, 3.0, 1.0/3.0],\n [1.0, 2.0, 0.5],\n [1.0, 2.0, 0.5]],\n two_boxes_for_ar1=True,\n steps=[8, 16, 32, 64, 100, 300],\n offsets=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5],\n clip_boxes=False,\n variances=[0.1, 0.1, 0.2, 0.2],\n normalize_coords=True,\n subtract_mean=[123, 117, 104],\n swap_channels=[2, 1, 0],\n confidence_thresh=0.5,\n iou_threshold=0.45,\n top_k=200,\n nms_max_output_size=400)\n\n # Load the trained weights into the model.\n weights_path = config['inference']['weights_path']\n\n model.load_weights(weights_path, by_name=True)\n \n # Working with image\n all_images = glob.glob(f\"{config['inference']['video_input']['video_to_frames_export_path']}/*/*\")\n \n # Setting Up Prediction Threshold\n confidence_threshold = config['inference']['confidence_threshold']\n \n # Setting Up Classes (Note Should be in same order as in training)\n classes = config['inference']['classes']\n \n vp.existsFolder(f\"{config['inference']['predicted_frames_export_path']}/{video.split('.')[0]}\")\n # Working with image\n for current_img in tqdm(all_images):\n current_img_name = current_img.split('/')[-1]\n orig_image = cv2.imread(current_img)\n input_images = [] # Store resized versions of the images here\n img = image.load_img(current_img, target_size=(img_height, img_width))\n img = image.img_to_array(img) \n input_images.append(img)\n input_images = np.array(input_images)\n \n # Prediction\n y_pred = model.predict(input_images)\n\n # Using threshold\n y_pred_thresh = [y_pred[k][y_pred[k,:,1] > confidence_threshold] for k in range(y_pred.shape[0])]\n \n # Drawing Boxes\n for box in y_pred_thresh[0]:\n xmin = box[2] * orig_image.shape[1] / img_width\n ymin = box[3] * orig_image.shape[0] / img_height\n xmax = box[4] * orig_image.shape[1] / img_width\n ymax = box[5] * orig_image.shape[0] / img_height\n \n label = f\"{classes[int(box[0])]}: {box[1]:.2f}\"\n cv2.rectangle(orig_image, (int(xmin), int(ymin)), (int(xmax),int(ymax)), (255, 0, 0), 2)\n cv2.putText(orig_image, label, (int(xmin), int(ymin)), cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 2, cv2.LINE_AA)\n 
cv2.imwrite(f\"{config['inference']['predicted_frames_export_path']}/{video.split('.')[0]}/{current_img_name}\", orig_image)\n \n # Creating video\n vp.generate_video(import_path=config['inference']['predicted_frames_export_path'],\n export_path=config['inference']['video_input']['video_output_path'])", "def make_movie_cart(field='uu1', datadir='data/', proc=-1, extension='xz',\n format='native', tmin=0., tmax=1.e38, amin=0., amax=1.,\n transform='', oldfile=False):\n\n import pylab as plt\n import sys\n from pencil.files.var import read_var\n\n mkmvvar = read_var(trimall=True) \n r2d,phi2d = np.meshgrid(mkmvvar.x,mkmvvar.y)\n x2d=r2d*np.cos(phi2d)\n y2d=r2d*np.sin(phi2d)\n\n datadir = os.path.expanduser(datadir)\n if proc < 0:\n filename = datadir + '/slice_' + field + '.' + extension\n else:\n filename = datadir + '/proc' + \\\n str(proc) + '/slice_' + field + '.' + extension\n\n # Read the global dimensions.\n dim = read_dim(datadir, proc)\n if dim.precision == 'D':\n precision = 'd'\n else:\n precision = 'f'\n\n # Set up slice plane.\n if extension == 'xy' or extension == 'Xy':\n hsize = dim.nx\n vsize = dim.ny\n if extension == 'xz':\n print('only works for xy')\n sys.stop\n if extension == 'yz':\n print('only works for xy')\n sys.stop\n plane = np.zeros((vsize, hsize), dtype=precision)\n\n infile = npfile(filename, endian=format)\n\n files = []\n fig = plt.figure(figsize=(5, 10))\n ax = fig.add_subplot(111)\n\n ifirst = True\n islice = 0\n while True:\n try:\n raw_data = infile.fort_read(precision)\n except ValueError:\n break\n except TypeError:\n break\n\n if oldfile:\n t = raw_data[-1]\n plane = raw_data[:-1].reshape(vsize, hsize)\n else:\n t = raw_data[-2]\n plane = raw_data[:-2].reshape(vsize, hsize)\n\n if transform:\n exec('plane = plane' + transform)\n\n if t > tmin and t < tmax:\n ax.set_aspect('equal')\n ax.cla()\n ax.contourf(x2d, y2d, plane, 256)\n fname = '_tmp%03d.png' % islice\n print('Saving frame' + fname)\n fig.savefig(fname)\n files.append(fname)\n\n if ifirst:\n #print \"----islice----------t---------min-------max-------delta\" # Python 2\n print(\"----islice----------t---------min-------max-------delta\")\n #print \"%10i %10.3e %10.3e %10.3e %10.3e\" % \\ # Python 2\n #(islice, t, plane.min(), plane.max(), plane.max() - plane.min()) # Python 2\n print(\"{0:10} {1:10.3e} {2:10.3e} {3:10.3e} {4:10.3e}\".format(islice, t, plane.min(), plane.max(), plane.max() - plane.min()))\n\n ifirst = False\n islice += 1\n\n #print 'Making movie animation.mpg - this make take a while'\n print('Making movie animation.mpg - this make take a while')\n # SC: Not all systems use mencoder. Need to change this into ffmpeg.\n os.system(\"mencoder 'mf://_tmp*.png' -mf type=png:fps=24 -ovc lavc -lavcopts vcodec=wmv2 -oac copy -o animation.mpg\")\n os.system(\"rm _tmp*.png\")\n infile.close()", "def convert_video_path_and_save(video_path, output_path=\"output.mp4\", temp_folder = \"./temp\",\n frame_frequency=24, image_reducer=100, fontSize=10, spacing=1.1, maxsize=None, chars=\" .*:+%S0#@\",\n logs=False, processes=4, progress_tracker=None):\n\n if logs:\n start_time = time.time()\n print (\"Converting video...\")\n \n # set up a capture temporarily so we can grab some basic info about it\n capture = cv2.VideoCapture(video_path)\n if not capture.isOpened():\n print (\"Could not read video. 
Please enter a valid video file!\")\n exit(0)\n\n fps = capture.get(cv2.CAP_PROP_FPS)\n bitrate = int(capture.get(cv2.CAP_PROP_BITRATE))\n total_frames = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))\n frames_included = int(total_frames / frame_frequency)\n # total_frames / fps gives us our video duration.\n video_duration = total_frames / fps\n # frames included / video duration gives new fps\n new_fps = (total_frames / frame_frequency) / video_duration\n\n capture.release()\n\n # First, we grab all the frames we need and store them in a temp folder\n # After that, we convert all the image frames in the temp folder, and save them back in the temp folder\n # Then, we write them to video and save to disk\n # To utilize mutli processing, we separate grabbing frames and converting the frames into batches\n\n while os.path.isdir(temp_folder):\n temp_folder += \"_\"\n temp_folder += \"/\"\n os.mkdir(temp_folder)\n\n # initial setup\n # we divide our work into batches\n batches = processes\n frames_per_batch = int(total_frames / batches / frame_frequency)\n if progress_tracker is None:\n progress_tracker = Value(\"f\", 0, lock=True)\n # progress: saved frames + converted frames + written frames\n progress_step = 100 / (frames_included * 3)\n\n # grab the frames, and write to separate batch folders\n save_frames_processes = []\n for batch in range(batches):\n starting_frame = batch * frames_per_batch * frame_frequency\n batch_folder = temp_folder + str(batch) + \"/\"\n os.mkdir(batch_folder)\n args = (\n starting_frame,\n starting_frame + frames_per_batch * frame_frequency,\n video_path,\n batch_folder,\n frame_frequency,\n logs,\n progress_tracker,\n progress_step\n )\n p = Process(target=_save_frames, args=args)\n p.daemon = True\n p.start()\n save_frames_processes.append(p)\n for p in save_frames_processes:\n p.join()\n\n # convert all the frames in each batch folder\n convert_processes = []\n for batch in range(batches):\n batch_folder = temp_folder + str(batch) + \"/\"\n args = (\n batch_folder,\n frames_per_batch,\n image_reducer,\n fontSize, spacing, maxsize, chars,\n logs, progress_tracker, progress_step\n )\n p = Process(target=_convert_batch, args=args)\n p.daemon = True\n p.start()\n convert_processes.append(p)\n for p in convert_processes:\n p.join()\n\n # if no extension was assigned, automatically assign .mp4\n output_name, output_ext = os.path.splitext(output_path)\n if output_ext == \"\":\n output_ext = \".mp4\"\n # if final output path was specified, then modify it (append _Copy to it)\n final_output_path = output_name + output_ext\n while os.path.isfile(final_output_path):\n if logs : print (final_output_path, \"already exists!\")\n final_output_path = os.path.splitext(final_output_path)[0] + \"_Copy\" + output_ext\n\n # video settings\n fourcc = cv2.VideoWriter_fourcc(*'mp4v')\n video_out = imageio.get_writer(final_output_path, fps=new_fps, quality=None, bitrate=(bitrate * 1024 * 2.5))\n size = None\n\n # write images to new video\n for batch in range(1, batches + 1):\n batch_folder = temp_folder + str(batch - 1) + \"/\"\n for i in range(1, frames_per_batch + 1):\n img = cv2.imread(batch_folder + str(i) + \".jpg\", 2)\n if size is None:\n height, width = img.shape\n size = (width, height)\n video_out.append_data(img)\n with progress_tracker.get_lock():\n progress_tracker.value += progress_step\n if logs : print (\"Progress: %.4f%%\" % progress_tracker.value, end=\"\\r\")\n video_out.close()\n shutil.rmtree(temp_folder)\n\n # when we are done, there might be some rounding errors 
when converting some stuff to integers, thus it doesn't appear to be done\n # So we just simply set it to 100\n with progress_tracker.get_lock():\n progress_tracker.value = 100\n\n if logs:\n print (\"=\" * 30)\n print (\"SUMMARY:\")\n print (\"-\" * 20)\n print (\"Progress: %.4f%%\" % progress_tracker.value)\n print (\"Total frames found:\", str(total_frames))\n print (\"Frames included and converted:\", str(frames_per_batch * batches))\n print (\"Original FPS:\", str(fps))\n print(\"New FPS:\", str(new_fps))\n print (\"Resolution:\", str(size))\n print (\"Saved to\", final_output_path)\n print (\"Time took: %.4f secs\" % (time.time() - start_time))", "def gen_fps():\n global data_src ,output_dir \n logger = TaskFileLogger(\"GenFP\")\n\n h_vars = load_hydro_var()\n if not os.path.exists(output_dir):\n os.mkdir(output_dir)\n\n for h_type,var_d in h_vars.items():\n print \"considering %s\" %h_type\n\n t_output_dir = os.path.join(output_dir,h_type)\n if not os.path.exists(t_output_dir):\n print \"creating path %s\" %t_output_dir\n os.mkdir(t_output_dir)\n logger.log(\"%s started\" %(h_type))\n\n for fname in glob.glob(data_src):\n complex_id = os.path.basename(fname).split('.')[0] \n fp_path = os.path.join(t_output_dir,complex_id + \".fp\" )\n if os.path.exists(fp_path):\n #print \"%s processed\" %complex_id\n continue\n print \"processing %s,fp saved as %s\" %(fname , fp_path )\n c = Complex(fname,hydro_dict = var_d)\n c.get_fp()\n c.write_fp_to_file(fp_path)\n\n logger.log(\"%s finished\" %(h_type))", "def check_video_format(movie_file, desired_format='.mp4', original_format='.avi'):\n\n if not os.path.isfile(movie_file+original_format):\n print 'Error. avi file does not exist:'+movie_file+'.avi'\n if not os.path.isfile(movie_file+desired_format):\n cmd = ['ffmpeg']\n cmd += ['-i', movie_file+original_format]\n cmd += [movie_file+desired_format]\n cmd_string = ''.join([\"%s \" % el for el in cmd])\n #print '-->Running: ', cmd_string\n p = subprocess.Popen(cmd, shell=False)\n p.wait()", "def vcf_to_plink(locus, output_directory ,vcf, population):\n logging.info(\"Converting {0} to PLINK format\".format(vcf))\n command = __VCF_TO_PLINK_TEMPLATE__.format(vcf, locus)\n run_command(command)\n try:\n #Rename\n os.rename(locus +'.bed', os.path.join(output_directory, locus + '.' + population + '.bed'))\n os.rename(locus +'.bim', os.path.join(output_directory, locus + '.' + population + '.bim'))\n os.rename(locus +'.fam', os.path.join(output_directory, locus + '.' 
+ population + '.fam'))\n #Remove\n os.remove(locus + '.log')\n os.remove(locus + '.nosex')\n except OSError as e:\n logging.error(\"Could not move or remove PLINK files {0}\".format(e))\n sys.exit(OS_ERROR)", "def __init__(self, name, kill_flag, frame_buffer, flushing, query_idx=0, buffer_size=6000, hdf_resize=30000, min_flush=200, n_cams=1, resolution=None):\n super(MovieSaver, self).__init__()\n self.daemon = True\n\n # Saving params\n if not name.endswith('.h5'):\n name += '.h5'\n self.name = name\n self.buffer_size = buffer_size # should be overkill, since flushing will do the real job of saving it out\n self.hdf_resize = hdf_resize\n self.min_flush = min_flush\n\n # Cam params\n self.n_cams = n_cams\n self.resolution = resolution\n\n # Flags and containers\n self.saving_complete = mp.Value('b', False)\n self.kill_flag = kill_flag\n self.flushing = flushing\n self.frame_buffer = frame_buffer\n \n # Queries\n self.query_idx = query_idx #which cam gets queried\n self.query_flag = mp.Value('b',False)\n self.query_queue = mp.Array(ctypes.c_uint8, np.product([self.resolution[self.query_idx][0], self.resolution[self.query_idx][1]]))\n self.query_queue_ts = mp.Value('d',0.)\n \n self.start()", "def make_movie_crossflow(field='uu1', datadir='data/', proc=-1, extension='yz',\n format='native', tmin=0., tmax=1.e38, amin=0., amax=1.,\n transform='', oldfile=False):\n\n import pylab as plt\n import matplotlib.patches as patches\n\n datadir = os.path.expanduser(datadir)\n if proc < 0:\n filename = datadir + '/slice_' + field + '.' + extension\n else:\n filename = datadir + '/proc' + \\\n str(proc) + '/slice_' + field + '.' + extension\n\n # Read the global dimensions.\n dim = read_dim(datadir, proc)\n if dim.precision == 'D':\n precision = 'd'\n else:\n precision = 'f'\n\n # Set up slice plane.\n if extension == 'xy' or extension == 'Xy':\n hsize = dim.nx\n vsize = dim.ny\n if extension == 'xz':\n hsize = dim.nx\n vsize = dim.nz\n if extension == 'yz':\n hsize = dim.ny\n vsize = dim.nz\n plane = np.zeros((vsize, hsize), dtype=precision)\n\n infile = npfile(filename, endian=format)\n\n files = []\n fig = plt.figure(figsize=(5, 10))\n ax = fig.add_subplot(111)\n ax.add_patch(patches.Rectangle(\n (220,0),\n 40,\n 320,\n color='gray'\n )\n )\n#\n# ax.add_patch(patches.Rectangle(\n# (220,0),\n# 80,\n# 240,\n# hatch='/'\n# )\n# )\n\n ifirst = True\n islice = 0\n while True:\n try:\n raw_data = infile.fort_read(precision)\n except ValueError:\n break\n except TypeError:\n break\n\n if oldfile:\n t = raw_data[-1]\n plane = raw_data[:-1].reshape(vsize, hsize)\n else:\n t = raw_data[-2]\n plane = raw_data[:-2].reshape(vsize, hsize)\n\n if transform:\n exec('plane = plane' + transform)\n\n if t > tmin and t < tmax:\n ax.cla()\n ax.imshow(plane, vmin=amin, vmax=amax)\n ax.add_patch(patches.Rectangle(\n (220,0),\n 40,\n 320,\n color='gray'\n )\n )\n fname = '_tmp%03d.png' % islice\n print('Saving frame' + fname)\n fig.savefig(fname)\n files.append(fname)", "def start_recording(codec, filename=time.strftime(\"%Y-%m-%d_%H-%M-%S\")):\n global video_writer\n folder = 'video_out/' # eventually replace this with the SD card folder\n # TODO: also include branch name and/or commit ID\n path = folder + filename + '.' 
+ filetype\n print \"Saving video to: %s\" % path\n\n height = videoinput.frame_height\n if settings.sidebyside:\n width = 2*videoinput.frame_width\n else:\n width = videoinput.frame_width\n\n try:\n video_writer = cv2.VideoWriter(path, codec, 30, (width, height))\n except:\n print \"Failed to open video file for writing!\"" ]
[ "0.6444913", "0.6132553", "0.6021357", "0.6021357", "0.5793323", "0.575927", "0.56994724", "0.5620519", "0.5565024", "0.5488642", "0.54845893", "0.5479945", "0.5418265", "0.5404724", "0.53233945", "0.5318268", "0.5299321", "0.5261676", "0.52610755", "0.51977015", "0.51628315", "0.5160664", "0.5157827", "0.5144048", "0.5141774", "0.51226306", "0.51177037", "0.5109612", "0.5085291", "0.5074146", "0.50662863", "0.50474757", "0.50340366", "0.50199586", "0.5011041", "0.5002775", "0.49982056", "0.49841854", "0.4983246", "0.49772975", "0.49747086", "0.4967472", "0.49667382", "0.4964631", "0.4955116", "0.49482158", "0.4946755", "0.49455795", "0.49448422", "0.4937976", "0.49160227", "0.4912704", "0.49056038", "0.48997715", "0.48955384", "0.48913154", "0.48893067", "0.48846418", "0.488296", "0.4865082", "0.48621365", "0.4856423", "0.4849818", "0.4848213", "0.48274335", "0.48259526", "0.48196906", "0.4819449", "0.48163456", "0.48151338", "0.48045516", "0.48039013", "0.48005536", "0.47901854", "0.47781155", "0.47677162", "0.47668636", "0.47591752", "0.4755238", "0.4751489", "0.4751318", "0.4750193", "0.47489446", "0.47462398", "0.47455513", "0.4745031", "0.47406575", "0.47356924", "0.47326368", "0.47321242", "0.47300598", "0.47297138", "0.47185674", "0.47185424", "0.47160232", "0.47107762", "0.47041655", "0.46944857", "0.4690744", "0.4688293" ]
0.8082349
0
Find the onsets in the array representing the synchronization light. The function assumes the onsets are roughly periodic, with inter-onset intervals between 0.5T and 1.5T (where T is the nominal period). The function can also fix missing onsets.
def find_sync_light_onsets(sync_light, invert=True, fixmissing=False): # -- Find changes in synch light -- sync_light_diff = np.diff(sync_light, prepend=0) if invert: sync_light_diff = -sync_light_diff sync_light_diff[sync_light_diff < 0] = 0 sync_light_threshold = 0.2*sync_light_diff.max() sync_light_onset = sync_light_diff > sync_light_threshold # -- Find period of sync_light_onset -- sync_light_onset_ind = np.where(sync_light_onset)[0] sync_light_onset_diff = np.diff(sync_light_onset_ind) # In units of frames expected_onset_period = np.median(sync_light_onset_diff) # In units of (float) frames # -- Remove repeated onsets -- onset_freq_upper_threshold = int(1.5 * expected_onset_period) onset_freq_lower_threshold = int(0.5 * expected_onset_period) repeated_onsets = sync_light_onset_diff < onset_freq_lower_threshold repeated_onsets_ind = np.where(repeated_onsets)[0] fixed_sync_light_onset = sync_light_onset.copy() fixed_sync_light_onset[sync_light_onset_ind[repeated_onsets_ind+1]] = False # -- Fix missing onsets -- if fixmissing: missing_next_onsets = sync_light_onset_diff > onset_freq_upper_threshold missing_next_onsets_ind = np.where(missing_next_onsets)[0] for indm, missing_onset_ind in enumerate(missing_next_onsets_ind): onset_diff = sync_light_onset_diff[missing_onset_ind] n_missing = int(np.round(onset_diff / expected_onset_period))-1 #print(n_missing) last_onset_ind = sync_light_onset_ind[missing_onset_ind] next_onset_ind = sync_light_onset_ind[missing_onset_ind+1] period_missing = (next_onset_ind - last_onset_ind)//(n_missing+1) new_onset_inds = last_onset_ind + np.arange(1, n_missing+1)*period_missing #print([last_onset_ind, next_onset_ind]) #print(new_onset_inds) fixed_sync_light_onset[new_onset_inds] = True return fixed_sync_light_onset
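A minimal usage sketch for the function above, assuming it is in scope and that sync_light is a 1-D NumPy array of per-frame light intensities. The signal below is made up for illustration (it is not part of the original corpus entry): a bright frame dims briefly about every 30 frames, and one flash is deliberately removed so fixmissing=True can reconstruct it.

import numpy as np

# Hypothetical inverted sync signal: intensity drops mark the onsets.
sync_light = np.ones(300)
sync_light[30::30] = 0.2   # a dimmed frame every 30 frames
sync_light[150] = 1.0      # simulate one missed flash

onsets = find_sync_light_onsets(sync_light, invert=True, fixmissing=True)
print(np.where(onsets)[0])  # onset frame indices, including the reconstructed one at 150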
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_onsets(self):\n get_onsets = ess.OnsetRate()\n # onset_times is np array\n self.onset_times, onset_rate = get_onsets(self.audio)\n # Onset as sample number in the audio signal\n index2delete = []\n previous_time = -9999999\n for index, itime in enumerate(self.onset_times):\n if (itime - previous_time) < 2*self.stroke_length:\n index2delete.append(index)\n else:\n previous_time = itime\n self.onset_times = np.delete(self.onset_times, index2delete)\n self.onset_samples = [int(self.sampling_rate*i) for i in self.onset_times]", "def DetectPulseOnset(self, asig, fs, wMS):\n # the percentage of the maximal value of the slope sum function\n # to detect the onset\n AmplitudeRatio = .01\n\n # low pass filter\n sig = self.zpIIR(asig, 3, .1, 20, 5 * 2/fs)\n wSmp = int(np.round(wMS*fs/1000))\n\n BlankWindowRatio = .9\n\n # delta x\n diffsig = np.diff(sig)\n\n z = np.empty((sig.size - 1 - wSmp, 1))\n z[:] = np.NaN\n\n # calculate slope sum function\n for i in range(wSmp,sig.size-1):\n subsig = diffsig[i-wSmp:i]\n z[i-wSmp] = np.sum(subsig[subsig>0])\n\n z0 = np.mean(z)\n onset = [0]\n tPnt = []\n zThres = 0\n blankWin = int(np.round(400*fs/1000))\n subIdx = np.r_[onset[0]: onset[0] + 4*blankWin + 1]\n MedianArrayWinSize = 5\n\n # this value controls the final acceptance\n PrcofMaxAMP = .2\n SSFAmpArray = np.ones((MedianArrayWinSize,1))*(np.max(z) - np.min(z)) * PrcofMaxAMP\n # the percentage of maximal amplitude for threshold crossing\n DetectionThreshold = .2\n SSFCrossThresholdArray = np.ones((MedianArrayWinSize,1))*z0*DetectionThreshold\n idx = 1\n\n # Keep loop going while onsets detected\n while(1):\n\n # look for the first location where z > z0\n try:\n\n # Look in z[subIdx] (and make sure it doesn't go past z's size)\n # find first index where z > the mean of z\n tempIndex = np.trim_zeros(subIdx*(z.size>subIdx), 'b')\n ix = np.amin(np.where(z[tempIndex] > z0)[0])\n except:\n break\n\n ix = tempIndex[ix]\n tPnt.append(ix)\n srcWin = np.r_[np.maximum(0,ix - wSmp): ix + wSmp]\n #if the window has passed the length of the data, then exit\n if srcWin[-1] >= len(z):\n break\n\n # This section of code is to remove the initial zero-region in the SSF function before looking for onset (if such region exists)\n zPnt = np.where(z[srcWin] == 0)\n\n if zPnt[0].size != 0:\n zPnt = srcWin[zPnt[0]]\n\n if np.any(zPnt < ix):\n srcWin = np.r_[zPnt[np.max(np.where(zPnt < ix))]: ix + wSmp]\n\n # accept the window\n if ( np.max(z[srcWin]) - np.min(z[srcWin]) > zThres):\n\n # calculate the threshold for next cycle\n SSFAmp = (np.max(z[srcWin]) - np.min(z[srcWin])) * PrcofMaxAMP\n SSFAmpArray[np.remainder(idx, MedianArrayWinSize)] = SSFAmp\n zThres = np.median(SSFAmpArray)\n SSFCrossThresholdArray[np.remainder(idx, MedianArrayWinSize)] = np.mean(z[srcWin])*DetectionThreshold\n z0 = np.median(SSFCrossThresholdArray)\n minSSF = np.min(z[srcWin]) + SSFAmp *AmplitudeRatio\n a = srcWin[0] + np.min(np.where(z[srcWin] >= minSSF))\n onset.append(a)\n\n # adaptively determine analysis window for next cycle\n bw = blankWin\n subIdx = np.round(np.r_[a + bw: a + 3*bw])\n idx = idx + 1\n\n else:\n # no beat detected\n subIdx = np.round(subIdx + blankWin)\n\n return onset", "def _get_run_onsets(\n runs, length_fr, pad_fr, running_threshold_cms, offset_fr):\n out = []\n for run in runs:\n t2p = run.trace2p()\n tr = t2p.trace('deconvolved')\n\n # Add all onsets of \"other\" frames\n others = t2p.nocs(length_fr, pad_fr,\n running_threshold_cms)\n for ot in others:\n start = ot + offset_fr\n out.append(tr[:, start:start + 
length_fr])\n\n return out", "def getOnsetTick(s):\n ticksPerQuarter = getResolution(s)\n onsets = [int(n.offset * ticksPerQuarter) for n in s.flat.notes]\n return onsets", "def detect_correction_onset(baseline_llr, \n motiontrajectory, times,\n ptstart, ptend, \n disturbancemode, \n disturbancevalue, \n disturbanceonsettime=None, # if none, reconstruct onset form the threshold ratio\n disturbance_threshold=9.0,\n plotfilename=None):\n\n # Error considering the disturbance\n screentrajectory, err_screen, disturbancetime, disturbanceindex = compute_trial_error(\n motiontrajectory, times,\n ptstart, ptend, \n disturbancemode, \n disturbancevalue, \n disturbanceonsettime,\n disturbance_threshold)\n \n # Error as if there is no disturbance (ballistic)\n err_ballistic = raydistance_error(motiontrajectory, ptstart, ptend)\n is_back = 1 - 1 * is_target_forward(motiontrajectory, ptstart, ptend)\n err_ballistic += is_back # penalty for wrong direction\n \n # Normalized motion phase\n phase = normalized_motion_phase(screentrajectory, ptstart, ptend)\n\n # Correction onset is the point \n # when no-disturbance error exceeds A*sigma and\n # the real error is decreasing\n ionsets = []\n for i in range(disturbanceindex, len(err_screen)-3):\n err_base_mean, err_base_covar = baseline_llr.regress(phase[i])\n err_base_var = np.sqrt(err_base_covar)\n if (np.abs(err_ballistic[i]) > 1 * err_base_var) and \\\n (np.abs(err_screen[i]) > np.abs(err_screen[i+1])) and \\\n (np.abs(err_screen[i+1]) > np.abs(err_screen[i+2])) and \\\n (np.abs(err_screen[i+2]) > np.abs(err_screen[i+3])):\n #(np.abs(err_ballistic[i]) > np.abs(err_screen[i])):\n ionsets.append(i)\n\n # Plot the detected onsets\n make_plots = True\n if make_plots:\n plt.plot(screentrajectory[:, 0], screentrajectory[:, 1])\n plt.plot(motiontrajectory[:, 0], motiontrajectory[:, 1])\n sp = ptstart\n plt.plot(sp[0], sp[1], marker='o', markersize=3,)\n gp = ptend\n plt.plot(gp[0], gp[1], marker='o', markersize=3,)\n \n plt.scatter(screentrajectory[:-1, 0], screentrajectory[:-1, 1], marker='o', s=15, \n linewidths=4, c=err_screen, alpha=0.5, cmap=plt.cm.coolwarm)\n\n onsets = screentrajectory[ionsets]\n plt.scatter(onsets[:, 0], onsets[:, 1], marker='*', s=10, linewidths=4,)\n plt.axis(\"equal\")\n if plotfilename is not None:\n plt.savefig(plotfilename)\n plt.close()\n else:\n plt.show()\n \n\n return onsets", "def badMuons(self, allmuons, allvertices):\n\n muons = list(m for m in allmuons) # make it a python list\n goodMuon = []\n\n if len(allvertices) < 1: raise RuntimeError\n PV = allvertices[0].position()\n \n out = [] \n for mu in muons:\n if (not(mu.isPFMuon()) or mu.innerTrack().isNull()):\n goodMuon.append(-1); # bad but we don't care\n continue;\n if (self.preselection(mu)):\n dxypv = abs(mu.innerTrack().dxy(PV));\n dzpv = abs(mu.innerTrack().dz(PV));\n if (self.tighterId(mu)):\n ipLoose = ((dxypv < 0.5 and dzpv < 2.0) or mu.innerTrack().hitPattern().pixelLayersWithMeasurement() >= 2);\n goodMuon.append(ipLoose or (not(self.selectClones_) and self.tightGlobal(mu)));\n elif (self.safeId(mu)):\n ipTight = (dxypv < 0.2 and dzpv < 0.5);\n goodMuon.append(ipTight);\n else:\n goodMuon.append(0);\n else:\n goodMuon.append(3); # maybe good, maybe bad, but we don't care\n\n n = len(muons)\n for i in range(n):\n if (muons[i].pt() < self.ptCut_ or goodMuon[i] != 0): continue;\n bad = True;\n if (self.selectClones_):\n bad = False; # unless proven otherwise\n n1 = muons[i].numberOfMatches(ROOT.reco.Muon.SegmentArbitration);\n for j in range(n):\n if (j == i or 
goodMuon[j] <= 0 or not(self.partnerId(muons[j]))): continue\n n2 = muons[j].numberOfMatches(ROOT.reco.Muon.SegmentArbitration);\n if (deltaR(muons[i],muons[j]) < 0.4 or (n1 > 0 and n2 > 0 and ROOT.muon.sharedSegments(muons[i],muons[j]) >= 0.5*min(n1,n2))):\n bad = True;\n break;\n if (bad):\n out.append(muons[i]);\n return out", "def build_set(ls, dsets):\n\n def noh(ls, dsets):\n \"\"\"\n This function remove hydrogens from the selection\n \"\"\"\n data_set = build_set(ls[1], dsets)\n\n noh_set = set()\n pred = oechem.OEIsHydrogen()\n\n for idx in data_set:\n atom = system.GetAtom(oechem.OEHasAtomIdx(idx))\n if not pred(atom):\n noh_set.add(idx)\n\n return noh_set\n\n def residues(ls):\n \"\"\"\n This function select residues based on the residue numbers. An example of\n selection can be:\n mask = 'resid A:16 17 19 B:1'\n \"\"\"\n # List residue atom index to be restrained\n res_atom_set = set()\n\n # Dictionary of lists with the chain residues selected to be restrained\n # e.g. {chainA:[res1, res15], chainB:[res19, res17]}\n chain_dic = {'': []}\n\n # Fill out the chain dictionary\n i = 0\n while i < len(ls):\n if ls[i].isdigit():\n chain_dic[''].append(int(ls[i]))\n i += 1\n else:\n try:\n chain_dic[ls[i]].append(int(ls[i + 2]))\n except:\n chain_dic[ls[i]] = []\n chain_dic[ls[i]].append(int(ls[i + 2]))\n i += 3\n\n # Loop over the molecular system to select the atom indexes to be selected\n hv = oechem.OEHierView(system, oechem.OEAssumption_BondedResidue + oechem.OEAssumption_ResPerceived)\n for chain in hv.GetChains():\n chain_id = chain.GetChainID()\n if chain_id not in chain_dic:\n continue\n for frag in chain.GetFragments():\n for hres in frag.GetResidues():\n res_num = hres.GetOEResidue().GetResidueNumber()\n if res_num not in chain_dic[chain_id]:\n continue\n for oe_at in hres.GetAtoms():\n res_atom_set.add(oe_at.GetIdx())\n\n return res_atom_set\n\n def around(dist, ls):\n \"\"\"\n This function select atom not far than the threshold distance from\n the current selection. 
The threshold distance is in Angstrom\n\n selection can be:\n mask = '5.0 around ligand'\n \"\"\"\n # at = system.GetAtom(oechem.OEHasAtomIdx(idx))\n\n # Atom set selection\n atom_set_around = set()\n\n # Create a OE bit vector mask for each atoms\n bv_around = oechem.OEBitVector(system.GetMaxAtomIdx())\n\n # Set the mask atom\n for at in system.GetAtoms():\n if at.GetIdx() in ls:\n bv_around.SetBitOn(at.GetIdx())\n\n # Predicate\n pred = oechem.OEAtomIdxSelected(bv_around)\n\n # Create the system molecule based on the atom mask\n molecules = oechem.OEMol()\n oechem.OESubsetMol(molecules, system, pred)\n\n # Create the Nearest neighbours\n nn = oechem.OENearestNbrs(system, float(dist))\n\n for nbrs in nn.GetNbrs(molecules):\n for atom in oechem.OEGetResidueAtoms(nbrs.GetBgn()):\n if atom.GetIdx() in ls:\n continue\n atom_set_around.add(atom.GetIdx())\n\n return atom_set_around\n\n # Start Body of the selection function by language\n\n # Terminal Literal return the related set\n if isinstance(ls, str):\n return dsets[ls]\n # Not or Noh\n if len(ls) == 2:\n if ls[0] == 'noh': # Noh case\n return noh(ls, dsets)\n elif ls[0] == 'not': # Not case\n return dsets['system'] - build_set(ls[1], dsets)\n else: # Resid case with one index\n return residues(ls[1])\n\n if len(ls) == 3:\n if ls[1] == 'or': # Or Case (set union)\n return build_set(ls[0], dsets) | build_set(ls[2], dsets)\n elif ls[1] == 'and': # And Case (set intersection)\n return build_set(ls[0], dsets) & build_set(ls[2], dsets)\n elif ls[1] == 'diff': # Diff case (set difference)\n return build_set(ls[0], dsets) - build_set(ls[2], dsets)\n elif ls[1] == 'around': # Around case\n return around(ls[0], build_set(ls[2], dsets))\n else:\n return residues(ls[1:]) # Resid case with one or two indexes\n else:\n if ls[0] == 'resid':\n return residues(ls[1:]) # Resid case with multiple indexes\n else:\n raise ValueError(\"The passed list have too many tokens: {}\".format(ls))", "def get_selected_muons(muons, trigobj, mask_events, mu_pt_cut_leading, mu_pt_cut_subleading, mu_aeta_cut, mu_iso_cut): \n passes_iso = muons.pfRelIso04_all < mu_iso_cut\n passes_id = muons.mediumId == 1\n passes_subleading_pt = muons.pt > mu_pt_cut_subleading\n passes_leading_pt = muons.pt > mu_pt_cut_leading\n passes_aeta = NUMPY_LIB.abs(muons.eta) < mu_aeta_cut\n \n trigobj.masks[\"mu\"] = (trigobj.id == 13)\n \n muons_matched_to_trigobj = NUMPY_LIB.invert(mask_deltar_first(muons, muons.masks[\"all\"], trigobj, trigobj.masks[\"mu\"], 0.1))\n \n #select muons that pass these cuts\n muons_passing_id = passes_iso & passes_id & passes_subleading_pt & muons_matched_to_trigobj\n \n #select events that have muons passing cuts \n events_passes_muid = sum_in_offsets(muons, muons_passing_id, mask_events, muons.masks[\"all\"], NUMPY_LIB.int8) >= 2\n events_passes_leading_pt = sum_in_offsets(muons, muons_passing_id & passes_leading_pt, mask_events, muons.masks[\"all\"], NUMPY_LIB.int8) >= 1\n events_passes_subleading_pt = sum_in_offsets(muons, muons_passing_id & passes_subleading_pt, mask_events, muons.masks[\"all\"], NUMPY_LIB.int8) >= 2\n\n base_event_sel = mask_events & events_passes_muid & events_passes_leading_pt & events_passes_subleading_pt\n \n muons_passing_os = select_muons_opposite_sign(muons, muons_passing_id & passes_subleading_pt)\n events_passes_os = sum_in_offsets(muons, muons_passing_os, mask_events, muons.masks[\"all\"], NUMPY_LIB.int8) == 2\n \n final_event_sel = base_event_sel & events_passes_os\n final_muon_sel = muons_passing_id & passes_subleading_pt & 
muons_passing_os\n \n return {\n \"selected_events\": final_event_sel,\n \"selected_muons\": final_muon_sel,\n }", "def clean_detections(npts, on_off):\n on = on_off[:,0]\n off = on_off[:,1]\n idx_on = [on[0]]\n idx_off = [off[0]]\n lowest_idx = on[0]\n\n for ion, ioff in zip(on, off):\n if ion > lowest_idx + npts:\n idx_on.append(ion)\n idx_off.append(ioff)\n lowest_idx = ion\n\n return np.asarray((idx_on, idx_off)).T", "def onset(sig, stw, ltw, centred=False):\n\n stw = int(round(stw))\n ltw = int(round(ltw))\n\n n_channels, n_samples = sig.shape\n onset = np.copy(sig)\n onset_raw = np.copy(sig)\n for i in range(n_channels):\n if np.sum(sig[i, :]) == 0.0:\n onset[i, :] = 0.0\n onset_raw[i, :] = onset[i, :]\n else:\n if centred is True:\n onset[i, :] = sta_lta_centred(sig[i, :], stw, ltw)\n else:\n onset[i, :] = classic_sta_lta(sig[i, :], stw, ltw)\n onset_raw[i, :] = onset[i, :]\n np.clip(1 + onset[i, :], 0.8, np.inf, onset[i, :])\n np.log(onset[i, :], onset[i, :])\n\n return onset_raw, onset", "def merge_sets(sets):\n idxs_skipped = []\n n = len(sets)\n for i in range(n-1):\n if i not in idxs_skipped:\n set_i = sets[i]\n for j in range(i+1,n):\n set_j = sets[j]\n if set_i.intersection( set_j ) > set([]):\n sets[i].update( set_j )\n idxs_skipped.append( j )\n sets_u = [ sets[k] for k in np.setdiff1d(range(n), idxs_skipped).astype(np.int) ]\n return sets_u", "def onsets_rt(self) -> Optional[annotations.BeatData]:\n return load_onsets(self.onsets_rt_path)", "def find_flats(aperture, side='blue'):\r\n \r\n # find dome flat images\r\n domeflats = iraf.hselect('%s????.fits' % side, '$I', 'TURRET == \"APERTURE\" & APERTURE == \"%s\" & LAMPS == \"0000000\" & AIRMASS < 1.01 & IMGTYPE == \"flat\"' % aperture, Stdout=1)\r\n # find internal flat (incandescent lamp) images\r\n intflats = iraf.hselect('%s????.fits' % side, '$I', 'TURRET == \"LAMPS\" & APERTURE == \"%s\" & LAMPS == \"0000001\" & AIRMASS < 1.01' % aperture, Stdout=1)\r\n # dome flats are prefered over internal flats\r\n flats = []\r\n if (len(intflats) > 0) & (len(domeflats) == 0):\r\n flats = intflats\r\n print \"Using %d internal flats for the %s arcsec slit.\" % (len(intflats), aperture)\r\n if len(domeflats) > 3:\r\n flats = domeflats\r\n print \"Using %d dome flats for the %s arcsec slit.\" % (len(domeflats), aperture)\r\n\r\n return flats", "def getmountoffsets():\n r = _getoffsets(isMountoffset=True)\n return r", "def add_sets(list_of_sets):\n global true_introns\n for item in list_of_sets:\n true_introns.update(item)", "def lights_on(self) -> list:\n return [\n entity for entity in self.all_lights if self.hass.get_state(entity) == \"on\"\n ]", "def pointsets_mod_automorphism(self, pointsets):\n points = set()\n for ps in pointsets:\n points.update(ps)\n points = tuple(points)\n Aut = self.lattice_automorphism_group(points,\n point_labels=tuple(range(len(points))))\n indexsets = set([ frozenset([points.index(p) for p in ps]) for ps in pointsets ])\n orbits = []\n while len(indexsets)>0:\n idx = indexsets.pop()\n orbits.append(frozenset([points[i] for i in idx]))\n for g in Aut:\n g_idx = frozenset([g(i) for i in idx])\n indexsets.difference_update([g_idx])\n return tuple(orbits)", "def test_find_sets(self):\n cards = numpy.array([[1,1,1,2,0],\n [0,1,2,2,2],\n [0,1,2,2,2],\n [0,1,2,2,2]])\n\n set_indices = set_solver.find_sets(cards)\n self.assertEqual(len(set_indices), 2)\n self.assertTrue((0, 1, 2) in set_indices)\n self.assertTrue((2, 3, 4) in set_indices)", "def find_all_ngon_sols():\n ngon = [None for _ in range(N)] \n 
ngon_set = set()\n numbers = set(range(1, (2 * N) + 1))\n\n for triplet in permutations(numbers, 3):\n ngon[0] = tuple(triplet)\n total = sum(triplet)\n next_ngon_set = set()\n fill_ngon(ngon, numbers - set(triplet), 1, next_ngon_set, total)\n ngon_set |= next_ngon_set\n\n return ngon_set", "def powerSetNaive(array):\n\n res = [[d] for d in array]\n res.append([])\n array_ = []\n skip = 1\n while skip <=len(array)-1:\n\n for x in range(0,len(array),skip):\n array_.append(array[x])\n for y in range(len(array_[0:x+skip+1])):\n toAppend = array_[y:x+1]\n if toAppend not in res:\n res.append(toAppend)\n toAppend = array_[0:x]\n if toAppend not in res:\n res.append(toAppend)\n array_=[]\n skip = skip + 1\n\n return res", "def simple(onArray, offArray):\n \n Larray = len(onArray)\n Larray2 = len(offArray)\n \n assert Larray == Larray2, \"both arrays should have the same size\"\n \n #onFiltered = numpy.array(onArray)[:,OnOff.misc.constants.dataRange]\n #offFiltered = numpy.array(offArray)[:,OnOff.misc.constants.dataRange]\n \n #return onFiltered,offFiltered,OnOff.misc.constants.dataRange\n drange = OnOffCalc.misc.getDatarange(onArray.shape[1])\n dataMask = numpy.ones(onArray.shape)\n #dataMask[:,OnOffCalc.misc.constants.dataRange] = 0\n dataMask[:,drange] = 0\n \n return dataMask", "def getIMA(s, onsets):\n s = s.stripTies()\n\n # with subprocess.Popen([\"onsets2ima\",\"-onsets\"] + [str(o) for o in onsets], stdout=subprocess.PIPE) as proc:\n # output = proc.stdout.read().decode('ascii')\n\n proc = subprocess.Popen([\"onsets2ima\",\"-onsets\"] + [str(o) for o in onsets], stdout=subprocess.PIPE)\n try:\n outs, errs = proc.communicate(timeout=5)\n except TimeoutExpired:\n proc.kill()\n raise IMATimeoutError\n output = outs.decode('ascii')\n\n ima_str = output.split('\\n')[0].strip()\n ima_spect_str = output.split('\\n')[1].strip()\n\n ima = [float(w) for w in ima_str.split(' ')]\n ima_spect = [float(w) for w in ima_spect_str.split(' ')]\n\n #if onset of first note != 0 (start with rest), add zeros to ima_spect\n ima_spect = [0.0]*onsets[0] + ima_spect\n\n ima_spect = [ima_spect[o] for o in onsets]\n\n return ima, ima_spect", "def mask_the_images(working_path,set_name):\n\n file_list=glob('/media/talhassid/My Passport/haimTal/test_images_0b8afe447b5f1a2c405f41cf2fb1198e.npy')\n out_images = [] #final set of images for all patients\n for fname in file_list:\n out_images_per_patient = []\n print (\"working on file \", fname)\n imgs_to_process = np.load(fname.replace(\"lungmask\",\"images\")) # images of one patient\n masks = np.load(fname)\n for i in range(len(imgs_to_process)):\n mask = masks[i]\n img = imgs_to_process[i]\n new_size = [512,512] # we're scaling back up to the original size of the image\n img= mask*img # apply lung mask\n #\n # renormalizing the masked image (in the mask region)\n #\n new_mean = np.mean(img[mask>0])\n new_std = np.std(img[mask>0])\n #\n # Pulling the background color up to the lower end\n # of the pixel range for the lungs\n #\n old_min = np.min(img) # background color\n img[img==old_min] = new_mean-1.2*new_std # resetting backgound color\n img = img-new_mean\n img = img/new_std\n #make image bounding box (min row, min col, max row, max col)\n labels = measure.label(mask)\n regions = measure.regionprops(labels)\n #\n # Finding the global min and max row over all regions\n #\n min_row = 512\n max_row = 0\n min_col = 512\n max_col = 0\n for prop in regions:\n B = prop.bbox\n if min_row > B[0]:\n min_row = B[0]\n if min_col > B[1]:\n min_col = B[1]\n if max_row < B[2]:\n 
max_row = B[2]\n if max_col < B[3]:\n max_col = B[3]\n width = max_col-min_col\n height = max_row - min_row\n if width > height:\n max_row=min_row+width\n else:\n max_col = min_col+height\n #\n # cropping the image down to the bounding box for all regions\n # (there's probably an skimage command that can do this in one line)\n #\n img = img[min_row:max_row,min_col:max_col]\n mask = mask[min_row:max_row,min_col:max_col]\n if max_row-min_row <5 or max_col-min_col<5: # skipping all images with no god regions\n pass\n else:\n # moving range to -1 to 1 to accomodate the resize function\n mean = np.mean(img)\n img = img - mean\n min = np.min(img)\n max = np.max(img)\n img = img/(max-min)\n new_img = resize(img,[512,512], mode='constant')\n out_images_per_patient.append(new_img)\n\n id = re.sub(r'.*_images_(.*)\\.npy',r'\\1',fname)\n patient_images_and_id = (out_images_per_patient,id)\n out_images.append(patient_images_and_id)\n print (\"Delete files: {} \\n\\t {} \".format(fname,re.sub(\"lungmask\",\"images\",fname)))\n os.remove(fname)\n os.remove(fname.replace(\"images\",\"lungmask\")) # images of one patient\n\n\n np.save(working_path+\"{}Images.npy\".format(set_name),out_images)", "def nondetects(self, masked=False):\r\n grd = self.grd\r\n xnd = []\r\n ynd = []\r\n ncells = len(grd.cells['depth'])\r\n non_detects_i_tr = np.zeros(ncells, np.int32)\r\n if masked:\r\n not_flagged = np.where(self.rec_track.flagged==0)[0]\r\n rec_track = self.rec_track[not_flagged]\r\n rec_seg = self.make_segments(set_depth=True, \r\n input_rec_track=rec_track)\r\n else:\r\n rec_seg = self.rec_seg\r\n for nr, rseg in enumerate(rec_seg):\r\n seg = rec_seg[nr]\r\n dt = seg.dt\r\n if dt > dt_signal+1:\r\n t1 = seg.t1\r\n t2 = seg.t2\r\n nint = int(np.rint((t2-t1)/dt_signal)) - 1\r\n x1 = seg.x1\r\n x2 = seg.x2\r\n y1 = seg.y1\r\n y2 = seg.y2\r\n dx_nd = (x2 - x1)/float(nint+1)\r\n dy_nd = (y2 - y1)/float(nint+1)\r\n if nint < 120: # 10 minute cutoff for nondetect filling\r\n xint = [x1 + n*dx_nd for n in range(1,nint)]\r\n yint = [y1 + n*dy_nd for n in range(1,nint)]\r\n xnd = xnd + xint\r\n ynd = ynd + yint\r\n\r\n for nd in range(len(xnd)):\r\n xy = [xnd[nd], ynd[nd]]\r\n i = grd.select_cells_nearest(xy)\r\n if (i is not None) and (i >= 0):\r\n non_detects_i_tr[i] += 1\r\n\r\n return non_detects_i_tr", "def _compute_soffsets(self):\n self.soffsets = [ [] for i in self.doffsets ]\n for idx,dofs in enumerate(self.doffsets):\n for o in dofs:\n self.soffsets[(idx + o) % self.p].append(-o)", "def _gaussian_picker(self, onset, phase, start_time, p_arr, s_arr, ptt,\n stt):\n\n # Determine indices of P and S pick times\n pt_idx = int((p_arr - start_time) * self.sampling_rate)\n st_idx = int((s_arr - start_time) * self.sampling_rate)\n\n # Determine P and S pick window upper and lower bounds based on\n # (P-S)/2 -- either this or the next window definition will be\n # used depending on which is wider.\n pmin_idx = int(pt_idx - (st_idx - pt_idx) / 2)\n pmax_idx = int(pt_idx + (st_idx - pt_idx) / 2)\n smin_idx = int(st_idx - (st_idx - pt_idx) / 2)\n smax_idx = int(st_idx + (st_idx - pt_idx) / 2)\n\n # Check if index falls outside length of onset function; if so set\n # window to start/end at start/end of data.\n for idx in [pmin_idx, pmax_idx, smin_idx, smax_idx]:\n if idx < 0:\n idx = 0\n if idx > len(onset):\n idx = len(onset)\n\n # Defining the bounds to search for the event over\n # Determine P and S pick window upper and lower bounds based on\n # set percentage of total travel time, plus marginal window\n\n # window 
based on self.fraction_tt of P/S travel time\n pp_ttime = ptt * self.fraction_tt\n ps_ttime = stt * self.fraction_tt\n\n # Add length of marginal window to this. Convert to index.\n P_idxmin_new = int(pt_idx - int((self.marginal_window + pp_ttime)\n * self.sampling_rate))\n P_idxmax_new = int(pt_idx + int((self.marginal_window + pp_ttime)\n * self.sampling_rate))\n S_idxmin_new = int(st_idx - int((self.marginal_window + ps_ttime)\n * self.sampling_rate))\n S_idxmax_new = int(st_idx + int((self.marginal_window + ps_ttime)\n * self.sampling_rate))\n\n # Setting so the search region can't be bigger than (P-S)/2:\n # compare the two window definitions; if (P-S)/2 window is\n # smaller then use this (to avoid picking the wrong phase).\n P_idxmin = np.max([pmin_idx, P_idxmin_new])\n P_idxmax = np.min([pmax_idx, P_idxmax_new])\n S_idxmin = np.max([smin_idx, S_idxmin_new])\n S_idxmax = np.min([smax_idx, S_idxmax_new])\n\n # Setting parameters depending on the phase\n if phase == \"P\":\n sta_winlen = self.p_onset_win[0]\n win_min = P_idxmin\n win_max = P_idxmax\n if phase == \"S\":\n sta_winlen = self.s_onset_win[0]\n win_min = S_idxmin\n win_max = S_idxmax\n\n # Find index of maximum value of onset function in the appropriate\n # pick window\n max_onset = np.argmax(onset[win_min:win_max]) + win_min\n # Trim the onset function in the pick window\n onset_trim = onset[win_min:win_max]\n\n # Only keep the onset function outside the pick windows to\n # calculate the pick threshold\n onset_threshold = onset.copy()\n onset_threshold[P_idxmin:P_idxmax] = -1\n onset_threshold[S_idxmin:S_idxmax] = -1\n onset_threshold = onset_threshold[onset_threshold > -1]\n\n # Calculate the pick threshold: either user-specified percentile of\n # data outside pick windows, or 88th percentile within the relevant\n # pick window (whichever is bigger).\n threshold = np.percentile(onset_threshold, self.pick_threshold * 100)\n threshold_window = np.percentile(onset_trim, 88)\n threshold = np.max([threshold, threshold_window])\n\n # Remove data within the pick window that is lower than the threshold\n tmp = (onset_trim - threshold).any() > 0\n\n # If there is any data that meets this requirement...\n if onset[max_onset] >= threshold and tmp:\n exceedence = np.where((onset_trim - threshold) > 0)[0]\n exceedence_dist = np.zeros(len(exceedence))\n\n # Really faffy process to identify the period of data which is\n # above the threshold around the highest value of the onset\n # function.\n d = 1\n e = 0\n while e < len(exceedence_dist) - 1:\n if e == len(exceedence_dist):\n exceedence_dist[e] = d\n else:\n if exceedence[e + 1] == exceedence[e] + 1:\n exceedence_dist[e] = d\n else:\n exceedence_dist[e] = d\n d += 1\n e += 1\n\n # Find the indices for this period of data\n tmp = exceedence_dist[np.argmax(onset_trim[exceedence])]\n tmp = np.where(exceedence_dist == tmp)\n\n # Add one data point below the threshold at each end of this period\n gau_idxmin = exceedence[tmp][0] + win_min - 1\n gau_idxmax = exceedence[tmp][-1] + win_min + 2\n\n # Initial guess for gaussian half-width based on onset function\n # STA window length\n data_half_range = int(sta_winlen * self.sampling_rate / 2)\n\n # Select data to fit the gaussian to\n x_data = np.arange(gau_idxmin, gau_idxmax, dtype=float)\n x_data = x_data / self.sampling_rate\n y_data = onset[gau_idxmin:gau_idxmax]\n\n # Convert indices to times\n x_data_dt = np.array([])\n for i in range(len(x_data)):\n x_data_dt = np.hstack([x_data_dt, start_time + x_data[i]])\n\n # Try to fit a 
gaussian.\n try:\n # Initial parameters are:\n # height = max value of onset function\n # mean = time of max value\n # sigma = data half-range (calculated above)\n p0 = [np.max(y_data),\n float(gau_idxmin + np.argmax(y_data))\n / self.sampling_rate,\n data_half_range / self.sampling_rate]\n\n # Do the fit\n popt, _ = curve_fit(util.gaussian_1d, x_data, y_data, p0)\n\n # Results:\n # popt = [height, mean (seconds), sigma (seconds)]\n max_onset = popt[0]\n # Convert mean (pick time) to time\n mean = start_time + float(popt[1])\n sigma = np.absolute(popt[2])\n\n gaussian_fit = {\"popt\": popt,\n \"xdata\": x_data,\n \"xdata_dt\": x_data_dt,\n \"PickValue\": max_onset,\n \"PickThreshold\": threshold}\n\n # If curve_fit fails. Will also spit error message to stdout,\n # though this can be suppressed - see warnings.filterwarnings()\n except (ValueError, RuntimeError):\n gaussian_fit = self.DEFAULT_GAUSSIAN_FIT\n gaussian_fit[\"PickThreshold\"] = threshold\n sigma = -1\n mean = -1\n max_onset = -1\n\n # If onset function does not exceed threshold in pick window\n else:\n gaussian_fit = self.DEFAULT_GAUSSIAN_FIT\n gaussian_fit[\"PickThreshold\"] = threshold\n sigma = -1\n mean = -1\n max_onset = -1\n\n return gaussian_fit, max_onset, sigma, mean", "def _get_ring_nodes(m, namin=3, namax=9, remove_redudant=T):\n # first search for rings\n sets = []\n for i in range(namin, namax+1):\n #if i in [3,4,5]:\n pat_i = '*~1' + '~*'*(i-2) + '~*1'\n #else:\n # pat_i = '*:1' + ':*'*(i-2) + ':*1'\n Qi = Chem.MolFromSmarts( pat_i )\n for tsi in m.GetSubstructMatches(Qi):\n set_i = set(tsi)\n if set_i not in sets:\n sets.append( set(tsi) )\n if remove_redudant:\n # now remove those rings that are union of smaller rings\n n = len(sets)\n sets_remove = []\n ijs = itl.combinations( list(range(n)), 2 )\n sets_u = []\n for i,j in ijs:\n set_ij = sets[i].union( sets[j] )\n if (set_ij in sets) and (set_ij not in sets_remove):\n sets_remove.append( set_ij )\n sets_u = cim.get_compl(sets, sets_remove)\n else:\n sets_u = sets\n return sets_u", "def onsets_rb(self) -> Optional[annotations.BeatData]:\n return load_onsets(self.onsets_rb_path)", "def getSets():", "def emg_onsets(emg_amplitude, threshold=0, threshold2=None):\n # Sanity checks.\n if not isinstance(emg_amplitude, np.ndarray):\n emg_amplitude = np.atleast_1d(emg_amplitude).astype('float64')\n if threshold > np.max(emg_amplitude):\n raise ValueError(\"NeuroKit error: emg_onsets(): threshold\"\n \"specified exceeds the maximum of the signal\"\n \"amplitude.\")\n if threshold2 is not None and threshold2 > np.max(emg_amplitude):\n raise ValueError(\"NeuroKit error: emg_onsets(): threshold2\"\n \"specified exceeds the maximum of the signal\"\n \"amplitude.\")\n\n # Extract indices of data points greater than or equal to threshold.\n indices = np.nonzero(emg_amplitude >= threshold)[0]\n\n # Extract initial and final indexes of each activity burst.\n indices = np.vstack((indices[np.diff(np.hstack((-np.inf, indices))) > 1],\n indices[np.diff(np.hstack((indices, np.inf))) > 1])).T\n indices = indices[indices[:, 1]-indices[:, 0] >= 0, :]\n\n # Threshold2.\n if threshold2 is not None:\n indices2 = np.ones(indices.shape[0], dtype=bool)\n for i in range(indices.shape[0]):\n if np.count_nonzero(emg_amplitude[indices[i, 0]: indices[i, 1]+1] >= threshold2) < 1:\n indices2[i] = False\n indices = indices[indices2, :]\n\n # Prepare output.\n indices = list(np.concatenate(indices))\n info = {\"EMG_Onsets\": indices}\n\n return info", "def loopreelset(combination, reel_set):\n 
matching_reels = []\n for index in range(5):\n if if3symbols(combination[index][0],combination[index][1],combination[index][2], reel_set[index]):\n matching_reels.append(index)\n if len(matching_reels) == 5:\n return True", "def lego_sets():\n # you must replace this line and return your own list\n return []", "def GET_MODIS_LANDSAT_PAIRS(self, datasets, latitude, longitude, start_date, end_date, max_cloud_cover):\n total_scenes = []\n START_DATE = start_date\n END_DATE = end_date\n \"\"\"\n search through given dates\n finds matching dates and position. \n \"\"\"\n\n #while len(total_scenes) < num_pairs and int((date.fromisoformat(START_DATE) - date.fromisoformat(end_date)).days):\n ### Search for Landsat products ###\n Landsat_scenes = self.api.search(\n dataset=datasets[0],\n latitude=latitude,\n longitude=longitude,\n start_date=START_DATE,\n end_date=END_DATE,\n max_cloud_cover=max_cloud_cover,\n max_results=1000)\n\n # the scenes will be ordered oldest -> most recent\n START_DATE = Landsat_scenes[0]['acquisitionDate']\n\n ### Search for MODIS products ###\n MODIS_scenes = self.api.search(\n dataset=datasets[1],\n latitude=latitude,\n longitude=longitude,\n start_date=START_DATE,\n end_date=END_DATE,\n max_cloud_cover=max_cloud_cover,\n max_results=1000)\n\n # the scenes will be ordered oldest -> most recent\n # next iteration starts at last found landsat date\n START_DATE = Landsat_scenes[-1]['acquisitionDate']\n\n\n # make dict so we can access with dates and scene bounds\n Landsat_scenes = {scene['acquisitionDate']: scene for scene in Landsat_scenes}\n MODIS_scenes = {scene['acquisitionDate']: scene for scene in MODIS_scenes}\n ### GET MATCHING DATES ###\n keys = set(MODIS_scenes.keys()).intersection(Landsat_scenes.keys())\n\n ### generate tuple pairs, landsat, modis ###\n found_scenes = [(Landsat_scenes[date], MODIS_scenes[date]) for date in keys]\n total_scenes += found_scenes\n if len(found_scenes):\n print(\"{} scenes found...\".format(len(found_scenes)))\n print(\"{} total scenes\".format(len(total_scenes)))\n else:\n print(\"{} total scenes\".format(len(total_scenes)))\n\n return total_scenes", "def check_setpoints(self):\n # TODO: Can possibly put this in the CCBC Brains\n for heater in self.ard_data['heaters'].keys():\n current_temp = float(self.ard_data['tempsensors'][self.ard_data['heaters'][heater]['tsensor_name']]['value'])\n\n # Assign the pin_status the previous value from the previous iteration\n pin_status = self.ard_data['heaters'][heater]['status']\n\n if current_temp > self.ard_data['heaters'][heater]['upper limit']:\n pin_status = 'OFF'\n\n if current_temp < self.ard_data['heaters'][heater]['lower limit']:\n pin_status = 'ON'\n\n if current_temp >= self.ard_data['heaters'][heater]['maxtemp']:\n pin_status = 'OFF'\n\n self.ard_data['heaters'][heater]['status'] = pin_status\n\n for pump in self.ard_data['pumps'].keys():\n pressure = float(self.ard_data['presssensors'][self.ard_data['pumps'][pump]['psensor_name']]['pressure'])\n gallons = float(pressure * self.ard_data['pumps'][pump]['psi_to_gal_slope'] +\n self.ard_data['pumps'][pump]['psi_to_gal_intercept'])\n self.ard_data['pumps'][pump]['gallons'] = gallons\n\n # Assign the pin status the previous value from the previous cycle\n pin_status = self.ard_data['pumps'][pump]['status']\n\n if gallons > self.ard_data['pumps'][pump]['upper limit']:\n # Turn the pump off when the setpoint is above the setpoint\n pin_status = 'OFF'\n # TODO: Account for solenoid valve control when available\n\n if gallons < 
self.ard_data['pumps'][pump]['lower limit']:\n pin_status = 'ON'\n\n self.ard_data['pumps'][pump]['status'] = pin_status", "def lfcSet(file_list):\n # Just out of paranoia, order the LFC files\n pseudo_time = np.empty_like(file_list,dtype='float')\n file_date = np.empty_like(file_list,dtype='float')\n file_obsn = np.empty_like(file_list,dtype='float')\n for i,file_name in enumerate(file_list):\n file_id = os.path.basename(file_name).split('_')[-1][:-5]\n pseudo_time[i] = file_id\n file_date[i], file_obsn[i] = file_id.split('.')\n time_sort = np.argsort(pseudo_time)\n file_list = np.array(file_list)[time_sort]\n file_date = file_date[time_sort]\n file_obsn = file_obsn[time_sort]\n \n sets = []\n consecutive = []\n date = file_date[0]\n for i in range(1,len(file_list)):\n if date != file_date[i]:\n date = file_date[i]\n sets.append(consecutive)\n consecutive=[file_list[i]]\n elif file_obsn[i] != file_obsn[i-1]+1:\n sets.append(consecutive)\n consecutive=[file_list[i]]\n else:\n consecutive.append(file_list[i])\n return sets", "def save_conditions_onsets(conditions_onsets,\n onsets_dir=tempfile.gettempdir()):\n condition_files = []\n\n for cid, cframe in conditions_onsets.groupby(['session_name',\n 'condition_id']):\n session_id, condition_id = cid\n onset_dir = os.path.join(onsets_dir, session_id)\n if not os.path.exists(onset_dir):\n os.makedirs(onset_dir)\n fname = os.path.join(onset_dir, 'cond%03i.txt' % condition_id)\n cframe[['onset', 'duration', 'amplitude']].to_csv(\n fname, sep=' ', header=False, index=False)\n condition_files.append(fname)\n\n return condition_files", "def buildConnectedSets(self, cars):", "def analyze(self, event):\n '''\n\tif not (event.HLT_Mu17_TrkIsoVVL_Mu8_TrkIsoVVL_DZ or event.HLT_Mu17_TrkIsoVVL_TkMu8_TrkIsoVVL_DZ or event.HLT_IsoTkMu24 or event.HLT_IsoMu24):\n\t self.out.fillBranch(\"pass_selection\",0)\n return True\n '''\n\telectrons = Collection(event, \"Electron\")\n muons = Collection(event, \"Muon\")\n jets = Collection(event, \"Jet\")\n Z = ROOT.TLorentzVector()\n\n\ttight_muons = []\n\tgoodmuons_pt = []\n goodmuons_eta = [] \n\n\tif (len(muons)<=1):\n\t\tself.out.fillBranch(\"pass_selection\",0)\n return True\n\tfor i in range(0,len(muons)):\n #if (muons[i].eta) < 2.4 and (muons[i].mediumId) and (muons[i].pfIsoId)>=3:\n\t if (muons[i].eta) < 2.4 and (muons[i].mediumId):\n\t if (muons[i].pt) <= 25:\n continue\n\t\tfor j in range(i+1,len(muons)):\n \t\t #if (muons[j].eta) < 2.4 and (muons[j].mediumId) and (muons[j].pfIsoId)>=3:\n\t if (muons[j].eta) < 2.4 and (muons[j].mediumId):\n\t if (muons[j].pt) <= 20:\n\t\t\t continue\n\t\t if (muons[i].charge + muons[j].charge == 0):\n\t\t\t Z = muons[i].p4() + muons[j].p4()\n\t\t\t if (Z.M() > 76 and Z.M() < 106):\n\t\t\t\tself.out.fillBranch(\"pass_selection\",1)\n\t \t\tself.out.fillBranch(\"z_pt\",Z.Pt())\n\t\t\t\tself.out.fillBranch(\"z_mass\",Z.M())\n\t\t\t\tself.out.fillBranch(\"z_phi\",Z.Phi())\n\t\t\t\ttight_muons.append(muons[i]) \n\t\t\t\ttight_muons.append(muons[j])\n\t\n\tif len(tight_muons) < 2:\n\t self.out.fillBranch(\"pass_selection\",0)\n\t return True\n\n ngoodmuons = 0\n ngoodmuons = len(tight_muons)\n\tif ngoodmuons != 2:\n print(ngoodmuons)\n\n goodmuons_pt.append(tight_muons[0].pt)\n goodmuons_pt.append(tight_muons[1].pt)\n goodmuons_eta.append(tight_muons[0].eta)\n goodmuons_eta.append(tight_muons[1].eta) \n \n self.out.fillBranch(\"muon_pt\",goodmuons_pt)\n self.out.fillBranch(\"muon_eta\",goodmuons_eta) \n \n\tngoodjets = 0\n goodjets_pt = []\n\tgoodjets_id = []\n\tgoodjets_phi = 
[]\n\tgoodjets_dphi_zjet = []\n\n\tfor k in range(0,len(jets)):\n #print(4)\n\t if abs(jets[k].eta) > 2.4:\n continue\n #print(5) \n\t if jets[k].pt < 30:\n\t\tcontinue\n\t #print(6)\n\t pass_lepton_dr_cut = True\n\n\t for i in range(0,len(tight_muons)):\n\t\t#if deltaR(muons[tight_muons[i]].eta,muons[tight_muons[i]].phi,jets[k].eta,jets[k].phi) < 0.4:\n if deltaR(tight_muons[i].eta,tight_muons[i].phi,jets[k].eta,jets[k].phi) < 0.4:\n\t pass_lepton_dr_cut = False\n\n\t if not pass_lepton_dr_cut:\n\t\tcontinue\n\n ngoodjets += 1\n goodjets_pt.append(jets[k].pt)\n\t #goodjets_id.append(jets[k].jetId)\n\t goodjets_phi.append(jets[k].phi)\t \n\t #goodjets_dphi_zjet.append(deltaPhi(Z.Phi(),jets[k].phi)) \n\n if ngoodjets != len(goodjets_pt):\n print(error)\n\n self.out.fillBranch(\"jet_pt\",goodjets_pt)\n\t#self.out.fillBranch(\"jet_id\",goodjets_id)\n\tself.out.fillBranch(\"jet_phi\",goodjets_phi)\n\t#self.out.fillBranch(\"dphi_zjet\",goodjets_dphi_zjet)\n\t'''\n\tif(njet!=0):\n\t print(njet)\n '''\n\tif hasattr(event,\"Generator_weight\"):\n self.out.fillBranch(\"gen_weight\",event.Generator_weight)\n else:\n self.out.fillBranch(\"gen_weight\",0)\n\treturn True", "def get_sparse_noise_onset_index(sparseNoiseDisplayLog):\n\n\n frames = sparseNoiseDisplayLog['presentation']['displayFrames']\n frames = [tuple([np.array([x[1][1],x[1][0]]),x[2],x[3],i]) for i, x in enumerate(frames)]\n dtype = [('location',np.ndarray),('sign',int),('isOnset',int),('index',int)]\n frames = np.array(frames, dtype = dtype)\n\n allOnsetInd = []\n for i in range(len(frames)):\n if frames[i]['isOnset'] == 1 and (i == 0 or frames[i-1]['isOnset'] == -1):\n allOnsetInd.append(i)\n\n onsetFrames = frames[allOnsetInd]\n\n allSquares = list(set([tuple([x[0][0],x[0][1],x[1]]) for x in onsetFrames]))\n\n onsetIndWithLocationSign = []\n\n for square in allSquares:\n indices = []\n for onsetFrame in onsetFrames:\n if onsetFrame['location'][0]==square[0] and onsetFrame['location'][1]==square[1] and onsetFrame['sign']==square[2]:\n indices.append(onsetFrame['index'])\n\n onsetIndWithLocationSign.append([np.array([square[0],square[1]]),square[2],indices])\n\n return allOnsetInd, onsetIndWithLocationSign", "def stations(station_let):\n\tstat = ['']*np.size(station_let,0)\n\tfor i in range(len(stat)):\n\t\tfor j in range(4):\n\t\t\tif station_let[i][j] is not np.ma.masked:\n\t\t\t\tstat[i]+=station_let[i][j]\n\treturn stat", "def GetBonds(Bonds):\n b = sorted([(min(x), max(x)) for x in Bonds])\n Bonds13, Bonds14 = [], []\n for (a1,b1) in b:\n #check for bonds with a1 at the center of a 1-3 interaction,\n #letting b1 be the higher number of the two flanking\n clist = [b2 for (a2,b2) in b if a2 == a1 and b2 < b1] + \\\n [a2 for (a2,b2) in b if b2 == a1 and a2 < b1]\n Bonds13.extend([(min(c,b1), max(c,b1)) for c in clist])\n #check for bonds with b1 at the center of a 1-3 interaction,\n #letting a1 be the higher number of the two flanking\n clist = [b2 for (a2,b2) in b if a2 == b1 and b2 < a1] + \\\n [a2 for (a2,b2) in b if b2 == b1 and a2 < a1]\n Bonds13.extend([(min(c,a1), max(c,a1)) for c in clist])\n #find atoms connected to a1\n clist = [b2 for (a2,b2) in b if a1==a2 and not b1==b2] +\\\n [a2 for (a2,b2) in b if a1==b2 and not b1==a2]\n #find atoms connected to b1\n dlist = [a2 for (a2,b2) in b if b1==b2 and not a1==a2] +\\\n [b2 for (a2,b2) in b if b1==a2 and not a1==b2]\n Bonds14.extend([(min(c,d), max(c,d)) for c in clist for d in dlist])\n Bonds1213 = b + Bonds13\n #sort\n Bonds1213.sort()\n Bonds14.sort()\n #get unique values 
in case of loops\n Bonds1213 = [x for (i,x) in enumerate(Bonds1213) if i == 0 or x != Bonds1213[i-1]]\n Bonds14 = [x for (i,x) in enumerate(Bonds14) if i == 0 or x != Bonds14[i-1]]\n #convert to arrays \n Bonds1213 = array(Bonds1213, int)\n Bonds14 = array(Bonds14, int)\n return Bonds1213, Bonds14", "def get_max_num_onsets():\r\n \"\"\" based on the numbers above, should equal to 932945... \"\"\"\r\n c1 = len(gen_onset_c1())\r\n c2 = len(gen_onset_c2())\r\n c3 = len(gen_onset_c3_c4())\r\n c4 = len(gen_onset_c3_c4())\r\n temp = c1\r\n temp = temp + ( c1 * c2 )\r\n temp = temp + ( c1 * c3 )\r\n temp = temp + ( c1 * c2 * c3 )\r\n temp = temp + ( c1 * c3 * c4 )\r\n temp = temp + ( c1 * c2 * c3 * c4 )\r\n return temp", "def get_sensor_bool_dryspot_runlevel(self, filename, threshold_min_counted_dryspots=5):\n f = h5py.File(filename, \"r\")\n meta_file = h5py.File(str(filename).replace(\"RESULT.erfh5\", \"meta_data.hdf5\"), 'r')\n\n try:\n single_states, set_of_states, useless_states = self.__get_dryspot_data(f, meta_file)\n multi_states = self.__get_pressure_data(f)\n multi_states = multi_states.squeeze()\n\n activated_sensors = np.count_nonzero(multi_states, axis=1)\n percentage_of_all_sensors = activated_sensors / 1140\n len_wanted_seq = 100\n current = 0\n sequence = np.zeros((len_wanted_seq, self.num_sensors))\n frame_labels = []\n\n if self.aux_info:\n original_frame_idxs = np.full(len_wanted_seq, np.nan, np.int16)\n frame_labels_aux = np.full(len_wanted_seq, np.nan, np.int8)\n sample_percentages = np.full(len_wanted_seq, np.nan)\n single_state_indices = np.full(len_wanted_seq, np.nan, np.int16)\n # flowfronts = np.zeros((len_wanted_seq, self.image_size[0], self.image_size[1]))\n # _coords, flat_fillings = self.__get_filling_data(f, single_states)\n\n for i, sample in enumerate(single_states):\n state_num = int(str(sample).replace(\"state\", \"0\"))\n try:\n sample_percentage = percentage_of_all_sensors[state_num - 1]\n if sample_percentage >= current / len_wanted_seq:\n data = multi_states[state_num - 1, :]\n data = np.log(np.add(data, 1)) # TODO make log optional\n if self.sensor_indizes != ((0, 1), (0, 1)):\n rect = data.reshape(38, 30)\n sel = rect[self.sensor_indizes[0][0]::self.sensor_indizes[0][1],\n self.sensor_indizes[1][0]::self.sensor_indizes[1][1]]\n data = sel.flatten()\n sequence[current, :] = data\n\n frame_label = 0\n if state_num in set_of_states:\n frame_label = 1\n frame_labels.append(frame_label)\n\n if self.aux_info:\n original_frame_idxs[current] = state_num\n frame_labels_aux[current] = frame_label\n sample_percentages[current] = sample_percentage\n single_state_indices[current] = i\n # flowfronts[current, :, :] = create_np_image(target_shape=self.image_size,\n # norm_coords=_coords, data=flat_fillings[i])\n current += 1\n except IndexError:\n continue\n\n # determine runlevel label using frame labels and threshold\n lens_of_runs_of_dryspots = [sum(1 for _ in group) for key, group in\n groupby(np.array(frame_labels) == 1) if key]\n max_len = 0 if len(lens_of_runs_of_dryspots) == 0 else max(lens_of_runs_of_dryspots)\n label = 0 if max_len < threshold_min_counted_dryspots else 1\n\n f.close()\n meta_file.close()\n\n if self.aux_info:\n # framelabels, original_frame_idx, original_num_frames, flowfronts, filling_percentage\n aux = {\"framelabel\": frame_labels_aux,\n \"original_frame_idx\": original_frame_idxs,\n \"original_num_multi_states\": len(multi_states),\n \"percent_of_sensors_filled\": sample_percentages,\n \"single_state_indices\": single_state_indices,\n }\n 
return [(sequence, label, aux)]\n\n return [(sequence, label)]\n except KeyError:\n f.close()\n meta_file.close()\n return None", "def extract_onset_events(bin_path, chanList, chunk_size=4000):\n meta = readMeta(bin_path)\n sRate = SampRate(meta)\n\n n_samples = int(float(meta['fileTimeSecs']) * sRate)\n n_chunks = sp.floor(n_samples / chunk_size).astype('int32')\n print(\"leftover samples: %i\" % (n_samples % n_chunks))\n\n rawData = makeMemMapRaw(bin_path, meta)\n\n events = []\n for ch in chanList:\n inds = []\n\n # get digital data for the selected lines\n for i in tqdm(range(n_chunks)):\n start = i * chunk_size\n stop = start + chunk_size\n\n digArray = ExtractDigital(rawData, start, stop, 0, range(8), meta)\n trig_data = digArray[ch,:]\n\n ix = sp.where(sp.diff(trig_data) == 1)[0]\n inds.append(ix+start)\n # if len(ix) > 0:\n # print(len(ix))\n\n inds = sp.concatenate(inds)\n times = inds / sRate\n events.append([inds,times])\n\n return events", "def connected_sets(self):\n self._assert_counted_at_lag()\n return self._connected_sets", "def find_arms(path,fr_nb):\n im=open_frame(path,fr_nb)\n img=im.copy()\n im=img_as_ubyte(im)\n mask_h = hysteresis_thresholding(img,6,10)\n \n ksize=5\n kernel = np.ones((ksize,ksize),dtype = np.uint8)\n kernel = skimage.morphology.disk(ksize)\n \n mask = cv2.morphologyEx(mask_h, cv2.MORPH_OPEN, kernel,iterations=2)\n \n arms = mask_h-mask\n \"\"\"\n lab,_ = ndi.label(diff)\n \n arms = skimage.morphology.remove_small_objects(lab,60)\"\"\" #Only temporary, to track only the biggest\n return mask,arms", "def findSubsetIndices(grdMODEL, min_lat, max_lat, min_lon, max_lon):\n\n\n if min_lon<0 and max_lon>0:\n splitExtract = True; Turns=2\n grdMODEL.splitExtract=splitExtract\n else:\n splitExtract = False; Turns=1\n grdMODEL.splitExtract=splitExtract\n grdMODEL.lon = np.where(grdMODEL.lon>180,grdMODEL.lon-360,grdMODEL.lon)\n \n # Array to store the results returned from the function\n res=np.zeros((Turns,4),dtype=np.float64)\n \n lats=grdMODEL.lat[:,0]\n lons=grdMODEL.lon[0,:]\n\n \n for k in range(Turns):\n\n if k==0 and splitExtract == True:\n minLon=min_lon; maxLon=0\n minLon=minLon+360\n maxLon=maxLon+360\n elif k==1 and splitExtract == True:\n minLon=0; maxLon=max_lon\n else:\n minLon=min_lon; maxLon=max_lon\n \n distances1 = []\n distances2 = []\n indices=[]\n index=1\n for point in lats:\n s1 = max_lat-point # (vector subtract)\n s2 = min_lat-point # (vector subtract)\n distances1.append((np.dot(s1, s1), point, index))\n distances2.append((np.dot(s2, s2), point, index-1))\n index=index+1\n\n distances1.sort()\n distances2.sort()\n indices.append(distances1[0])\n indices.append(distances2[0])\n \n distances1 = []\n distances2 = []\n index=1\n \n for point in lons:\n s1 = maxLon-point # (vector subtract)\n s2 = minLon-point # (vector subtract)\n distances1.append((np.dot(s1, s1), point, index))\n distances2.append((np.dot(s2, s2), point, index-1))\n index=index+1\n \n distances1.sort()\n distances2.sort()\n indices.append(distances1[0])\n indices.append(distances2[0])\n\n # Save final product: max_lat_indices,min_lat_indices,max_lon_indices,min_lon_indices\n minJ=indices[1][2]\n maxJ=indices[0][2]\n minI=indices[3][2]\n maxI=indices[2][2]\n \n res[k,0]=minI; res[k,1]=maxI; res[k,2]=minJ; res[k,3]=maxJ;\n\n # Save final product: max_lat_indices,min_lat_indices,max_lon_indices,min_lon_indices\n grdMODEL.indices=res", "def finalSubsets(self):\n subs = self.allSubsets()\n for s in self.graph.observed:\n subs = subs[subs[:,s] == 1,] # remove subsets where 
values in s are not True\n return subs", "def isophase(_groups, colour, period):\n # Whole numbers are required, so odd numbers are dealt with by loading\n # the spare into the off period.\n # As this is in milliseconds, this will be imperceptible.\n # It is also unlikely, as the top-level input is in seconds\n # and has been multiplied up to milliseconds before reaching this\n # function\n return [\n (colour, math.floor(period/2)),\n ('Off', math.ceil(period/2))\n ]", "def get_masks(data):\n return [patient[0] for i, patient in enumerate(data) if i in good_patients]", "def lego_sets():\n # you must replace this line and return your own list\n return lego_sets_list", "def find_rings(atom_list): \n CX_list = [atom0 for atom0 in atom_list if ((atom0.atom_name == \"CX\") or (atom0.atom_name == \"CY\"))]\n atom_dict = {}\n for atom0 in CX_list:\n if (len(identify_bonds(atom0, atom_list)) >= 2):\n atom_dict[atom0] = {}\n for atom1 in identify_bonds(atom0, atom_list):\n if ( ((atom1[0].atom_name == \"CX\") or (atom1[0].atom_name == \"CY\")) and (len(identify_bonds(atom1[0], atom_list)) >= 2) ):\n atom_dict[atom0][atom1[0]] = {}\n for atom2 in identify_bonds(atom1[0], atom_list):\n if ( ((atom2[0].atom_name == \"CX\") or (atom2[0].atom_name == \"CY\")) and (atom2[0] != atom0) and (len(identify_bonds(atom2[0], atom_list)) >= 2)):\n atom_dict[atom0][atom1[0]][atom2[0]] = {}\n for atom3 in identify_bonds(atom2[0], atom_list):\n if ( ((atom3[0].atom_name == \"CX\") or (atom3[0].atom_name == \"CY\")) and (atom3[0] != atom0) and (len(identify_bonds(atom3[0], atom_list)) >= 2)):\n atom_dict[atom0][atom1[0]][atom2[0]][atom3[0]] = [atom3[0].atom_number]\n rings = []\n for key in atom_dict.keys():\n for key2 in atom_dict[key].keys():\n for key3 in atom_dict[key][key2].keys():\n for key4 in atom_dict[key][key2][key3].keys():\n rings.append([key, key2, key3, key4])\n finite_rings = []\n for element in rings:\n for element2 in rings:\n if ((element[0] == element2[0]) and (element[3] == element2[3]) and (element[1] != element2[1]) and (element[1] != element2[2]) and (element[2] != element2[1]) and (element[2] != element2[2]) and (element[0] != element2[1] != element[3]) and (element[0] != element2[2] != element[3])):\n check = True\n for el in finite_rings:\n if ((element[0] in el) and (element[1] in el) and (element[2] in el) and (element[3] in el) and (element2[0] in el) and (element2[1] in el) and (element2[2] in el) and (element2[3] in el)):\n check = False\n if (check == True):\n finite_rings.append([element[0], element[1], element[2], element[3], element2[1], element2[2]])\n return finite_rings", "def do_one_set_sims(num_sims=100, num_darts=1000):\n areas = N.zeros(num_sims, dtype='f')\n for isim in range(num_sims):\n areas[isim] = do_one_sim(num_darts)\n return (N.mean(areas), N.std(areas))", "def get_effective_lumi(array_from_googledoc,_era,_skoutput ,data_skoutput, _skdatadir,_dirlist,_summary_path,skim_list, _workdir, RunFull):\n\n print_message(1,\"get_effective_lumi [\"+_era+\"]\")\n\n \"\"\" loop over dataset list on tamsa\"\"\"\n var_url = get_url_from_era(_era,False)\n\n arr_alias=[]\n arr_alias_torun=[]\n array_gd = array_from_googledoc\n\n for dsn in _dirlist:\n \n array_gd = array_from_googledoc\n\n\n var_alias = find_googledoc_var_from_dsn(array_gd,_era,\"alias\", dsn)\n\n if var_alias == \"NULL\" :\n print \"Skipping NULL [get_effective_lumi] \" +dsn\n continue\n\n if not os.path.exists(data_skoutput+\"/\"+ var_alias +\".txt\"):\n print \"get_effective_lumi: adding \" + var_alias + \" to 
processing list\"\n arr_alias_torun.append(var_alias)\n\n \n arr_alias.append(var_alias)\n if os.path.exists( _workdir+\"/MC\"+_era+\".txt\"):\n os.system(\"rm \" + _workdir + \"/MC\"+_era+\".txt\")\n\n w_list=open(_workdir+\"/MC\"+_era+\".txt\",\"w\")\n\n for x in arr_alias_torun:\n w_list.write(x.split()[0]+\"\\n\")\n w_list.close()\n \n return_list=[]\n\n if len(arr_alias_torun) > 0:\n currentdir = os.getenv(\"PWD\")\n print \"SKFlat.py -a GetEffLumi -l \"+currentdir+\"/\"+_workdir+\"/MC\"+_era+\".txt -n 50 --nmax 300 -e \"+_era\n\n os.chdir(os.getenv(\"SKFlat_WD\"))\n os.system(\"SKFlat.py -a GetEffLumi -l \"+currentdir+\"/\"+_workdir+\"/MC\"+_era+\".txt -n 50 --nmax 300 -e \"+_era )\n for x in arr_alias_torun:\n print \"SKFlat.py -a GetEffLumi -i \"+x.split()[0] +\" -n 50 --nmax 300 -e \"+_era \n #print ('cp ' + _skoutput + \"/GetEffLumi/\"+_era + \"/GetEffLumi_\"+ x.split()[0] +\".root \" + data_skoutput+\"/\")\n #os.system('cp ' + _skoutput + \"/GetEffLumi/\"+_era + \"/GetEffLumi_\"+ x.split()[0] +\".root \" + data_skoutput+\"/\")\n\n #GetEFf=False\n #while not GetEFf:\n # l_userinput= raw_input ('Check if Eff lumi job is ok? [y/ MCname]:') \n # if l_userinput == \"y\" : \n # print \"Good\"\n # GetEFf=True\n # else:\n # os.system(\"SKFlat.py -a GetEffLumi -i \"+l_userinput+\" -n 50 --nmax 300 -e \"+_era )\n \n\n for skim in skim_list:\n new_list=open(currentdir+\"/\"+_workdir+\"/MC\"+_era+\".txt\",\"r\")\n new_skimlist=open(currentdir+\"/\"+_workdir+\"/MC_\"+skim+\"_\"+_era+\".txt\",\"w\")\n runSkim=False\n for l in new_list:\n l = l.split()[0]\n #allowed_inputs=['y','n']\n #l_userinput ='NULL'\n #while not l_userinput in allowed_inputs:\n # l_userinput= raw_input ('Sample to update ['+l+']: make skim ' + skim + ' [y/n]:')\n\n \n if run_skim_from_googledoc(_era,l , skim,var_url) == \"Y\": #l_userinput == \"y\":\n print 'Sample to update ['+l+']: make skim ' + skim\n new_skimlist.write(l+'\\n')\n runSkim=True\n return_list.append(find_googledoc_var_from_alias(_era, \"dsn\", l.split()[0],var_url))\n\n new_list.close()\n new_skimlist.close()\n if runSkim:\n os.system(\"SKFlat.py -a \"+skim+\" -l \"+currentdir+\"/\"+_workdir+\"/MC_\"+skim+\"_\"+_era+\".txt -n 100 --nmax 300 -e \"+_era )\n \n \n os.system(\"rm \"+currentdir+\"/\"+_workdir+\"/MC_\"+skim+\"_\"+_era+\".txt\")\n os.chdir(currentdir)\n \n else:\n print \"get_effective_lumi: all samples proccessed previously\"\n\n ''' delete job submittion file '''\n os.system(\"rm \"+ _workdir+ \"/MC\"+_era+\".txt\") \n\n \n ''' run over ds list at tamsa and fill common samplefile'''\n\n print ('Fill CommonSampleFiles')\n\n update_array=[]\n\n for dsn in _dirlist:\n ''' access alias and xsec fmor google doc'''\n\n array_gd = array_from_googledoc\n\n var_alias = find_googledoc_var_from_dsn(array_gd,_era,\"alias\", dsn)\n if var_alias == \"NULL\" :\n continue\n\n\n if not RunFull:\n if not var_alias in arr_alias_torun:\n print (\"skipping \" + var_alias + \" since not running Full mode\")\n continue\n \n else:\n print 'Filling for ' + dsn\n\n var_xsec = find_googledoc_var_from_dsn(array_gd,_era,\"xsec\" , dsn)\n\n #''' get nevents from GetEffLumi job'''\n\n dirpath = _skoutput + \"/GetEffLumi/\"+_era + \"/\"\n _file = ROOT.TFile(dirpath + \"/GetEffLumi_\"+ var_alias + \".root\")\n hist = _file.Get(\"sumW\")\n nevents_w = hist.Integral()\n signhist = _file.Get(\"sumSign\")\n nevents_sign = signhist.Integral()\n _file.Close()\n nevents_no_w=0\n \n orig_xsec=\"\"\n orig_nevent_no_w=\"\"\n orig_nevent_sign=\"\"\n orig_nevent_w=\"\"\n\n \n print 
\"Reading : \" + dsn\n print \"Reading \" + _skdatadir + _era+ \"/Sample/CommonSampleInfo/\"+var_alias+\".txt\"\n\n orig_common_list = open(_skdatadir + _era+ \"/Sample/CommonSampleInfo/\"+var_alias+\".txt\",\"r\")\n\n for line in orig_common_list:\n if not \"#\" in line:\n if len(line.split()) < 1:\n continue\n orig_xsec=line.split()[2]\n orig_nevent_no_w=line.split()[3]\n orig_nevent_sign=line.split()[4]\n orig_nevent_w=line.split()[5]\n orig_common_list.close()\n\n print ('Filled for original values')\n update_file=False\n\n if not orig_xsec == var_xsec:\n update_file=True\n print \"CommonSampleInfo xsec updated for \" + var_alias\n\n #if not orig_nevent_no_w==str(nevents_no_w):\n #update_file=True\n #print \"CommonSampleInfo xsec updated for nevents_no_w \" + str(nevents_no_w)\n if not float(orig_nevent_w)==nevents_w:\n update_file=True\n nevents_no_w=nevents(_skdatadir + _era+ \"/Sample/ForSNU/\"+var_alias + \".txt\")\n print \"CommonSampleInfo updated for nevents_w \" + str(nevents_w)\n\n elif not float(orig_nevent_sign)==nevents_sign:\n update_file=True\n nevents_no_w=nevents(_skdatadir + _era+ \"/Sample/ForSNU/\"+var_alias + \".txt\")\n print \"CommonSampleInfo updated for nevents_sign \" + str(nevents_sign)\n else: \n nevents_no_w=orig_nevent_no_w\n\n if update_file:\n ''' make commonfile for alias'''\n common_list=open(_skdatadir + _era+ \"/Sample/CommonSampleInfo/\"+var_alias+\".txt\",\"w\")\n common_list.write(\"# alias PD xsec nmc sumsign sumw\\n\")\n common_list.write( var_alias + \"\\t\" + dsn + \"\\t\" + var_xsec + \"\\t\" + str(nevents_no_w) +\"\\t\"+ str(nevents_sign) +\"\\t\"+ str(nevents_w)+\" \\n\") \n\n\n common_list.close()\n os.system(\"git diff \" + _skdatadir + _era+ \"/Sample/CommonSampleInfo/\"+var_alias+\".txt\")\n\n update_array.append([var_alias,var_xsec, nevents_no_w, nevents_sign , nevents_w])\n\n\n if len(update_array) > 0:\n update_summarymc_file(_era)\n\n return return_list", "def find_tps(ckt, a):\n return set([x for x in a if ckt[x].fots.isdisjoint(set(a))])", "def find_endpoints(batch_trajectories):\n # empty lists to fill\n site_lats = []\n site_lons = []\n last_lats = []\n last_lons = []\n lats_150 = []\n lons_150 = [] \n last_times = []\n times_150 = []\n last_sst = []\n sst_150 = []\n \n # temporary lists as placeholders\n temp_site_lats = []\n temp_site_lons = []\n temp_lats = []\n temp_lons = []\n temp_lats150 = []\n temp_lons150 = []\n temp_times = []\n temp_times150 = []\n temp_sst = []\n temp_sst150 = []\n\n for speed in range(len(batch_trajectories)):\n # working with one speed at a time means working with one nc file at\n # a time\n \n # reset temporary lists\n temp_site_lats = []\n temp_site_lons = []\n temp_lats = []\n temp_lons = []\n temp_lats150 = []\n temp_lons150 = []\n temp_times = []\n temp_times150 = []\n temp_sst = []\n temp_sst150 = []\n\n # extract variables into lists\n lats = batch_trajectories[speed].variables['lat'][:]\n lons = batch_trajectories[speed].variables['lon'][:]\n lats150 = batch_trajectories[speed].variables['lat150'][:]\n lons150 = batch_trajectories[speed].variables['lon150'][:]\n times = batch_trajectories[speed].variables['time'][:]\n ssts = batch_trajectories[speed].variables['temp'][:]\n ssts_150 = batch_trajectories[speed].variables['temp150'][:]\n\n # if a particle is deleted before time is up, values are masked. 
\n # We'd like to get the last valid number.\n for trajectory in range(len(lats)):\n i = -1 # index for the last value\n while np.ma.is_masked(lats[trajectory][i]) is True:\n i -= 1 # if the value is masked, go to one value sooner\n \n j = i # use j for the 150m values\n while lats150[trajectory][j] > 0:\n # we want the first index where the latitude is recorded.\n # j is actually the last one where it's not recorded, so we\n # extract the information at index j+1\n j -= 1\n\n # once i and j are determined for a trajectory, we can extract the\n # variables and append them to temporary lists.\n temp_site_lats.append(lats[trajectory][0])\n temp_site_lons.append(lons[trajectory][0])\n temp_lats.append(lats[trajectory][i])\n temp_lons.append(lons[trajectory][i])\n temp_lats150.append(lats150[trajectory][j+1])\n temp_lons150.append(lons150[trajectory][j+1])\n temp_times.append(times[trajectory][i])\n temp_sst.append(ssts[trajectory][i])\n temp_sst150.append(ssts_150[trajectory][j+1])\n temp_times150.append(times[trajectory][j+1])\n \n # after the temporary lists are appended by sinking speed, they\n # are appended to the big lists that are returned by the function.\n # this keeps the structure of being separated by sinking speed.\n site_lats.append(temp_site_lats)\n site_lons.append(temp_site_lons)\n last_lats.append(temp_lats)\n last_lons.append(temp_lons)\n lats_150.append(temp_lats150)\n lons_150.append(temp_lons150)\n last_times.append(temp_times)\n times_150.append(temp_times150)\n last_sst.append(temp_sst)\n sst_150.append(temp_sst150)\n \n return site_lats, site_lons, last_lats, last_lons, lats_150, lons_150,\\\n last_times, times_150, last_sst, sst_150", "def get_5index_list(self):\n msk = self.load_mask()\n return [i for (i,v) in enumerate(msk) if v==0]", "def selectOfSample(self, indexes):\n index_set = set()\n for idx in indexes:\n i = list(self.sample[self.sample['masked'] == False].index)[idx]\n index_set.add(i)\n for ind in list(self.sample[self.sample['masked'] == False].index):\n if ind not in index_set:\n self.sample.at[ind, 'masked'] = True\n return index_set", "def mask_incoherent(self):\n self.MaskPrefix = 'i' + self.MaskPrefix\n print('Masking pixel values where .msk value is less than {0}...'.format(threshold))\n for ig in self.Set:\n igram = self.load_ma(ig)\n mskFile = ig.Path[:-3] + 'msk'\n coherence = roipy.tools.load_half(ig, 2, mskFile)\n incoherent = ma.masked_less(coherence, self.Cothresh)\n igram[incoherent.mask] = ma.masked\n mskFile = self.MaskPrefix + 'Mask_' + ig.Name[:-4]\n np.save(os.path.join(self.ProcDir, mskFile), igram.mask)\n print(mskFile)\n\n print('Done')", "def masterFlat(flat_list, master_dark_fname, normalize = 'median', local_sig_bad_pix = 3, \\\n global_sig_bad_pix = 9, local_box_size = 11, hotp_map_fname = None, verbose=False,\n output_dir = None,min_flux=1000):\n\n #Open the master dark\n master_dark_hdu = f.open(master_dark_fname)\n master_dark = master_dark_hdu[0].data\n dark_shape = np.shape(master_dark)\n\n if verbose:\n print((\"Subtracting {} from each flat file\".format(master_dark_fname)))\n dark_exp_time = master_dark_hdu[0].header['EXPTIME']\n\n #Open all files into a 3D array\n #foo = np.empty((dark_shape[0],dark_shape[1],len(flat_list)))\n foo = []\n\n #Open first flat file to check exposure time and filter\n first_flat_hdu = f.open(flat_list[0])\n flat_exp_time = first_flat_hdu[0].header['EXPTIME']\n\n\n\n if dark_exp_time != flat_exp_time:\n print(\"The master dark file doesn't have the same exposure time as the flats. 
We'll scale the dark for now, but this isn't ideal\", UserWarning)\n factor = flat_exp_time/dark_exp_time\n else:\n factor = 1.\n\n #We've already read it, so we'll stick it in foo\n\n print(\"Combining flat files\")\n for i in range(0,len(flat_list)):\n try: \n #subtract dark for each file, then normalize by mode\n hdu = f.open(flat_list[i],ignore_missing_end=True)\n d_sub = hdu[0].data - factor*master_dark\n if np.nanmedian(d_sub) < min_flux:\n #print(\"Skipping file {}, because its flux is lower than {}\".format(flat_list[i],min_flux))\n continue\n #normalize\n if normalize == 'mode':\n d_sub = d_sub/mode(d_sub, axis = None, nan_policy = 'omit')\n elif normalize == 'median':\n d_sub = d_sub/np.nanmedian(d_sub)\n #foo[:,:,i] = d_sub\n foo.append(d_sub)\n except:\n print(\"Some error. Skipping file {}\".format(i)) \n #Median combine frames\n flat = np.median(foo, axis = 0)\n\n #Filter bad pixels\n #bad_px = sigma_clip(flat, sigma = sig_bad_pix) #old and bad\n ###Major update here: do sigma clipping on the pix-to-pix flat with the large scale vignette removed\n ###Also add local sigma clipping\n def stddevFilter(img, box_size):\n \"\"\" from\n https://stackoverflow.com/questions/28931265/calculating-variance-of-an-image-python-efficiently/36266187#36266187\n This function compute the standard deviation of an image in a\n moving box of a given size. The pixel i,j of the output is the\n standard deviation of the pixel value in the box_size x box_size box\n around the i,j pixel in the original image.\n \"\"\"\n wmean, wsqrmean = (cv2.boxFilter(x, -1, (box_size, box_size), \\\n borderType=cv2.BORDER_REFLECT) for x in (img, img*img))\n return np.sqrt(wsqrmean - wmean*wmean)\n\n #median flat\n median_flat = median_filter(flat, local_box_size) #arbitrary size, shouldn't matter as long as it's big enough\n #standard deviation image\n stddev_im = stddevFilter(flat, local_box_size)\n\n #Local clipping\n local_bad_pix = np.abs(median_flat - flat) > local_sig_bad_pix*stddev_im\n\n #Global clipping here to reject awful pixels and dust, bad columns, etc\n pix_to_pix = flat/median_flat\n global_bad_px = sigma_clip(pix_to_pix, sigma = global_sig_bad_pix).mask #9 seems to work best\n\n #also set all 0 and negative pixels in flat as bad\n non_positive = flat <= 0\n\n #logic combine\n bad_px = np.logical_or(global_bad_px, local_bad_pix)\n\n #also add non_positive pixels\n bad_px = np.logical_or(bad_px, non_positive)\n\n #Normalize good pixel values\n if normalize == 'median':\n norm_flat = flat/np.nanmedian(flat[~bad_px])\n elif normalize == 'mode':\n norm_flat = flat/mode(flat, axis = None, nan_policy = 'omit')\n #Stick it back in the last hdu\n hdu[0].data = norm_flat\n\n #Add pipeline version and history keywords\n vers = version.get_version()\n hdu[0].header.set('PL_VERS',vers,'Version of pipeline used for processing')\n hdu[0].header['HISTORY'] = \"############################\"\n hdu[0].header['HISTORY'] = \"Created master flat by median combining the following:\"\n for i in range(len(flat_list)):\n hdu[0].header['HISTORY'] = flat_list[i]\n hdu[0].header['HISTORY'] = \"Normalized to the median of the master flat\"\n hdu[0].header['HISTORY'] = \"Performed bad pixel local and global sigma clipping with {}, {}sigmas\".format(local_sig_bad_pix, global_sig_bad_pix)\n hdu[0].header['HISTORY'] = \"############################\"\n\n #Parse the last fileanme\n if output_dir is not None:\n flat_outname = flat_list[-1].rsplit('.',1)[0]+\"_master_flat.fits\"\n flat_outname = flat_outname.rsplit('/',1)[-1]\n 
flat_outname = output_dir+flat_outname\n else:\n flat_outname = flat_list[-1].rsplit('.',1)[0]+\"_master_flat.fits\"\n\n #Write the fits file\n if verbose:\n print((\"Writing master flat to {}\".format(flat_outname)))\n hdu.writeto(flat_outname, overwrite=True)\n\n #If there's already a hot pixel map then we'll add to it.\n if hotp_map_fname != None:\n #read in the existing bp map\n #hdu = f.open(hotp_map_fname)\n #hdu[0].data += np.array(bad_px.mask, dtype=float)\n #hdu[0].data = np.logical_or(hdu[0].data.astype(bool), bad_px) #use logical or to combine bad pixel maps\n #bp_outname = flat_list[-1].rsplit('.',1)[0]+\"_bp_map.fits\"\n print(\"Will deal with hot pixel map from dark frames in the calibrate function\")\n\n #else:\n #Parse the last fileanme\n if output_dir is not None:\n bp_outname = flat_list[-1].rsplit('.',1)[0]+\"_bp_map.fits\"\n bp_outname = bp_outname.rsplit('/',1)[-1]\n bp_outname = output_dir+bp_outname\n else:\n bp_outname = flat_list[-1].rsplit('.',1)[0]+\"_bp_map.fits\"\n\n ##### Now write the bad pixel map\n hdu[0].data = bad_px.astype(int)#np.array(bad_px.mask, dtype=float)\n #Parse the last fileanme\n # bp_outname = flat_list[-1].rsplit('.',1)[0]+\"_bp_map.fits\"\n\n #Add history keywords\n hdu[0].header['HISTORY'] = \"############################\"\n hdu[0].header['HISTORY'] = \"Created bad pixel map by sigma clipping on pixel-to-pixel flat{}\".format(flat_outname)\n hdu[0].header['HISTORY'] = \"Bad pixel cutoffs: local sigma = {} and global sigma = {} for clipping\".format(local_sig_bad_pix, global_sig_bad_pix)\n #hdu[0].header['HISTORY'] = \"Bad pixel cutoff of {}sigma\".format(sig_bad_pix)\n hdu[0].header['HISTORY'] = \"A pixel value of 1 indicates a bad pixel\"\n hdu[0].header['HISTORY'] = \"############################\"\n\n if verbose:\n print((\"Writing bad pixel map to {}\".format(bp_outname)))\n #Write the fits file\n hdu.writeto(bp_outname, overwrite=True)\n\n return flat_outname, bp_outname", "def srlatch(_signal,reset_th, set_th):\n s = [_signal > set_th][0]\n r = [_signal < reset_th][0]\n L = len(_signal)\n q = np.zeros(L)\n # qrev = np.zeros(L)\n for i in np.arange(L):\n if ((s[i] == 0) & (r[i] == 0) & (i > 0)):\n q[i] = q[i - 1]\n elif ((s[i] == 0) & (r[i] == 1)):\n q[i] = 0\n elif ((s[i] == 1) & (r[i] == 0)):\n q[i] = 1\n else:\n q[i] == -1 # error\n return np.array(q)", "def get_nrof_lights(self):\n lights = 0\n for l in self.light_array:\n if l:\n lights += 1\n return lights", "def get_muons(limit, selected_run, run_range_low, run_range_high, gold, atm):\n conn = engine_nl.connect()\n\n runs = []\n\n if not selected_run and not run_range_high:\n current_run = get_latest_run()\n run_range = current_run - limit\n # This missed muon array can be very long, so just select on its length\n if not atm:\n result = conn.execute(\"SELECT DISTINCT ON (a.run) a.run, a.gtids, a.days, a.secs, a.nsecs, \"\n \"array_length(b.gtids, 1), c.gtids FROM muons AS a LEFT JOIN missed_muons \"\n \"AS b ON a.run=b.run LEFT JOIN atmospherics AS c ON a.run=c.run \"\n \"WHERE a.run >= %s ORDER BY a.run DESC, a.timestamp DESC, b.timestamp DESC, \" \n \"c.timestamp DESC\", (run_range,))\n else:\n result = conn.execute(\"SELECT DISTINCT ON (a.run) a.run, b.gtids, b.days, b.secs, b.nsecs, \"\n \"array_length(c.gtids, 1), a.gtids FROM atmospherics \"\n \"AS a INNER JOIN muons AS b ON a.run=b.run INNER JOIN \"\n \"missed_muons AS c ON a.run=c.run WHERE array_length(a.gtids, 1) > 0 \"\n \"AND a.run >= %s ORDER BY a.run DESC, a.timestamp DESC, b.timestamp DESC, \"\n \"c.timestamp 
DESC\", (run_range,))\n status = conn.execute(\"SELECT DISTINCT ON (run) run, muon_time_in_range, missed_muon_time_in_range, \"\n \"atmospheric_time_in_range FROM time_check WHERE run >= %s \"\n \"ORDER BY run DESC, timestamp DESC\", (run_range,))\n elif run_range_high:\n if not atm:\n result = conn.execute(\"SELECT DISTINCT ON (a.run) a.run, a.gtids, a.days, a.secs, a.nsecs, \"\n \"array_length(b.gtids, 1), c.gtids FROM muons AS a LEFT JOIN missed_muons \"\n \"AS b ON a.run=b.run LEFT JOIN atmospherics AS c ON a.run=c.run \"\n \"WHERE a.run >= %s AND a.run <= %s ORDER BY a.run DESC, a.timestamp DESC, \"\n \"b.timestamp DESC, c.timestamp DESC\", (run_range_low, run_range_high))\n else:\n result = conn.execute(\"SELECT DISTINCT ON (a.run) a.run, b.gtids, b.days, b.secs, b.nsecs, \"\n \"array_length(c.gtids, 1), a.gtids FROM atmospherics \"\n \"AS a INNER JOIN muons AS b ON a.run=b.run INNER JOIN \"\n \"missed_muons AS c ON a.run=c.run WHERE array_length(a.gtids, 1) > 0 \"\n \"AND a.run >= %s AND a.run <= %s ORDER BY a.run DESC, a.timestamp DESC, \"\n \"b.timestamp DESC, c.timestamp DESC\", (run_range_low, run_range_high))\n status = conn.execute(\"SELECT DISTINCT ON (run) run, muon_time_in_range, missed_muon_time_in_range, \"\n \"atmospheric_time_in_range FROM time_check WHERE run >= %s AND \"\n \"run <= %s ORDER BY run DESC, timestamp DESC\", (run_range_low, run_range_high)) \n else:\n result = conn.execute(\"SELECT DISTINCT ON (a.run) a.run, a.gtids, a.days, a.secs, a.nsecs, \"\n \"array_length(b.gtids, 1), c.gtids FROM muons AS a LEFT JOIN missed_muons \"\n \"AS b ON a.run=b.run LEFT JOIN atmospherics AS c ON a.run=c.run \"\n \"WHERE a.run = %s ORDER BY a.run, a.timestamp DESC, b.timestamp DESC, \" \n \"c.timestamp DESC\", (selected_run,))\n status = conn.execute(\"SELECT DISTINCT ON (run) run, muon_time_in_range, missed_muon_time_in_range, \"\n \"atmospheric_time_in_range FROM time_check WHERE run = %s ORDER BY run, \" \n \"timestamp DESC\", (selected_run,)) \n\n rows = result.fetchall()\n\n muon_count = {}\n mmuon_count = {}\n atm_count = {}\n livetime_lost = {} \n fake = {}\n check_time = {}\n\n for run, agtids, adays, asecs, ansecs, bgtids, cgtids in rows:\n\n # Check if the run is on the gold list\n if gold != 0 and run not in gold:\n continue\n\n runs.append(run)\n check_time[run] = \"-\"\n\n if agtids is None:\n continue\n\n # Check if we inserted a fake muon\n if len(agtids) > 0 and agtids[0] == -1:\n fake[run] = 1\n muon_count[run] = len(agtids)\n # array_length returns None for size zero arrays\n if bgtids is None:\n mmuon_count[run] = 0\n else:\n mmuon_count[run] = bgtids\n # Calculate livetime lost to muons, which are the dominant source\n livetime_lost[run] = calculate_livetime_lost(adays, asecs, ansecs)\n # Lots of unprocessed runs, so check to make sure \n if cgtids is not None:\n atm_count[run] = len(cgtids)\n continue\n atm_count[run] = \"Not Processed\"\n\n check = status.fetchall()\n for run, muon_status, mm_status, atm_status in check:\n check_time[run] = (muon_status and mm_status and atm_status)\n\n return runs, muon_count, mmuon_count, atm_count, livetime_lost, fake, check_time", "def get_idx_set(i, sets):\n idxs = []\n for j, set_j in enumerate(sets):\n if i in set_j: idxs.append(j)\n return idxs", "def plateau_finder(data, tol=0.0003):\n from scipy.ndimage.filters import generic_filter\n tol = 0.0003\n filt_data = generic_filter(data, np.std, size=3)\n plat_index = np.where(filt_data < (np.min(filt_data) + tol))[0]\n\n plateaus = group_consecutive(plat_index)\n\n 
return plateaus", "def get_zones(array,kind,relevant=False,threshold=3):\n\n\tresulting_set=[]\n\n\ti=0\n\tif array[i]==kind:\n\t\tcount=1\n\telse:\n\t\tcount=0\n\n\twhile i<len(array):\n\t\t\n\t\tif array[i]==kind:\n\t\t\tcount+=1\n\t\telif array[i]!=kind and array[i-1]==kind:\n\t\t\tresulting_set.append(([kind]*count,i-count))\n\t\t\tcount=0\n\t\telse:\n\t\t\tpass\n\n\t\ti+=1\n\n\tif count>0:\n\t\tresulting_set.append(([kind]*count, i-count))\n\n\tif relevant == False:\n\t\treturn resulting_set\n\telse:\n\t\treturn [item for item in resulting_set if len(item[0])>threshold]", "def associate_detections_to_trackers(detections,trackers,iou_threshold = 0.3):\n if(len(trackers)==0):\n return np.empty((0,2),dtype=int), np.arange(len(detections)), np.empty((0,5),dtype=int)\n iou_matrix = iou_batch(detections, trackers)\n\n if min(iou_matrix.shape) > 0:\n a = (iou_matrix > iou_threshold).astype(np.int32)\n if a.sum(1).max() == 1 and a.sum(0).max() == 1:\n matched_indices = np.stack(np.where(a), axis=1)\n else:\n matched_indices = linear_assignment(-iou_matrix)\n else:\n matched_indices = np.empty(shape=(0,2))\n\n unmatched_detections = []\n for d, det in enumerate(detections):\n if(d not in matched_indices[:,0]):\n unmatched_detections.append(d)\n unmatched_trackers = []\n for t, trk in enumerate(trackers):\n if(t not in matched_indices[:,1]):\n unmatched_trackers.append(t)\n\n #filter out matched with low IOU\n matches = []\n for m in matched_indices:\n if(iou_matrix[m[0], m[1]]<iou_threshold):\n unmatched_detections.append(m[0])\n unmatched_trackers.append(m[1])\n else:\n matches.append(m.reshape(1,2))\n if(len(matches)==0):\n matches = np.empty((0,2),dtype=int)\n else:\n matches = np.concatenate(matches,axis=0)\n\n return matches, np.array(unmatched_detections), np.array(unmatched_trackers)", "def onsets_b(self) -> Optional[annotations.BeatData]:\n return load_onsets(self.onsets_b_path)", "def loopreelset2(combination, reel_set):\n matching_reels = []\n for index in range(5):\n if if2symbols(combination[index][0],combination[index][1], reel_set[index]):\n matching_reels.append(index)\n if len(matching_reels) == 5:\n return True", "def onsets_d(self) -> Optional[annotations.BeatData]:\n return load_onsets(self.onsets_d_path)", "def isolate_strokes(self):\n if self.onset_times is False:\n self.find_onsets()\n # Defining the frame to contain the strokes\n frame_sz = int(self.stroke_length*self.sampling_rate)\n self.strokes = np.array(\n [self.audio[i:i+frame_sz] for i in self.onset_samples])", "def do_dtv_flagging2(data, freqs):\n \n mask = data.mask*1\n dtv_times = []\n \n for ledge in (54, 60, 66, 76, 82):\n uedge = ledge + 6\n band = np.where( (freqs>=ledge) & (freqs<=uedge) )[0]\n trns = np.where( (freqs>=ledge+0.25) & (freqs<=uedge-0.25) )[0]\n empt = np.where( ((freqs>=ledge-0.25) & (freqs<ledge+0.25)) | ((freqs>uedge-0.25) & (freqs<=uedge+0.25)) )[0]\n \n pB = np.mean(data.data[:,band], axis=1)\n pT = np.mean(data.data[:,trns], axis=1)\n pE = np.mean(data.data[:,empt], axis=1)\n \n #import pylab\n #pylab.plot(pB-pE)\n #pylab.plot(pT-pE)\n #pylab.plot(pE-1)\n #pylab.plot(pE*0 + 3*pE.std())\n #pylab.show()\n \n st = np.std(pE)\n bad = np.where( np.abs(pT-pE) > 3*st )[0]\n for b in bad:\n\t dtv_times.append(b)\n mask[b,band] |= True\n if b > 1:\n mask[b-1,band] |= True\n\t\t dtv_times.append(b-1)\n if b < data.shape[0]-2:\n mask[b+1,band] |= True\n\t dtv_times.append(b+1)\n \n dtv_times = sorted(dtv_times)\n\n data.mask = mask*1\n return data.mask, list(set(dtv_times))", "def 
block(array):\r\n grid = []\r\n for z in range(0,7,3): #0,3,6\r\n #vertical down 3\r\n for n in range(0,7,3): #0,3,6\r\n #horiz across 3\r\n line = []\r\n for i in range(3):\r\n for j in range(3):\r\n vert,hor = i+z,j+n\r\n line.append(array[vert][hor])\r\n grid.append(line)\r\n won = True\r\n for i in range(len(grid)):\r\n if won == True:\r\n if len(grid[i]) != len(set(grid[i])):\r\n won = False\r\n else:\r\n pass\r\n else:\r\n break\r\n return won", "def get_light_sky(filenames, onoff=True):\n return filter_filenames(filenames, [\"light-off\"], onoff)", "def power_set(sett):\n\n powerset_so_far = {frozenset()}\n\n for element in sett:\n set.update(powerset_so_far,\\\n extend_all(element, powerset_so_far))\n \n return powerset_so_far", "def gen_all_holds(hand):\n\n mask = sorted(gen_all_sequences((1,0), len(hand)))\n answer_set = []\n for current_mask in mask:\n temp = []\n for indx in range(len(current_mask)):\n if current_mask[indx] == 1:\n temp.append(hand[indx]);\n answer_set.append(tuple(temp))\n return set(answer_set)", "def _find_trial_sounds_regions_in_sync(sync_file):\n bit = 'SOUND'\n return _find_bit_in_sync(sync_file, bit, ['up', 'down'])", "def find_dark_states(excited_state, ground_states):", "def analyze(self, event):\n electrons = self.inputCollection(event)\n muons = Collection(event, \"Muon\")\n triggerObjects = self.triggerObjectCollection(event)\n\n selectedElectrons = []\n unselectedElectrons = []\n \n weight_reco_nominal = 1.\n weight_reco_up = 1.\n weight_reco_down = 1.\n\n weight_id_nominal = 1.\n weight_id_up = 1.\n weight_id_down = 1.\n\n for electron in electrons:\n # https://twiki.cern.ch/twiki/bin/view/CMS/CutBasedElectronIdentificationRun2\n if electron.pt>self.electronMinPt \\\n and math.fabs(electron.eta)<self.electronMaxEta \\\n and self.electronID(electron)\\\n and self.triggerMatched(electron, triggerObjects):\n\n dxy = math.fabs(electron.dxy)\n dz = math.fabs(electron.dz)\n \n if math.fabs(electron.eta) < 1.479 and (dxy>0.05 or dz>0.10):\n unselectedElectrons.append(electron)\n continue\n elif dxy>0.10 or dz>0.20:\n unselectedElectrons.append(electron)\n continue\n\n #reject electron if close-by muon\n if len(muons)>0:\n mindr = min(map(lambda muon: deltaR(muon, electron), muons))\n if mindr < 0.05:\n unselectedElectrons.append(electron)\n continue\n\n selectedElectrons.append(electron)\n \n #TODO: electron reco/ID SFs\n \n \n else:\n unselectedElectrons.append(electron)\n\n \n if not Module.globalOptions[\"isData\"] and self.storeWeights:\n \n self.out.fillBranch(self.outputName+\"_weight_reco_nominal\", weight_reco_nominal)\n self.out.fillBranch(self.outputName+\"_weight_reco_up\", weight_reco_up)\n self.out.fillBranch(self.outputName+\"_weight_reco_down\", weight_reco_down)\n\n self.out.fillBranch(self.outputName+\"_weight_id_nominal\",weight_id_nominal)\n self.out.fillBranch(self.outputName+\"_weight_id_up\",weight_id_up)\n self.out.fillBranch(self.outputName+\"_weight_id_down\",weight_id_down)\n\n self.out.fillBranch(\"n\"+self.outputName,len(selectedElectrons))\n\n for variable in self.storeKinematics:\n self.out.fillBranch(self.outputName+\"_\"+variable,map(lambda electron: getattr(electron,variable), selectedElectrons))\n\n setattr(event,self.outputName,selectedElectrons)\n setattr(event,self.outputName+\"_unselected\",unselectedElectrons)\n\n return True", "def associate_detections_to_trackers(detections,trackers,iou_threshold = 0.3):\n if(len(trackers)==0):\n return np.empty((0,2),dtype=int), np.arange(len(detections)), 
np.empty((0,5),dtype=int)\n\n iou_matrix = iou_batch(detections, trackers)\n\n if min(iou_matrix.shape) > 0:\n a = (iou_matrix > iou_threshold).astype(np.int32)\n if a.sum(1).max() == 1 and a.sum(0).max() == 1:\n matched_indices = np.stack(np.where(a), axis=1)\n else:\n matched_indices = linear_assignment(-iou_matrix)\n else:\n matched_indices = np.empty(shape=(0,2))\n\n unmatched_detections = []\n for d, det in enumerate(detections):\n if(d not in matched_indices[:,0]):\n unmatched_detections.append(d)\n unmatched_trackers = []\n for t, trk in enumerate(trackers):\n if(t not in matched_indices[:,1]):\n unmatched_trackers.append(t)\n\n #filter out matched with low IOU\n matches = []\n for m in matched_indices:\n if(iou_matrix[m[0], m[1]]<iou_threshold):\n unmatched_detections.append(m[0])\n unmatched_trackers.append(m[1])\n else:\n matches.append(m.reshape(1,2))\n if(len(matches)==0):\n matches = np.empty((0,2),dtype=int)\n else:\n matches = np.concatenate(matches,axis=0)\n\n return matches, np.array(unmatched_detections), np.array(unmatched_trackers)", "def get_light_sbc(filenames, onoff=True):\n if onoff:\n param = \"on\"\n else:\n param = \"off\"\n return filter_filenames(filenames, [param])", "def trackBunchTurns(self, bunch):\n\t\tturns = self.__turns\n\t\t#start\n\t\tfor i in range(turns-1):\t\t\t\n\t\t\tself.trackBunch(bunch)\t\n\t\t\tsyncPart = bunch.getSyncParticle()\n\t\t\ttime = syncPart.time()\n\t\t\tself.setTimeDepStrength(time)\n\t\t\tprint \"debug trackBunchTurns time\",time,\"in\",i,\"turn\"\n\t\t#getsublattice\n\t\t#sublattice.trackBunch(bunch)", "def findIntersect(wires):\n allSets = list(map(lambda w: coordsFor(w), wires))\n baseSet = allSets[0]\n for s in allSets[1:]:\n baseSet.intersection_update(s)\n central = (0, 0)\n distances = list(map(lambda c: manhattan(central, c), baseSet))\n return min(distances)", "def get_matching_blossom5(graph):\n\n edges = [] # Get all possible edges between the anyons and their weights\n\n def edge_func(edges, e0, e1, weight):\n edges.append([e0, e1, weight])\n\n all_anyons, nxgraph = get_graph_edges(graph, edge_func, edges)\n\n output = pm.getMatching(len(all_anyons), edges) if all_anyons != [] else []\n return [[all_anyons[i0], all_anyons[i1]] for i0, i1 in enumerate(output) if i0 > i1]", "def createBridgeSets(blocksize,operating,MPSS):\n sets = tuple()\n xul = blocksize[0]-operating\n xdl = operating\n yul = int(blocksize[0]/2+operating)\n ydl = int(blocksize[0]/2-operating)\n xts = xul\n xbs = xdl\n for i in range(MPSS):\n sets+=(tuple(product(numpy.arange(xdl,xul,1),numpy.arange(ydl,yul,1))),)\n xdl+=operating\n xul-=operating\n ydl-=operating\n yul+=operating\n return sets,sets[::-1]", "def calculate_rise_set_lsts(self, telescope_latitude, horizon_buffer=0.04364):\n lat_rad = telescope_latitude.rad\n buff = horizon_buffer\n\n lon, lat = self.get_lon_lat()\n\n tans = np.tan(lat_rad) * np.tan(lat.rad)\n\n with warnings.catch_warnings():\n warnings.filterwarnings(\n \"ignore\", message=\"invalid value encountered\", category=RuntimeWarning\n )\n rise_lst = lon.rad - np.arccos((-1) * tans) - buff\n set_lst = lon.rad + np.arccos((-1) * tans) + buff\n\n rise_lst[rise_lst < 0] += 2 * np.pi\n set_lst[set_lst < 0] += 2 * np.pi\n rise_lst[rise_lst > 2 * np.pi] -= 2 * np.pi\n set_lst[set_lst > 2 * np.pi] -= 2 * np.pi\n\n self._rise_lst = rise_lst\n self._set_lst = set_lst", "def _find_trial_beam_breaks_regions_in_sync(sync_file):\n bit = 'BEAM_BREAK'\n return _find_bit_in_sync(sync_file, bit, ['down', 'up'])", "def occulting(groups, colour, 
period):\n if groups == [1]:\n return [\n ('Off', 1000),\n (colour, period - 1000)\n ]\n return light_sequence(groups, 'Off', colour, period, 500, 1000)", "def intersection(arrays):\n # Create hash table (dict) to store numbers in for faster O(1) lookup (for \n # any individual lookup):\n # numbers = {}\n\n # Create list for intersection of the sets:\n # intersection = []\n\n # Populate hash table with numbers from the first list (keys), because any numbers \n # not in the first list will not be in the intersection of the lists, by definition.\n numbers = {item:False for item in arrays[0]}\n # Now check the other input lists in order, removing any number/item that is not in both:\n for list in arrays[1:]:\n for item in list: # NOT actually O(n**2); just O(n) for the whole input matrix.\n # Mark as True to flag any items that are in the intersection of the two lists:\n if item in numbers:\n numbers[item] = True\n # Keep only the numbers that are in the intersection of the two lists:\n numbers = {key:value for key, value in numbers.items() if value == True}\n # Mark all as False again to start a fresh comparison with the next list:\n for item in numbers:\n numbers[item] = False\n\n return [*numbers.keys()]", "def find_starts(config, data):\n\n trigger = butter_bandpass_filter(\n data, config.bandpass_lower, config.bandpass_upper,\n config.sampling_rate, 6)\n trigger = np.absolute(trigger)\n trigger = butter_lowpass_filter(\n trigger, config.lowpass_freq, config.sampling_rate, 6)\n\n # transient = 0.0005\n # start_idx = int(transient * config.sampling_rate)\n start_idx = 0\n average = np.average(trigger[start_idx:])\n maximum = np.max(trigger[start_idx:])\n minimum = np.min(trigger[start_idx:])\n middle = (np.max(trigger[start_idx:]) - min(trigger[start_idx:])) / 2\n if average < 1.1 * middle:\n print()\n print(\"Adjusting average to avg + (max - avg) / 2\")\n average = average + (maximum - average) / 2\n offset = -int(config.trigger_offset * config.sampling_rate)\n\n if config.trigger_rising:\n trigger_fn = lambda x, y: x > y\n else:\n trigger_fn = lambda x, y: x < y\n\n # The cryptic numpy code below is equivalent to looping over the signal and\n # recording the indices where the trigger crosses the average value in the\n # direction specified by config.trigger_rising. 
It is faster than a Python\n # loop by a factor of ~1000, so we trade readability for speed.\n trigger_signal = trigger_fn(trigger, average)[start_idx:]\n starts = np.where((trigger_signal[1:] != trigger_signal[:-1])\n * trigger_signal[1:])[0] + start_idx + offset + 1\n if trigger_signal[0]:\n starts = np.insert(starts, 0, start_idx + offset)\n\n # plt.plot(data)\n # plt.plot(trigger*100)\n # plt.axhline(y=average*100)\n # plt.show()\n\n return starts, trigger, average", "def denoise(self, offset_arr):\n\n measure_t, otts = zip(*offset_arr)\n all_otts = []\n all_times = []\n otts_per_h = []\n times_per_h = []\n min_arr = []\n hold_x = 0\n hold_y = 0\n ctr = 0\n n = 1\n begin = 0\n end = 120 # N refering to the amount of seconds indicating the first interval\n const = 120 # the coefficient indicating the break interval: every const seconds: must be set the same as end value\n for timel in measure_t: # divide the points every 60 seconds (probe)\n ctr += 1\n if begin <= timel < end:\n times_per_h.append(timel)\n otts_per_h.append(otts[ctr - 1])\n else:\n hold_x = timel\n hold_y = otts[ctr - 1]\n all_times.append(times_per_h)\n all_otts.append(otts_per_h)\n times_per_h = []\n otts_per_h = []\n times_per_h.append(hold_x)\n otts_per_h.append(hold_y) # for the next round\n begin = end\n n += 1\n end = n * const\n if ctr == len(measure_t) and otts_per_h: # belonging to the last hour/min only going through the if statement\n all_times.append(times_per_h)\n all_otts.append(otts_per_h)\n\n for i in range(len(all_otts)):\n try:\n idx = np.array(all_otts[i]).argmin()\n except ValueError as e:\n logging.error(\"{}/{}/{}: denoise: ValueError at argmin -- returning empty array! offset_arr: {}, error e {}\".format(self.domain, self.ip4, self.ip6, offset_arr, e))\n return []\n min_per_probe = all_otts[i][idx]\n assoc_x_per_probe = all_times[i][idx]\n min_arr.append((assoc_x_per_probe, min_per_probe))\n\n return min_arr", "def get_all_setups_nodes():\n ta_roots = get_all_setups_roots()\n ta_nodes = [TechAnim_Setup(x) for x in ta_roots]\n return ta_nodes", "def find_tfl_lights(image: np.ndarray):\n kernel = np.array(\n [[0, 0, 0],\n [0, 0, 0],\n [0, 1, 0],\n [1, 3, 1],\n [0, 1, 0]])\n\n kernel = kernel - kernel.mean()\n\n red_image = image.copy()\n red_image = red_image[:, :, 0]\n _, red_image = cv2.threshold(red_image, 200, 255, cv2.THRESH_BINARY)\n output = cv2.filter2D(red_image, -1, kernel)\n output_copy = output.copy()\n output = ndimage.maximum_filter(output, size=30)\n output = output - output_copy\n mask = ((output == 0) & (output_copy > 0))\n red_points = np.where(mask)\n positions = []\n final_red_points = []\n for point1 in range(len(red_points[0])):\n point = (red_points[0][point1], red_points[1][point1])\n pixel = image[point[0], point[1]]\n if (pixel[1] < 170 or pixel[2] < 120) and pixel[0] >= 200:\n final_red_points.append(point)\n final_red_points = filter_points(final_red_points)\n positions += final_red_points\n auxilary = ['r'] * len(positions)\n red_x = [val[1] for val in final_red_points]\n red_y = [val[0] for val in final_red_points]\n green_image = image.copy()\n green_image = green_image[:, :, 1]\n _, green_image = cv2.threshold(green_image, 190, 255, cv2.THRESH_BINARY)\n output = cv2.filter2D(green_image, -1, kernel)\n output_copy = output.copy()\n output = ndimage.maximum_filter(output, size=30)\n output = output - output_copy\n mask = ((output == 0) & (output_copy > 0))\n green_points = np.where(mask)\n final_green_points = []\n for point1 in range(len(green_points[0])):\n point = 
(green_points[0][point1], green_points[1][point1])\n pixel = image[point[0], point[1]]\n if pixel[0] <= 180 and pixel[1] >= 220 and pixel[2] >= 160:\n final_green_points.append(point)\n\n final_green_points = filter_points(final_green_points)\n positions += final_green_points\n auxilary += ['g'] * len(final_green_points)\n green_x = [val[1] for val in final_green_points]\n green_y = [val[0] for val in final_green_points]\n print(f\"There are {len(green_x) + len(red_x)} points\")\n return positions, auxilary", "def _random_subset(self, pa_nodes, seq, m, rng):\n targets = set()\n while len(targets) < m:\n x = rng.choice(seq)\n # if x in pa_nodes:\n if pa_nodes.get(x, False):\n targets.add(x)\n else:\n pass\n return targets", "def performance_comparison_of_sets( predicted, known ):\n ### Example:\n ### predicted = Set( 2,3,4,5,10,11,12,13,14,15,20,21,22,23,24,25,26,27,28 )\n ### known = Set( 1,2,3,4,5,10,11,12,13,14,15, 21,22,23,24,25,26 )\n ### Return structure:\n ### [\n ### [ [2,3,4,5,10,11,12,13,14,15], [21,22,23,24,25,26] ], # TP correct predicted\n ### [ [20], [27,28] ], # FP over predicted\n ### [ [1] ], # FN under predicted\n ### ]\n\n # make CORRECT, UNDER and OVER predicted list\n correct = predicted.intersection( known )\n overpredicted = predicted.difference( known )\n underpredicted = known.difference( predicted )\n\n returnlists = []\n for item in ( correct, overpredicted, underpredicted ):\n if item:\n item = list(item)\n item.sort()\n tracks = [ [ item[0] ] ]\n for coord in item[1:]:\n if coord == max(tracks[-1])+1:\n tracks[-1].append(coord)\n else:\n tracks.append( [ coord ] )\n returnlists.append( tracks )\n else:\n # no overlap of this kind!\n returnlists.append( [] )\n\n # return the data structure\n return returnlists", "def minmax():\n minmaxlist = []\n timelist = []\n #create a list of the filenames of all sentinel-images\n s2files = [f for f in listdir(s2path) if endswith(join(s2path, f),\".tif\")==True]\n print(\"STEP 1/2\")\n print(\"EXPORTING MIN AND MAX VALUES PER BAND\")\n for i in s2files:\n start = time.time()\n nlfile = nlpath + \"/\" + i\n s2file = s2path+\"/\"+i\n #open the file\n s2raster = gdal.Open(s2file) \n #iterate over the bands of each image\n for n in range(s2raster.RasterCount):\n f = n + 1\n s2band = s2raster.GetRasterBand(f)\n #read the pixels of the band as an numpy-array\n s2band = s2band.ReadAsArray()\n #resize the bands to have all images in the same size\n s2band = np.resize(s2band,(1050,1050))\n #get the min and max values of each band to be able to 0-1 normalize after\n min = s2band.min()\n max = s2band.max()\n #check if there are already values for the band\n if len(minmaxlist) < s2raster.RasterCount + 1:\n s2minmax = [min,max]\n minmaxlist.append(s2minmax)\n # if the min value of the open band is smaller than the saved minimal value, overwrite it\n if min < minmaxlist[n][0]:\n minmaxlist[n][0] = min\n #if the max value of the open band is higher than the saves maximum value, overwrite it\n if max > minmaxlist[n][1]:\n minmaxlist[n][1] = max\n #open the nightlight img\n nlraster = gdal.Open(nlfile)\n nlband = nlraster.GetRasterBand(1)\n #read the only band of the image as a numpy-array\n nlband = nlband.ReadAsArray()\n #resize it the same way as the sentinel images\n nlband = np.resize(nlband,(1050,1050))\n #get the min and max values of the band\n nlmin = nlband.min()\n nlmax = nlband.max()\n #check if there are already information about min and max values for the nightlight images\n if len(minmaxlist) < s2raster.RasterCount + 1:\n 
nlminmax = [nlmin,nlmax]\n minmaxlist.append(nlminmax)\n #if the min value of the open nightlight image is smaller than the saved minimal value, overwrite it\n if nlmin < minmaxlist[16][0]:\n minmaxlist[16][0] = nlmin\n #if the max value of the open nightlight image is higher than the saves maximum value, overwrite it\n if nlmax > minmaxlist[16][1]:\n minmaxlist[16][1] = nlmax\n end = time.time()\n timelist.append(end-start)\n print(\"Step 1/2\",str(s2files.index(i)+1) + \"/\" + str(len(s2files)),\"Est. time left:\",time.strftime('%H:%M:%S',time.gmtime(int(sum(timelist)/len(timelist)*(len(s2files)-s2files.index(i))))))\n #throw out the Quality Bands (QA10,QA20,QA60)\n minmaxlist = [i for j,i in enumerate(minmaxlist) if j not in [13,14,15]]\n return minmaxlist", "def build_amg_index_sets(L_sizes):\n neqns = L_sizes[0][0]\n velocityDOF=[]\n for start in range(1,3):\n velocityDOF.append(np.arange(start=start,\n stop=1+neqns,\n step=3,\n dtype='i'))\n velocityDOF_full=np.vstack(velocityDOF).transpose().flatten()\n velocity_u_DOF = []\n velocity_u_DOF.append(np.arange(start=0,\n stop=2*neqns//3,\n step=2,\n dtype='i'))\n velocity_u_DOF_full = np.vstack(velocity_u_DOF).transpose().flatten()\n velocity_v_DOF = []\n velocity_v_DOF.append(np.arange(start=1,\n stop=1+2*neqns//3,\n step=2,\n dtype='i'))\n velocity_v_DOF_full = np.vstack(velocity_v_DOF).transpose().flatten()\n isvelocity = PETSc.IS()\n isvelocity.createGeneral(velocityDOF_full)\n isu = PETSc.IS()\n isu.createGeneral(velocity_u_DOF_full)\n isv = PETSc.IS()\n isv.createGeneral(velocity_v_DOF_full)\n return [isvelocity, isu, isv]", "def getSets(unique_name=None):", "def eeg_peaks(array,tim,onset,plot='false'):\n\tp1_i,n1_i,p2_i = onset+56,onset+104,onset+176\n\twin_p1,win_n1,win_p2 = 15,20,40\n\t# determine P1,N1 and P2 values on the basis of the maximum in GFP in a window around the expected values\n\tidx_p1 = np.logical_and(tim>p1_i-win_p1, tim<p1_i+win_p1)\n\tidx_n1 = np.logical_and(tim>n1_i-win_n1, tim<n1_i+win_n1)\n\tidx_p2 = np.logical_and(tim>p2_i-win_p2, tim<p2_i+win_p2)\n\tp1 = np.max(array[idx_p1])\n\ttp1 = tim[idx_p1][array[idx_p1].argmax()]\n\tn1 = np.min(array[idx_n1])\n\ttn1 = tim[idx_n1][array[idx_n1].argmin()]\n\tp2 = np.max(array[idx_p2])\n\ttp2 = tim[idx_p2][array[idx_p2].argmax()]\n\n\tlineax = dict(linewidth=1, color='black', linestyle='--')\n\tlinep1 = dict(linewidth=1, color='red', linestyle='--')\n\tlinen1 = dict(linewidth=1, color='green', linestyle='--')\n\tlinep2 = dict(linewidth=1, color='blue', linestyle='--')\n\n\tif plot == 'true':\t\t\n\t\tfig = plt.figure(19,figsize=[7,5])\n\t\tax = fig.add_subplot(111, autoscale_on=False, xlim=[onset-100,tp2+200], ylim=[1.25*np.min([p1,n1,p2]),1.25*np.max([p1,n1,p2])])\n\t\tplt.plot(tim,array,'k-',lw=3)\n\t\tplt.plot(tp1,p1,'ro')\n\t\tplt.plot(tn1,n1,'go')\n\t\tplt.plot(tp2,p2,'bo')\n\t\tax.axvline(p1_i-win_p1,**linep1)\n\t\tax.axvline(p1_i+win_p1,**linep1)\n\t\tax.axvline(n1_i-win_n1,**linen1)\n\t\tax.axvline(n1_i+win_n1,**linen1)\n\t\tax.axvline(p2_i-win_p2,**linep2)\n\t\tax.axvline(p2_i+win_p2,**linep2)\n\t\tax.axhline(**lineax)\n\t\tplt.text(tp1-120,1.25*p1,'P1 = %.2f muV at %.0f ms' %(p1,tp1),fontsize=10)\n\t\tplt.text(tn1-40,1.1*n1,'N1 = %.2f muV at %.0f ms' %(n1,tn1),fontsize=10)\n\t\tplt.text(tn1+40,1.1*p2,'P2 = %.2f muV at %.0f ms' %(p2,tp2),fontsize=10)\n\t\tplt.xlabel('time (ms)',fontsize = 13)\n\t\tplt.ylabel('Amplitude',fontsize = 13)\n\treturn [p1,n1,p2,tp1,tn1,tp2]", "def covid(covidSet):\n newcovidSet = set()\n for x in covidSet:\n player = playerList[x]\n 
player.changeImage(1)\n\n #gotta check if anyone is close to this guy\n for idx, y in enumerate(playerList):\n if idx == x or idx in covidSet or idx in newcovidSet:\n continue\n if abs(player.rect.x - y.rect.x) > covidRange or abs(player.rect.y - y.rect.y) > covidRange:\n continue #to reduce amount of astar checks required\n else:\n xx = changeCoord(player.rect.x)\n xy = changeCoord(player.rect.y)\n yx = changeCoord(y.rect.x)\n yy = changeCoord(y.rect.y)\n path = aStar.astar(maze, (xy, xx), (yy, yx))\n if path != None and len(path) <= mazeRange:\n if randChance(covidChance):\n newcovidSet.add(idx)\n print(\"Person\", x, \"infected\", \"Person\", idx)\n edges[\"edges\"].append({\"from\": x, \"to\": idx})\n else:\n y.changeImage(2)\n # uncomment to see astar algo (that was too far apart)\n #else:\n # if path == None:\n # print(\"none\")\n # else:\n # print(len(path), path)\n return set.union(covidSet, newcovidSet)" ]
[ "0.6857784", "0.6249284", "0.58837444", "0.5452273", "0.53577805", "0.5303218", "0.52744794", "0.52505463", "0.51989776", "0.51802486", "0.51636773", "0.50905186", "0.507182", "0.5051442", "0.5007559", "0.50056607", "0.49910834", "0.4961425", "0.4949146", "0.493811", "0.49135157", "0.49105212", "0.49056417", "0.48830733", "0.48685992", "0.48603174", "0.4848229", "0.48321086", "0.48042777", "0.47821876", "0.47757435", "0.4767048", "0.4765696", "0.47633648", "0.47509626", "0.47479573", "0.4718518", "0.47159487", "0.47055426", "0.4698063", "0.46974832", "0.46953535", "0.4694091", "0.4691179", "0.46858895", "0.46830046", "0.46810395", "0.46765524", "0.46642125", "0.46454656", "0.46438295", "0.46283516", "0.46255508", "0.46239224", "0.46237698", "0.46159163", "0.46082196", "0.46046174", "0.45912626", "0.45858124", "0.4584342", "0.45794848", "0.45726305", "0.4567295", "0.4562011", "0.45587313", "0.45538047", "0.45506278", "0.4545687", "0.4543421", "0.4540708", "0.45392558", "0.4529454", "0.45134875", "0.45065075", "0.4505932", "0.4501985", "0.44990826", "0.4495074", "0.44833398", "0.44735572", "0.44697222", "0.44684306", "0.4465541", "0.44651774", "0.4449681", "0.44485477", "0.444564", "0.44361293", "0.44357154", "0.4435594", "0.44350225", "0.44332066", "0.44280732", "0.44272992", "0.44264206", "0.44258034", "0.4424545", "0.44241107", "0.44170743" ]
0.7570172
0
Estimate whether the animal was running during each trial. This function first smooths the running trace according to smoothsize (noncausal), then uses the average of N presamples before the onset to estimate whether running was higher than the threshold.
def estimate_running_each_trial(running_trace, trial_onset, smoothsize=10, presamples=4, threshold=3, showfig=False): smoothwin = np.ones(smoothsize)/(smoothsize) running_trace_smooth = np.convolve(running_trace, smoothwin, mode='same') trial_onset_ind = np.where(trial_onset)[0] presamples_inds = np.arange(-presamples, 0) + trial_onset_ind[:, np.newaxis] pretrial_avg = running_trace_smooth[presamples_inds].mean(axis=1) running_each_trial = pretrial_avg > threshold if showfig: plt.cla() plt.plot(running_trace_smooth, '0.8') plt.plot(trial_onset_ind, pretrial_avg, 'xg') plt.plot(trial_onset_ind, running_each_trial*running_trace_smooth.max(), 'og') plt.axhline(threshold, color='k') plt.legend(['running_trace_smooth', 'pretrial_avg', 'running_each_trial'], loc='upper right') plt.show() return running_each_trial, running_trace_smooth
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def punches(self):\n #:TODO Need to parameterize n\n # Initialize smoothing function\n # Also because I can't take the second derivitive\n\n n = 3\n assert (len(self.averages)==len(self.timestamps))\n size = len(self.averages)\n slopes = []\n for t in [0,size-n]:\n averages = np.asarray(self.averages[t:size])\n timestamps = np.asarray(self.timestamps[t:size])\n \"\"\"\n slope = np.absolute((np.corrcoef(averages,\n timestamps))*np.std(averages)/np.std(timestamps))\n \"\"\"\n slope = np.absolute(np.polyfit(timestamps, averages, 1)[0])*1000000\n #plt.scatter(timestamps, averages)\n slopes.append(slope)\n # If you were punching you are likely still punching need to set a weighting factor to this somehow\n # print(slopes[1])\n self.smoothing_queue.pop(0)\n if self.SIG_DELTA_AVERAGE < slopes[1]:\n self.smoothing_queue.append(1)\n else:\n self.smoothing_queue.append(0)\n if self.smoothing_queue.count(1) > len(self.smoothing_queue)/2:\n punching = True\n else: punching = False\n # print(self.smoothing_queue)\n\n return punching\n #self.counter +=1\n \"\"\"\n if self.counter==self.timing:\n self.counter == 0\n else:\n \"\"\"", "def is_SNP(count):\n counts = sum(count)\n return counts and float(counts - max(count)) / counts > MAX_NOISE", "def stationarity(self, nfactor=20):\n \n tau = self.sampler.get_autocorr_time(tol=0)\n converged = np.all(tau * nfactor < self.sampler.iteration)\n return converged", "def stop_on_low_ais_ess(trial_id, result):\n return result[\"ais_effective_sample_size\"] < 0.1", "def current_threshold_hit(self):\n\n\t\tnew_current = self.robot.pdp.getCurrent(const.CARGO_PDP_ID)\n\n\t\tself._current_samples.append(new_current)\n\n\t\tif len(self._current_samples) > 10:\n\t\t\tself._current_samples.pop(0)\n\n\t\t# Calculate new running average\n\t\tnew_avg = sum(self._current_samples) / len(self._current_samples)\n\n\t\treturn new_avg > const.CARGO_INTAKE_THRESHOLD", "def is_artificial(self):\n\t\treturn 0", "def epidemic_finish(states, iteration):\n return np.sum(states) == 0 and iteration > 10", "def autorange(self, analyte=None, gwin=11, win=40, smwin=5,\n conf=0.01, on_mult=(1., 1.), off_mult=(1., 1.), d_mult=1.2):\n\n if analyte is None:\n analyte = self.internal_standard\n\n bins = 50 # determine automatically? As a function of bkg rms noise?\n\n v = self.focus[analyte] # get trace data\n vl = np.log10(v[v > 1]) # remove zeros from value\n x = np.linspace(vl.min(), vl.max(), bins) # define bin limits\n\n n, _ = np.histogram(vl, x) # make histogram of sample\n kde = gaussian_kde(vl)\n yd = kde.pdf(x) # calculate gaussian_kde of sample\n\n mins = self.findmins(x, yd) # find minima in kde\n\n vs = fastsmooth(v, gwin)\n bkg = vs < 10**(d_mult * mins[0]) # set background as lowest distribution\n if not bkg[0]:\n bkg[0] = True\n\n # assign rough background and signal regions based on kde minima\n self.bkg = bkg\n self.sig = ~bkg\n\n # remove transitions by fitting a gaussian to the gradients of\n # each transition\n # 1. calculate the absolute gradient of the target trace.\n g = abs(fastgrad(v, gwin))\n # 2. 
determine the approximate index of each transition\n zeros = bool_2_indices(bkg).flatten()\n if zeros[0] == 0:\n zeros = zeros[1:]\n if zeros[-1] == bkg.size:\n zeros = zeros[:-1]\n tran = [] # initialise empty list for transition pairs\n\n for z in zeros: # for each approximate transition\n # isolate the data around the transition\n if z - win > 0:\n xs = self.Time[z - win:z + win]\n ys = g[z - win:z + win]\n # determine type of transition (on/off)\n # checkes whether first - last value in window is\n # positive ('on') or negative ('off')\n tp = np.diff(v[z - win:z + win][[0, -1]]) > 0\n\n else:\n xs = self.Time[:z + win]\n ys = g[:z + win]\n # determine type of transition (on/off)\n tp = np.diff(v[:z + win][[0, -1]]) > 0\n # determine location of maximum gradient\n c = self.Time[z] # xs[ys == np.nanmax(ys)]\n try: # in case some of them don't work...\n # fit a gaussian to the first derivative of each\n # transition. Initial guess parameters are determined\n # by:\n # - A: maximum gradient in data\n # - mu: c\n # - sigma: half the exponential decay coefficient used\n # for despiking OR 1., if there is no exponent.\n try:\n width = 0.5 * abs(self.despike_params['exponent'])\n except:\n width = 1.\n # The 'sigma' parameter of curve_fit:\n # This weights the fit by distance from c - i.e. data closer\n # to c are more important in the fit than data further away\n # from c. This allows the function to fit the correct curve,\n # even if the data window has captured two independent\n # transitions (i.e. end of one ablation and start of next)\n # ablation are < win time steps apart).\n pg, _ = curve_fit(gauss, xs, ys,\n p0=(np.nanmax(ys),\n c,\n width),\n sigma=abs(xs - c) + .1)\n # get the x positions when the fitted gaussian is at 'conf' of\n # maximum\n # determine transition FWHM\n fwhm = 2 * pg[-1] * np.sqrt(2 * np.log(2))\n # apply on_mult or off_mult, as appropriate.\n if tp:\n lim = np.array([-fwhm, fwhm]) * np.array(on_mult) + pg[1]\n else:\n lim = np.array([-fwhm, fwhm]) * np.array(off_mult) + pg[1]\n\n tran.append(lim)\n except:\n warnings.warn((\"\\nSample {:s}: \".format(self.sample) +\n \"Transition identification at \" +\n \"{:.1f} failed.\".format(self.Time[z]) +\n \"\\nPlease check the data plots and make sure \" +\n \"everything is OK.\\n(Run \" +\n \"'trace_plots(ranges=True)'\"),\n UserWarning)\n pass # if it fails for any reason, warn and skip it!\n\n # remove the transition regions from the signal and background ids.\n for t in tran:\n self.bkg[(self.Time > t[0]) & (self.Time < t[1])] = False\n self.sig[(self.Time > t[0]) & (self.Time < t[1])] = False\n\n self.trn = ~self.bkg & ~self.sig\n\n self.mkrngs()\n\n # final check to catch missed transitions\n # calculate average transition width\n tr = self.Time[self.trn ^ np.roll(self.trn, 1)]\n tr = np.reshape(tr, [tr.size // 2, 2])\n self.trnrng = tr\n trw = np.mean(np.diff(tr, axis=1))\n\n corr = False\n for b in self.bkgrng.flat:\n if (self.sigrng - b < 0.3 * trw).any():\n self.bkg[(self.Time >= b - trw / 2) &\n (self.Time <= b + trw / 2)] = False\n self.sig[(self.Time >= b - trw / 2) &\n (self.Time <= b + trw / 2)] = False\n corr = True\n\n if corr:\n self.mkrngs()\n\n # number the signal regions (used for statistics and standard matching)\n n = 1\n for i in range(len(self.sig) - 1):\n if self.sig[i]:\n self.ns[i] = n\n if self.sig[i] and ~self.sig[i + 1]:\n n += 1\n self.n = int(max(self.ns)) # record number of traces\n\n return", "def get_excess_smoothing_status(self) -> bool:\n return self._excess_smoothing_live.get()", 
"def is_sampled(z):\n return True", "def preprocessing(image, smooth_size, folder):\n from skimage.restoration import denoise_tv_chambolle\n \n dim = int(image.shape[0] / 50.)\n smoothed = rank.median(image, disk(smooth_size))\n #smoothed = denoise_tv_chambolle(image, weight=0.002)\n smoothed = rank.enhance_contrast(smoothed, disk(smooth_size))\n \n pl.subplot(2, 3, 1)\n pl.title(\"after median\")\n pl.imshow(smoothed)\n pl.gray()\n # If after smoothing the \"dot\" disappears\n # use the image value\n \n # TODO: wat do with thresh?\n try:\n im_max = smoothed.max()\n thresh = threshold_otsu(image)\n except:\n im_max = image.max()\n thresh = threshold_otsu(image)\n\n \n if im_max < thresh:\n labeled = np.zeros(smoothed.shape, dtype=np.int32)\n \n else:\n binary = smoothed > thresh\n \n # TODO: this array size is the fault of errors\n bin_open = binary_opening(binary, np.ones((dim, dim)), iterations=5)\n bin_close = binary_closing(bin_open, np.ones((5,5)), iterations=5)\n \n pl.subplot(2, 3, 2)\n pl.title(\"threshold\")\n pl.imshow(binary, interpolation='nearest')\n pl.subplot(2, 3, 3)\n pl.title(\"opening\")\n pl.imshow(bin_open, interpolation='nearest')\n pl.subplot(2, 3, 4)\n pl.title(\"closing\")\n pl.imshow(bin_close, interpolation='nearest')\n \n distance = ndimage.distance_transform_edt(bin_open)\n local_maxi = peak_local_max(distance,\n indices=False, labels=bin_open)\n \n markers = ndimage.label(local_maxi)[0]\n \n labeled = watershed(-distance, markers, mask=bin_open)\n pl.subplot(2, 3, 5)\n pl.title(\"label\")\n pl.imshow(labeled)\n #pl.show()\n pl.savefig(folder)\n pl.close('all')\n\n #misc.imsave(folder, labeled)\n# labels_rw = random_walker(bin_close, markers, mode='cg_mg')\n# \n# pl.imshow(labels_rw, interpolation='nearest')\n# pl.show()\n\n return labeled", "def choose_to_stop_early(self):\n # return self.cumulated_num_tests > 10 # Limit to make 10 predictions\n # return np.random.rand() < self.early_stop_proba\n batch_size = 30 # See ingestion program: D_train.init(batch_size=30, repeat=True)\n num_examples = self.metadata_.size()\n num_epochs = self.cumulated_num_steps * batch_size / num_examples\n return num_epochs > self.num_epochs_we_want_to_train # Train for certain number of epochs then stop", "def was_pig_caught(prize):\n if prize > 20:\n return True\n return False", "def _compute_is_terminal(self):\n # by default the episode will terminate when all samples are labelled\n done = LalEnv._compute_is_terminal(self)\n # it also terminates when self.n_horizon datapoints were labelled\n if np.size(self.indeces_known) == self.n_horizon:\n done = True\n return done", "def num_wet(self):\n return np.sum(self.array == 5)", "def DetectPulseOnset(self, asig, fs, wMS):\n # the percentage of the maximal value of the slope sum function\n # to detect the onset\n AmplitudeRatio = .01\n\n # low pass filter\n sig = self.zpIIR(asig, 3, .1, 20, 5 * 2/fs)\n wSmp = int(np.round(wMS*fs/1000))\n\n BlankWindowRatio = .9\n\n # delta x\n diffsig = np.diff(sig)\n\n z = np.empty((sig.size - 1 - wSmp, 1))\n z[:] = np.NaN\n\n # calculate slope sum function\n for i in range(wSmp,sig.size-1):\n subsig = diffsig[i-wSmp:i]\n z[i-wSmp] = np.sum(subsig[subsig>0])\n\n z0 = np.mean(z)\n onset = [0]\n tPnt = []\n zThres = 0\n blankWin = int(np.round(400*fs/1000))\n subIdx = np.r_[onset[0]: onset[0] + 4*blankWin + 1]\n MedianArrayWinSize = 5\n\n # this value controls the final acceptance\n PrcofMaxAMP = .2\n SSFAmpArray = np.ones((MedianArrayWinSize,1))*(np.max(z) - np.min(z)) * PrcofMaxAMP\n # the percentage 
of maximal amplitude for threshold crossing\n DetectionThreshold = .2\n SSFCrossThresholdArray = np.ones((MedianArrayWinSize,1))*z0*DetectionThreshold\n idx = 1\n\n # Keep loop going while onsets detected\n while(1):\n\n # look for the first location where z > z0\n try:\n\n # Look in z[subIdx] (and make sure it doesn't go past z's size)\n # find first index where z > the mean of z\n tempIndex = np.trim_zeros(subIdx*(z.size>subIdx), 'b')\n ix = np.amin(np.where(z[tempIndex] > z0)[0])\n except:\n break\n\n ix = tempIndex[ix]\n tPnt.append(ix)\n srcWin = np.r_[np.maximum(0,ix - wSmp): ix + wSmp]\n #if the window has passed the length of the data, then exit\n if srcWin[-1] >= len(z):\n break\n\n # This section of code is to remove the initial zero-region in the SSF function before looking for onset (if such region exists)\n zPnt = np.where(z[srcWin] == 0)\n\n if zPnt[0].size != 0:\n zPnt = srcWin[zPnt[0]]\n\n if np.any(zPnt < ix):\n srcWin = np.r_[zPnt[np.max(np.where(zPnt < ix))]: ix + wSmp]\n\n # accept the window\n if ( np.max(z[srcWin]) - np.min(z[srcWin]) > zThres):\n\n # calculate the threshold for next cycle\n SSFAmp = (np.max(z[srcWin]) - np.min(z[srcWin])) * PrcofMaxAMP\n SSFAmpArray[np.remainder(idx, MedianArrayWinSize)] = SSFAmp\n zThres = np.median(SSFAmpArray)\n SSFCrossThresholdArray[np.remainder(idx, MedianArrayWinSize)] = np.mean(z[srcWin])*DetectionThreshold\n z0 = np.median(SSFCrossThresholdArray)\n minSSF = np.min(z[srcWin]) + SSFAmp *AmplitudeRatio\n a = srcWin[0] + np.min(np.where(z[srcWin] >= minSSF))\n onset.append(a)\n\n # adaptively determine analysis window for next cycle\n bw = blankWin\n subIdx = np.round(np.r_[a + bw: a + 3*bw])\n idx = idx + 1\n\n else:\n # no beat detected\n subIdx = np.round(subIdx + blankWin)\n\n return onset", "def is_smelling(self,conc_array):\n if conc_array[int(self.x)][int(self.y)]>self.threshold:\n self.smell_timer = self.Timer(self.T,self.lamda)\n #Nav mode three and four need to know whether the moth is smelling\n #at a specific moment, for that reason they use Tfirst.\n self.Tfirst = self.T\n self.odor = True #this datum will be useful in the graphical functions\n return True\n elif self.turned_on:\n self.odor = False\n if self.smell_timer.is_running(self.T):\n return True #note - even though the there is no detection, the navigator stay in nav mode.\n else:\n self.odor = False\n return False", "def _compute_is_terminal(self):\n new_score = self.episode_qualities[-1]\n # by default the episode will terminate when all samples are labelled\n done = LalEnv._compute_is_terminal(self)\n # it also terminates when a quality reaches a predefined level\n if new_score >= self.target_quality:\n done = True\n return done", "def Continue():\n # adjust this to take as many steps as you need\n return warp.top.it <= 500", "def is_over(self, state: StonehengeState) -> bool:\n total_result = state.hori_result + state.left_result + state.right_result\n total_line = len(total_result)\n p1_taken = 0\n p2_taken = 0\n # all_taken = True\n for item in total_result:\n if item == '1':\n p1_taken+=1\n elif item =='2':\n p2_taken += 1\n # else:\n # all_taken = False\n # print('p1 taken:' + str(p1_taken))\n # print('p2 taken:' + str(p2_taken))\n # print('p1_taken more than half?')\n # print(float(p1_taken) >= total_line/2)\n # print('p2_taken more than half?')\n # print(float(p2_taken) >= total_line/2)\n return float(p1_taken) >= total_line/2 or float(p2_taken) >= total_line/2", "def win_condition(self):\n return self.wave == 8", "def run_experiment() -> 
List[bool]:\n return [random.random() < 0.5 for _ in range(1000)]", "def precondition(amp):\n n = len(amp)\n mean = np.mean(amp[:n/5])\n return -(amp-mean)", "def test_next_window_time_sample_passed(self):\n test_window_scheme = WindowingScheme(self.window_test_filter, 3)\n # Value 15 will be filtered as it ranges between lower and upper bound limits\n filtered_value = test_window_scheme.filter(self.middle_value)\n self.assertEquals(filtered_value, self.middle_value)\n # Let next window time elapse\n time.sleep(4)\n filtered_value = test_window_scheme.filter(self.more_than_upper_bound)\n # None is expected as filtered value because at least one sample has been already passed and\n # value ranges outside lower and upper bound limits\n self.assertEquals(filtered_value, None)", "def _step(self, a):\n obs, rew, done, info = super()._step(a)\n # rew = +1 if past int threshold for first time in episode\n # if self.robot.body_xyz[0] > self.threshold:\n # self.threshold += 1\n # rew = 1.0\n # else:\n # rew = 0.0\n # self.steps += 1\n # if self.steps > self.max_episode_steps:\n # done = True\n return obs, rew, done, info", "def success_rate(x_tapes):\n return np.sum([is_success(x_tape) for x_tape in x_tapes]) / len(x_tapes)", "def problem2():\n k = 4\n total_draws = 20\n total_balls = 50\n\n plt.figure()\n for _ in range(50):\n for num_samples in [10000]:\n experiment_results = []\n for samples in range(num_samples):\n N = np.random.randint(1, k, total_balls - 1)\n N = np.append(N, k)\n N = np.array(N).flatten()\n random.shuffle(N)\n draw = N[:total_draws]\n experiment_result = np.any(draw == 4)\n experiment_results.append(experiment_result)\n plt.plot(np.cumsum(experiment_results) / np.arange(1, num_samples + 1))\n old_result = experiment_results[:]\n\n plt.xlabel('Total Draws')\n plt.ylabel('Probability')\n plt.show()", "def get_nsatpix( self, step ):\n \n return np.sum( self.get_image_step( step, divide_by_exptime=False ) >= 1.6e4 )", "def get_sensor_bool_dryspot_runlevel(self, filename, threshold_min_counted_dryspots=5):\n f = h5py.File(filename, \"r\")\n meta_file = h5py.File(str(filename).replace(\"RESULT.erfh5\", \"meta_data.hdf5\"), 'r')\n\n try:\n single_states, set_of_states, useless_states = self.__get_dryspot_data(f, meta_file)\n multi_states = self.__get_pressure_data(f)\n multi_states = multi_states.squeeze()\n\n activated_sensors = np.count_nonzero(multi_states, axis=1)\n percentage_of_all_sensors = activated_sensors / 1140\n len_wanted_seq = 100\n current = 0\n sequence = np.zeros((len_wanted_seq, self.num_sensors))\n frame_labels = []\n\n if self.aux_info:\n original_frame_idxs = np.full(len_wanted_seq, np.nan, np.int16)\n frame_labels_aux = np.full(len_wanted_seq, np.nan, np.int8)\n sample_percentages = np.full(len_wanted_seq, np.nan)\n single_state_indices = np.full(len_wanted_seq, np.nan, np.int16)\n # flowfronts = np.zeros((len_wanted_seq, self.image_size[0], self.image_size[1]))\n # _coords, flat_fillings = self.__get_filling_data(f, single_states)\n\n for i, sample in enumerate(single_states):\n state_num = int(str(sample).replace(\"state\", \"0\"))\n try:\n sample_percentage = percentage_of_all_sensors[state_num - 1]\n if sample_percentage >= current / len_wanted_seq:\n data = multi_states[state_num - 1, :]\n data = np.log(np.add(data, 1)) # TODO make log optional\n if self.sensor_indizes != ((0, 1), (0, 1)):\n rect = data.reshape(38, 30)\n sel = rect[self.sensor_indizes[0][0]::self.sensor_indizes[0][1],\n self.sensor_indizes[1][0]::self.sensor_indizes[1][1]]\n data = 
sel.flatten()\n sequence[current, :] = data\n\n frame_label = 0\n if state_num in set_of_states:\n frame_label = 1\n frame_labels.append(frame_label)\n\n if self.aux_info:\n original_frame_idxs[current] = state_num\n frame_labels_aux[current] = frame_label\n sample_percentages[current] = sample_percentage\n single_state_indices[current] = i\n # flowfronts[current, :, :] = create_np_image(target_shape=self.image_size,\n # norm_coords=_coords, data=flat_fillings[i])\n current += 1\n except IndexError:\n continue\n\n # determine runlevel label using frame labels and threshold\n lens_of_runs_of_dryspots = [sum(1 for _ in group) for key, group in\n groupby(np.array(frame_labels) == 1) if key]\n max_len = 0 if len(lens_of_runs_of_dryspots) == 0 else max(lens_of_runs_of_dryspots)\n label = 0 if max_len < threshold_min_counted_dryspots else 1\n\n f.close()\n meta_file.close()\n\n if self.aux_info:\n # framelabels, original_frame_idx, original_num_frames, flowfronts, filling_percentage\n aux = {\"framelabel\": frame_labels_aux,\n \"original_frame_idx\": original_frame_idxs,\n \"original_num_multi_states\": len(multi_states),\n \"percent_of_sensors_filled\": sample_percentages,\n \"single_state_indices\": single_state_indices,\n }\n return [(sequence, label, aux)]\n\n return [(sequence, label)]\n except KeyError:\n f.close()\n meta_file.close()\n return None", "def autorange(xvar, sig, gwin=7, swin=None, win=30,\n on_mult=(1.5, 1.), off_mult=(1., 1.5),\n nbin=10, transform='log', thresh=None):\n failed = []\n sig = np.asanyarray(sig)\n\n # smooth signal\n if swin is not None:\n sigs = fastsmooth(sig, swin)\n else:\n sigs = sig\n\n # transform signal\n if transform == 'log':\n tsigs = log_nozero(sigs)\n else:\n tsigs = sigs\n\n if thresh is None:\n if tsigs.ndim == 1:\n scale = False\n tsigs = tsigs.reshape(-1, 1)\n else:\n scale = True\n fsig = separate_signal(tsigs, scaleX=scale).astype(bool)\n else:\n if transform == 'log':\n thresh = np.log(thresh)\n fsig = tsigs > thresh\n fsig[0] = False # the first value must always be background\n fbkg = ~fsig\n\n # remove transitions by fitting a gaussian to the gradients of\n # each transition\n\n # 1. determine the approximate index of each transition\n zeros = bool_2_indices(fsig)\n \n if zeros is not None:\n zeros = zeros.flatten()\n if sigs.ndim > 1:\n sigs = sigs.sum(axis=1)\n\n # 2. calculate the absolute gradient of the target trace.\n grad = abs(fastgrad(sigs, gwin)) # gradient of untransformed data.\n\n for z in zeros: # for each approximate transition\n # isolate the data around the transition\n if z - win < 0:\n lo = gwin // 2\n hi = int(z + win)\n elif z + win > (len(sig) - gwin // 2):\n lo = int(z - win)\n hi = len(sig) - gwin // 2\n else:\n lo = int(z - win)\n hi = int(z + win)\n\n xs = xvar[lo:hi]\n ys = grad[lo:hi]\n\n # determine type of transition (on/off)\n mid = (hi + lo) // 2\n tp = sigs[mid + 3] > sigs[mid - 3] # True if 'on' transition.\n\n # fit a gaussian to the first derivative of each\n # transition. Initial guess parameters:\n # - A: maximum gradient in data\n # - mu: c\n # - width: 2 * time step\n # The 'sigma' parameter of curve_fit:\n # This weights the fit by distance from c - i.e. data closer\n # to c are more important in the fit than data further away\n # from c. This allows the function to fit the correct curve,\n # even if the data window has captured two independent\n # transitions (i.e. 
end of one ablation and start of next)\n # ablation are < win time steps apart).\n centre = xvar[z] # center of transition\n width = (xvar[1] - xvar[0]) * 2\n\n try:\n pg, _ = curve_fit(gauss, xs, ys,\n p0=(np.nanmax(ys),\n centre,\n width),\n sigma=(xs - centre)**2 + .01)\n # get the x positions when the fitted gaussian is at 'conf' of\n # maximum\n # determine transition FWHM\n fwhm = abs(2 * pg[-1] * np.sqrt(2 * np.log(2)))\n # apply on_mult or off_mult, as appropriate.\n if tp:\n lim = np.array([-fwhm, fwhm]) * on_mult + pg[1]\n else:\n lim = np.array([-fwhm, fwhm]) * off_mult + pg[1]\n\n fbkg[(xvar > lim[0]) & (xvar < lim[1])] = False\n fsig[(xvar > lim[0]) & (xvar < lim[1])] = False\n\n except RuntimeError:\n failed.append([centre, tp])\n pass\n\n ftrn = ~fbkg & ~fsig\n\n # if there are any failed transitions, exclude the mean transition width\n # either side of the failures\n if len(failed) > 0:\n trns = xvar[bool_2_indices(ftrn)]\n tr_mean = (trns[:, 1] - trns[:, 0]).mean() / 2\n for f, tp in failed:\n if tp:\n ind = (xvar >= f - tr_mean *\n on_mult[0]) & (xvar <= f + tr_mean * on_mult[0])\n else:\n ind = (xvar >= f - tr_mean *\n off_mult[0]) & (xvar <= f + tr_mean * off_mult[0])\n fsig[ind] = False\n fbkg[ind] = False\n ftrn[ind] = False\n\n return fbkg, fsig, ftrn, [f[0] for f in failed]", "def close_to_exceeding(self) -> bool:\n mean = self.current / self.num_cuts\n if self.max_frames is not None:\n return self.current + mean > self.max_frames\n if self.max_samples is not None:\n return self.current + mean > self.max_samples\n if self.max_duration is not None:\n return self.current + mean > self.max_duration\n return False", "def shaking_false_alarm_rate(individual, test_data, truth_data, name=None):\r\n test_data = np.array(test_data)\r\n truth_data = np.array(truth_data)\r\n total = 0\r\n num_wrong = 0\r\n for test_point, truth_point in zip(test_data, truth_data):\r\n # Nine represents a shaking event\r\n if truth_point != 9:\r\n if test_point > 8.5 and test_point <= 9.5:\r\n num_wrong += 1\r\n total += 1\r\n #if num_wrong == 0:\r\n # # Perfection implies overtraining\r\n # return 1.0\r\n #else:\r\n return float(num_wrong)/float(total)", "def get_noise_thresholds(size_of_class=45, fakes='./data/CASIA1_fakes', originals='./data/CASIA1_originals', \n fakes_ela='./data/CASIA1_fakes_ela'):\n fakes_list = os.listdir(fakes)\n\n fakes = load_fakes(fakes_list, fakes, originals)\n\n noises = []\n for i, item in enumerate(fakes):\n image = cv2.imread(os.path.join(fakes_ela, item.path.split('\\\\')[-1]))\n image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n \n image = cv2.inRange(image, np.array([0,0,0]), np.array([180,255,60]))\n image = cv2.bitwise_not(image)\n noises.append(estimate_noise(image))\n\n fakes = np.array(fakes)\n noises = np.array(noises)\n idxs = noises.argsort()\n sorted_by_noise = fakes[idxs]\n\n for i, item in enumerate(sorted(noises)):\n if (i+1) % size_of_class == 0:\n print(\"####\", i+1, item)\n else:\n print(i+1, item)", "def converged(self, nfactor=20):\n tau = self.sampler.get_autocorr_time(tol=0)\n converged = np.all(tau * nfactor < self.sampler.iteration)\n return converged", "def _find_saturated_profiles(self) -> np.ndarray:\n n_gates, var_lim, _, _ = self.noise_params\n var = np.var(self.data['backscatter'][:, -n_gates:], axis=1)\n return var < var_lim", "def check_background_SN(ramp_file='id1ketvxq_ramp.dat', show=False):\n from astropy.table import Table\n import matplotlib.pyplot as plt\n import numpy as np\n \n time, bg = np.loadtxt(ramp_file, 
unpack=True)\n t0 = np.diff(time)[0]\n time, bg = time[1:], bg[1:] # skip first 2.9 s read\n\n if len(bg) <= 2:\n return []\n \n dt = np.append(t0, np.diff(time))\n \n s = 1. # test count rate\n so = np.argsort(bg)\n \n NREAD = len(bg)\n reads = np.arange(NREAD)+1\n \n fluence = np.cumsum((s*dt)[so])\n rms = np.sqrt(np.cumsum((bg*dt)[so]))\n rms_optimal = np.sqrt(np.cumsum((bg[so][2]*dt)[so]))\n\n max_ix = np.argmax(fluence/rms)\n if max_ix < (NREAD-1):\n pop_reads = list(reads[so][max_ix+1:])\n else:\n pop_reads = []\n \n ### where observed rms < expected RMS for flat background\n other_bad = list(reads[so][rms/rms_optimal > 1.25])\n pop_reads = np.cast[int](np.unique(np.hstack((pop_reads, other_bad))))\n pop_reads = list(pop_reads)\n \n if len(pop_reads) > 0:\n # np.savetxt(ramp_file.replace('ramp.dat', 'ramp.pop.png'), pop_reads,\n # fmt='%d')\n fp = open('/tmp/'+ramp_file.replace('ramp.dat', 'ramp.pop.dat'),'w')\n for r in pop_reads:\n fp.write('%d\\n' %(r))\n fp.close()\n \n if show:\n plt.ioff()\n \n fig = plt.figure(figsize=[6,3])\n ax = fig.add_subplot(111)\n ax.plot(time, bg, color='0.8', linewidth=6, zorder=1)\n si = 60\n ax.scatter(time, bg, color='w', s=si, zorder=8)\n ax.scatter(time, bg, color='k', s=0.5*si, zorder=9)\n if len(pop_reads) > 0:\n ix = np.array(pop_reads)\n ax.scatter(time[ix-1], bg[ix-1], color='r', zorder=10, s=0.5*si)\n \n ax.grid()\n ax.set_title(ramp_file)\n ax.set_xlabel('time'), ax.set_ylabel('background')\n \n fig.tight_layout(pad=0.1)\n \n fig.savefig('/tmp/'+ramp_file.replace('.dat', '.pop.png'))\n plt.close()\n \n return pop_reads", "def __call__(self, trainer):\n observation = trainer.observation\n summary = self._summary\n key = self._key\n if key in observation:\n summary.add({key: observation[key]})\n\n if not self._interval_trigger(trainer):\n return False\n\n if self._max_trigger(trainer):\n return True\n\n stats = summary.compute_mean()\n value = float(stats[key]) # copy to CPU\n self._init_summary()\n\n if not self._best_value or self._compare(self._best_value, value):\n self._best_value = value\n self._waited = 0\n return False\n elif self._waited >= self._patience:\n return True\n else:\n self._waited += 1\n if self._waited >= self._patience:\n return True\n else:\n return False", "def _no_improve(self):\n improve = [p-f for (f,p),_ in self.population]\n return np.mean(improve) < 1.0", "def run(self, date):\n\n out = self.nanoutput()\n\n for cs in config.stimuli():\n stimresp = self.analysis('stim_dff_alltrials_%s' % cs)\n dff_active = np.sum(stimresp > 0.025)/float(len(stimresp))\n out['good_%s' % cs] = dff_active > 0.05\n\n return out", "def has_more_trials(self) -> bool:\r\n raise NotImplementedError", "def eval_clicks(true_songs, predicted_songs, seeds):\n n_preds = len(predicted_songs)\n true_set = set(true_songs)\n for i, idx in enumerate(np.arange(0, n_preds, 10)):\n preds = predicted_songs[idx:idx+10]\n for p in preds:\n if p in true_set and p not in seeds:\n return i\n return float(\"inf\")", "def setCaptured(self):\r\n total = 0\r\n for x in self.animals:\r\n if x.captured == True:\r\n total += 1\r\n self.captured = total", "def run_experiment():\n return [random.random() < 0.5 for _ in range(1000)]", "def run_experiment():\n return [random.random() < 0.5 for _ in range(1000)]", "def run_experiment():\n return [random.random() < 0.5 for _ in range(1000)]", "def run_experiment():\n return [random.random() < 0.5 for _ in range(1000)]", "def check_if_stopping_criterion_is_met(original_training_data_values):\n if 
len(original_training_data_values)<23:\n return True\n else:\n target_column = original_training_data_values[:, -1]\n recipe_type, cupcake_muffin_count = np.unique(target_column, return_counts=True)\n cupcake_ratio = cupcake_muffin_count[0] / (cupcake_muffin_count.sum())\n muffin_ratio = cupcake_muffin_count[1] / (cupcake_muffin_count.sum())\n\n if cupcake_ratio >= 0.9 or muffin_ratio >= 0.9:\n return True\n else:\n return False", "def _send_sampled_event(self):\n if not self.enabled:\n return False\n send_sample = False\n self.count += 1\n if self.actual_rate < self.statsd_sample_rate:\n self.monitored += 1\n send_sample = True\n self.actual_rate = float(self.monitored) / float(self.count)\n if self.count >= maxint or self.monitored >= maxint:\n self.count = 0\n self.monitored = 0\n return send_sample", "def compute_sw_threshold(flanking_reads, paf_dict, fasta_dict, window_size):\n\n max_scores = []\n for query, target in itertools.product(flanking_reads, flanking_reads):\n\n if str(query + target) in paf_dict:\n overlap_info = paf_dict[query+target]\n elif str(target + query) in paf_dict:\n # get info and swap them\n overlap_info = paf_dict[target+query]\n query, target = target, query\n else:\n continue\n\n query_start = overlap_info['query_start']\n query_end = overlap_info['query_end']\n target_start = overlap_info['target_start']\n target_end = overlap_info['target_end']\n\n query_seq = fasta_dict[query][query_start:query_end]\n target_seq = fasta_dict[target][target_start:target_end]\n\n # Get scores for this pair; store in cur_scores\n cur_scores = []\n if window_size:\n # Use rolling window\n min_len = min(len(query_seq), len(target_seq))\n for start, end in utils.pairwise(range(0, min_len, window_size)):\n qs = query_seq[start:end]\n ts = target_seq[start:end]\n score = smith_waterman.smith_waterman(qs, ts)\n cur_scores.append(score)\n\n if cur_scores:\n score = max(cur_scores)\n max_scores.append(score)\n else:\n # No rolling window\n score = smith_waterman.smith_waterman(query_seq, target_seq)\n max_scores.append(score)\n\n threshold = 0.9 * max(max_scores)\n\n print(\"using {} as threshold\".format(threshold))\n\n plt.subplot(2, 3, 2)\n plt.hist(max_scores)\n plt.title(\"FLANKING READS\\nhistogram of num_gaps / len(aligned_sequence)\\nthreshold = {}\\nwindow_size = {}\\nshowing {} scores\"\n .format(threshold, window_size, len(max_scores)))\n\n\n\n return threshold", "def is_sampling_for_minmax(self):\n return (self._level_change_time is not None) and \\\n (get_time() - self._level_change_time) < self._duration_in_sec", "def compute_profiling_time(key, expected_num_spikes, rate, t_stop, n,\n winlen, binsize, num_rep=10):\n\n time_fast_fca = 0.\n time_fpgrowth = 0.\n for rep in range(num_rep):\n # Generating artificial data\n data = []\n for i in range(n):\n np.random.seed(0)\n data.append(stg.homogeneous_poisson_process(\n rate=rate, t_start=0*pq.s, t_stop=t_stop))\n\n # Extracting Closed Frequent Itemset with FP-Growth\n t0 = time.time()\n # Binning the data and clipping (binary matrix)\n binary_matrix = conv.BinnedSpikeTrain(data, binsize).to_bool_array()\n # Computing the context and the binary matrix encoding the relation\n # between objects (window positions) and attributes (spikes,\n # indexed with a number equal to neuron idx*winlen+bin idx)\n context, transactions, rel_matrix = spade._build_context(binary_matrix,\n winlen)\n # Applying FP-Growth\n fim_results = [i for i in spade._fpgrowth(\n transactions,\n rel_matrix=rel_matrix,\n winlen=winlen)]\n time_fpgrowth += 
time.time() - t0\n\n # Extracting Closed Frequent Itemset with Fast_fca\n t1 = time.time()\n # Binning the data and clipping (binary matrix)\n binary_matrix = conv.BinnedSpikeTrain(data, binsize).to_bool_array()\n # Computing the context and the binary matrix encoding the relation\n # between objects (window positions) and attributes (spikes,\n # indexed with a number equal to neuron idx*winlen+bin idx)\n context, transactions, rel_matrix = \\\n spade._build_context(binary_matrix, winlen)\n # Applying FP-Growth\n fim_results = spade._fast_fca(context, winlen=winlen)\n time_fast_fca += time.time() - t1\n\n time_profiles = {'fp_growth': time_fpgrowth/num_rep,\n 'fast_fca': time_fast_fca/num_rep}\n\n # Storing data\n res_path = '../results/{}/{}/'.format(key, expected_num_spikes)\n # Create path is not already existing\n path_temp = './'\n for folder in split_path(res_path):\n path_temp = path_temp + '/' + folder\n mkdirp(path_temp)\n\n np.save(res_path + '/profiling_results.npy', {'results': time_profiles,\n 'parameters': {'rate': rate, 't_stop': t_stop, 'n': n,\n 'winlen': winlen, 'binsize': binsize}})", "def has_more_samples(self):\n return True", "def has_more_samples(self):\n return True", "def has_more_samples(self):\n return True", "def is_exploring(self, step):\n return np.random.rand() < self._epsilon(step)", "def reduction_size_one_autarkies(self):\n done = False\n while not done:\n current_N = self.A.shape[1]\n max_projection = self.A @ np.ones(current_N, dtype=int)\n inprods = self.A.transpose() @ max_projection\n A_y = self.A.transpose() @ self.y\n indices = []\n vals = []\n for i in range(current_N):\n if A_y[i] <= 0.5 * self.m:\n indices.append(i)\n vals.append(0)\n elif A_y[i] >= inprods[i] - 0.5 * self.m:\n indices.append(i)\n vals.append(1)\n no_reductions = len(indices)\n for i in range(no_reductions - 1, -1, -1): #reverse order\n self.problem_reduction_single(indices[i], vals[i])\n if no_reductions == 0:\n done = True", "def can_sample(self, n_samples):\n return len(self) >= n_samples", "def can_sample(self, n_samples):\n return len(self) >= n_samples", "def can_sample(self, n_samples):\n return len(self) >= n_samples", "def can_sample(self, n_samples):\n return len(self) >= n_samples", "def noise_despiker(self, win=3, nlim=12.):\n if ~isinstance(win, int):\n win = int(win)\n if not hasattr(self, 'despiked'):\n self.data['despiked'] = {}\n for a, vo in self.focus.items():\n v = vo.copy()\n if 'time' not in a.lower():\n # calculate rolling mean using convolution\n kernel = np.ones(win) / win\n rmean = np.convolve(v, kernel, 'same')\n\n # with warnings.catch_warnings():\n # to catch 'empty slice' warnings\n # warnings.simplefilter(\"ignore\", category=RuntimeWarning)\n # rmean = \\\n # np.apply_along_axis(np.nanmean, 1,\n # rolling_window(v, win,\n # pad=np.nan))\n # rmean = \\\n # np.apply_along_axis(np.nanmean, 1,\n # rolling_window(v, win,\n # pad=np.nan))\n # calculate rolling standard deviation\n # (count statistics, so **0.5)\n rstd = rmean**0.5\n\n # find which values are over the threshold\n # (v > rmean + nlim * rstd)\n over = v > rmean + nlim * rstd\n if sum(over) > 0:\n # get adjacent values to over - limit values\n neighbours = \\\n np.hstack([v[np.roll(over, -1)][:, np.newaxis],\n v[np.roll(over, 1)][:, np.newaxis]])\n # calculate the mean of the neighbours\n replacements = np.apply_along_axis(np.nanmean, 1,\n neighbours)\n # and subsitite them in\n v[over] = replacements\n self.data['despiked'][a] = v\n self.setfocus('despiked')\n return", "def 
_snr_preprocessing(self):\n if self.flux is None or self.fluxerr is None:\n return np.ones(len(self.stamps), dtype=bool)\n\n snrs = self.flux.astype(float) / self.fluxerr.astype(float)\n return snrs > self.snr_threshold", "def weighted_estimation(self) -> bool:\n pass", "def smooth_al(data):\n wd = 5\n optimize = True\n DW_min = 5\n while optimize == True:\n smooth = savgol_filter(data, wd, 2)\n DW = DW_cal(data, smooth)\n if abs(2 - DW) < DW_min:\n wd = wd + 2\n DW_min = abs(2 - DW)\n else:\n wd = wd - 2\n smooth = savgol_filter(data, wd, 2)\n DW = DW_cal(data, smooth)\n break\n return smooth, wd", "def test_am_threshold(Simulator, plt, seed, rng):\n d = 64\n vocab = Vocabulary(d, pointer_gen=rng)\n vocab.populate('A; B; C; D')\n\n d2 = int(d / 2)\n vocab2 = Vocabulary(d2, pointer_gen=rng)\n vocab2.populate('A; B; C; D')\n\n def input_func(t):\n return '0.49 * A' if t < 0.1 else '0.8 * B'\n\n with spa.Network('model', seed=seed) as m:\n m.am = ThresholdingAssocMem(\n threshold=0.5, input_vocab=vocab, output_vocab=vocab2,\n function=filtered_step_fn, mapping='by-key')\n m.stimulus = spa.Transcode(input_func, output_vocab=vocab)\n m.stimulus >> m.am\n\n in_p = nengo.Probe(m.am.input)\n out_p = nengo.Probe(m.am.output, synapse=0.03)\n\n with Simulator(m) as sim:\n sim.run(0.3)\n t = sim.trange()\n below_th = t < 0.1\n above_th = t > 0.25\n\n plt.subplot(2, 1, 1)\n plt.plot(t, similarity(sim.data[in_p], vocab))\n plt.ylabel(\"Input\")\n plt.subplot(2, 1, 2)\n plt.plot(t, similarity(sim.data[out_p], vocab2))\n plt.plot(t[above_th], np.ones(t.shape)[above_th] * 0.9, c='g', lw=2)\n plt.ylabel(\"Output\")\n\n assert np.mean(sim.data[out_p][below_th]) < 0.01\n assert_sp_close(t, sim.data[out_p], vocab2['B'], skip=0.25, duration=0.05)", "def goalReached(self, rewards):\n return len(rewards) >= 100 and np.mean(rewards[-100:]) >= 18", "def over_sample(self) -> float:\n return self._over_sample", "def over_sample(self) -> float:\n return self._over_sample", "def is_over(self, state) -> bool:\n\n p1_count = 0\n p2_count = 0\n ley_line_total = (state.side_length + 1) * 3\n for itype in state.current_ley_lines:\n for line in itype:\n if line[0] == '1':\n p1_count += 1\n if line[0] == '2':\n p2_count += 1\n\n if p1_count >= ley_line_total/2 or p2_count >= ley_line_total/2:\n return True\n return False", "def is_tentative(self):\n return self.state == TrackState.Tentative", "def is_tentative(self):\n return self.state == TrackState.Tentative", "def is_stationary(self) -> bool:\n ad_fuller_result = adfuller(self.y.dropna(), autolag='AIC')\n p_value = ad_fuller_result[1]\n return p_value <= 0.5", "def scan_ap(self, cut_off = 3480, r_ap = 12, r_an = 3):\r\n for trial in range(self.dimension):\r\n max_count, max_rank = self.pick_largest(cut_off = cut_off)\r\n if max_count >= 0:\r\n y,x = self.rank_yx(max_rank)\r\n print(\"Scan pos\", y,x,\" scanning\",trial,\"counts\", max_count)\r\n count_in, count_out = self.fit_galaxy(y,x,r_ap, r_an)\r\n count_sum = []\r\n local_bg = []\r\n for c in range(len(count_in)):\r\n if count_in[c] >= cut_off:\r\n count_sum.append(count_in[c])\r\n for c in range(len(count_out)):\r\n if count_out[c] <= cut_off:\r\n local_bg.append(count_out[c])\r\n no_c = len(count_in)\r\n if len(count_sum) >= int(np.pi * (r_ap **2) / 2): # Make sure it is not noise\r\n count_sum = np.array(count_sum).sum()\r\n if len(local_bg) != 0:\r\n total = 0\r\n for c in range(len(local_bg)):\r\n if 3*13.8 <= abs(local_bg[c] - 3419):\r\n total += local_bg[c]\r\n local_bg = total / len(local_bg)\r\n else:\r\n 
local_bg = 3419\r\n print(\"galaxy founded at \", y, x)\r\n self.galaxies.append(galaxy(y, x, r_ap, count_sum, bg_galaxy=local_bg, no_count = no_c))\r\n \r\n elif max_count == -1:\r\n print(\"aperture scan completed, number of galaxies found is\", len(self.galaxies))\r\n break", "def skip_test(n):\n return k > 0 and magic * n * k**0.5 >= t4_ref", "def future_deceivedup(self, a): \n nfav, succfav = self.control[a.name]\n #f_n = n+1;\n # f_worldround = self.world.round+1\n f_successRate = float(a.success +1) / float(self.world.round+1)\n if hardrule:\n return (nfav+1 > 5) and ((self.world.round - nfav) > 5) and \\\n float(a.success+1-succfav)/(self.world.round+1 - nfav) > \\\n (float(succfav)/nfav) + epsilonD\n else:\n return nfav > 5 and (f_successRate > (float(succfav)/nfav) + epsilonD \\\n or f_successRate < epsilonD)", "def _check_for_noise(self) -> None:\n safety_stop = 5\n while self._has_noise() and safety_stop > 0:\n self.filter(size=3)\n safety_stop -= 1", "def runcount(test_keys, sigma, sigma_max, sigma_step,\n npoints_min, npoints_max, npoints_step):\n run = 1\n for key in test_keys:\n if key:\n while sigma < sigma_max:\n npoints = npoints_min\n while npoints < npoints_max:\n npoints += npoints_step\n run += 1\n sigma += sigma_step\n return run", "def n_pos(self):\n running_total = 0\n for i in range(self.prob.num):\n if self.alphas[i] > 1e-5 > self.prob.C - self.deltas[i] and self.prob.Y[i] == 1:\n running_total += 1\n return running_total if running_total > 0 else 1", "def n_pos(self):\n running_total = 0\n for i in range(self.prob.num):\n if self.alphas[i] > 1e-5 > self.prob.C - self.deltas[i] and self.prob.Y[i] == 1:\n running_total += 1\n return running_total if running_total > 0 else 1", "def isPlayed(self):\n return bool(self.viewedLeafCount == self.leafCount)", "def isPlayed(self):\n return bool(self.viewedLeafCount == self.leafCount)", "def classify(self, testInstance):\n return self.fire(testInstance) > 0.5", "def burn_in_finished():\n global trials\n if trials <= 0:\n return True\n trials -= 1\n return False", "def early_stopping(cost, opt_cost, threshold, patience, count):\n booly = False\n if cost >= opt_cost - threshold:\n count += 1\n if count == patience:\n return True, count\n else:\n booly = False\n count = 0\n return booly, count", "def _compute_is_terminal(self):\n # self.n_actions contains a number of unlabelled datapoints that is left\n if self.n_actions==1:\n # print('We ran out of samples!')\n done = True\n else:\n done = False\n return done", "def greedy(self):\n n_step_t = self.filter['n_step_t']\n n_traj = self.filter['n_traj']\n traj = self.filter['traj']\n steps = [0 for i in xrange(n_step_t)]\n for i in xrange(n_traj):\n n_step = traj[i]['n_step']\n for j in xrange(n_step):\n steps[j] += 1\n self.filter['steps'] = steps\n \n return", "def __call__(self, *args, **kwargs):\n out = super().__call__(*args, **kwargs)\n out **= 2\n self._debug.append(out)\n out = int(out > self.threshold_value)\n if out and self.since_last_peak > self._num_of_taps:\n self.since_last_peak = -1\n else:\n out = 0\n self.since_last_peak += 1\n return out", "def trial_atr(trial, omit_missing_frames=True):\n frames = trial.HMM_MLE\n if omit_missing_frames:\n frames = frames[frames >= 0]\n\n runs = calc_run_lengths(trial.HMM_MLE)\n return_times = []\n current_return_time = 0\n for run in runs:\n if run.object == 0:\n return_times.append(current_return_time/60)\n current_return_time = 0\n else:\n current_return_time += run.length\n return np.mean(return_times)", "def 
finished(self):\n hit_max_evals = len(self.rounds) >= self.max_evals\n\n if len(self.rounds) < self.conv_check_iters:\n hit_conv = False\n else:\n last_rounds = self.rounds[-self.conv_check_iters:]\n a = zip(*last_rounds)[1]\n a_sd = np.std(a, axis=0)\n hit_conv = (a_sd < self.conv_action_eps).all()\n\n hit_max_time = self.duration > self.max_time\n\n return hit_max_evals or hit_conv or hit_max_time", "def pull(self):\n chance = np.random.uniform()\n return chance < self.winning_prob", "def test_next_window_time_no_sample_passed(self):\n test_window_scheme = WindowingScheme(self.window_test_filter, 3)\n time.sleep(4)\n collected_value = test_window_scheme.filter(self.more_than_upper_bound)\n self.assertEquals(collected_value, self.more_than_upper_bound)", "def run_adfuller_test(preprocessed_data, alpha=0.05, wanted_fraction=0.95):\n inds = list(np.ndindex(preprocessed_data.shape[:-1]))\n\n def return_adfuller_pval(this_ind): return adfuller(\n preprocessed_data[this_ind])[1]\n pval_list = np.array(parallelize(return_adfuller_pval, inds, n_jobs=30))\n alpha = 0.05\n threshold = alpha/len(pval_list)\n wanted_fraction = 0.95\n if np.sum(pval_list < threshold) > wanted_fraction * len(pval_list):\n print('Data is stationary')\n else:\n raise ValueError('Data is not stationary')", "def should_average(self):\n return self._should_average", "def check_stimOn_delays(data, **_):\n metric = np.nan_to_num(data[\"stimOn_times\"] - data[\"stimOnTrigger_times\"], nan=np.inf)\n passed = (metric <= 0.15) & (metric > 0)\n assert data[\"intervals\"].shape[0] == len(metric) == len(passed)\n return metric, passed", "def is_recording(self) -> bool:\n return self.elastic_span.transaction.is_sampled and not self.elastic_span.ended_time", "def single_iteration_condition(args):\n return np.logical_and(\n np.greater(args[-3], acceptance_ratio),\n np.less(args[-2], max_iteration))", "def _episode_success(self, observations):\n dist = self._env.get_metrics()[\"object_to_goal_distance\"]\n if (\n abs(dist) > self._success_distance\n or observations[\"gripped_object_id\"] != -1\n ):\n return False\n return True", "def process_trace(n_tr, tr, sta, orig_time, cmps, cfg):\n cmp = tr.stats.channel[2:3]\n sta[cmp] = {}\n sta[cmp][\"times\"] = tr.times(reftime=orig_time)\n\n sta[cmp][\"tr_results\"] = np.zeros(\n (len(cfg.picking.FILT_WINS[\"P\"]), sta[\"lenD\"])\n )\n sta[cmp][\"f1_results\"] = np.zeros(\n (len(cfg.picking.FILT_WINS[\"P\"]), len(cfg.picking.KURT_WINS),\n sta[\"lenD\"])\n )\n sta[cmp][\"f1_mean\"] = np.zeros(sta[\"lenD\"])\n sta[cmp][\"f3_results\"] = np.zeros(\n (len(cfg.picking.FILT_WINS[\"P\"]),\n len(cfg.picking.KURT_WINS), sta[\"lenD\"])\n )\n sta[cmp][\"f3_mean_smooth\"] = np.zeros(\n (len(cfg.picking.CF_MEAN_SMOOTH_WIND), sta[\"lenD\"])\n )\n sta[cmp][\"f4_all\"] = np.zeros((len(cfg.picking.CF_MEAN_SMOOTH_WIND),\n sta[\"lenD\"]))\n sta[cmp][\"f1_mean_smooth\"] = np.zeros(sta[\"lenD\"])\n # Get suitable filters (exclude those fully outside Nyquist freq.)\n for phase in [\"P\", \"S\"]:\n if cmp in cmps[phase]:\n sta[\"picks\"][\"poss_obs\"][phase][cmp] = {}\n sta[cmp][\"filtwins_check\"] = [\n filt_win for filt_win in cfg.picking.FILT_WINS[phase]\n if filt_win[0] < sta[\"samplerate\"] / 2\n ]\n if cfg.picking.INTEGRATE_S is True:\n tr.integrate()\n\n for n_filt, filt in enumerate(sta[cmp][\"filtwins_check\"]):\n # Ensure that filter covers sample rate / 2\n if (tr.stats.sampling_rate / 2) <= filt[0]:\n print(\"Skipping this Kurtosis run due to sample rate/2<f\")\n continue\n tr.filter(\"bandpass\", 
freqmin=filt[0], freqmax=filt[1])\n try:\n sta[cmp][\"tr_results\"][n_filt] = tr.data\n except ValueError: # If input array length is inconsistent\n continue\n # Loop over kurtosis windows\n for n_kurt, kurt_win_s in enumerate(cfg.picking.KURT_WINS):\n f1 = CF_kurtosis(kurt_win_s, tr)\n sta[cmp][\"f1_results\"][n_filt, n_kurt] = f1 # Needed for weights\n f2 = kurt_transform_f2(f1, kurt_win_s, tr)\n f3 = kurt_transform_f3(f2, kurt_win_s, tr)\n\n sta[cmp][\"f3_results\"][n_filt, n_kurt] = f3\n sta[cmp][\"f1_mean\"] = np.nanmean(sta[cmp][\"f1_results\"], axis=0)[0]\n sta[cmp][\"f1_mean_smooth\"] = do_smooth(\n sta[cmp][\"f1_mean\"], cfg.picking.CF_MEAN_SMOOTH_WIND[0],\n tr.stats.sampling_rate\n )\n # ^ Throws up a warning first time due to NaN slices\n # Compute mean CF and final kurtosis transform\n f3_mean = np.nanmean(sta[cmp][\"f3_results\"], axis=0)[0]\n\n for nsm, smooth_wind in enumerate(cfg.picking.CF_MEAN_SMOOTH_WIND):\n sta[cmp][\"f3_mean_smooth\"][nsm] = do_smooth(\n f3_mean, smooth_wind, tr.stats.sampling_rate\n )\n f4 = kurt_transform_f4(sta[cmp][\"f3_mean_smooth\"][nsm],\n np.max(cfg.picking.KURT_WINS), tr)\n sta[cmp][\"f4_all\"][nsm] = f4\n\n # Now pick (avoiding end and beginning of signal)\n # Pick the P-waves\n if cmp in cmps[\"P\"]:\n sta[\"picks\"][\"poss_obs\"][\"P\"][cmp][nsm] = []\n # Find points where Kurt<0 & doesn't look like S-wave\n p_cands = np.argwhere((f4 < 0.0))\n for idx in p_cands.tolist():\n kurt_wgt = np.min(np.where(np.array(\n cfg.picking.KURT2WGHT[\"P\"]\n <= sta[cmp][\"f1_mean_smooth\"][idx])))\n sta[\"picks\"][\"poss_obs\"][\"P\"][cmp][nsm].append([\n orig_time+sta[cmp][\"times\"][idx][0], f4[idx][0],\n tr.stats.channel, kurt_wgt, idx,\n sta[cmp][\"times\"][idx][0]\n ])\n # Pick the S-waves\n if cmp in cmps[\"S\"]:\n sta[\"picks\"][\"poss_obs\"][\"S\"][cmp][nsm] = []\n\n # Find points where Kurt<0 & doesn't look like S-wave\n s_cands = np.argwhere((f4 < 0.0))\n for idx in s_cands.tolist():\n kurt_wgt = np.min(np.where(np.array(cfg.picking.KURT2WGHT[\"S\"]\n <= sta[cmp][\"f1_mean_smooth\"][idx]))\n )\n sta[\"picks\"][\"poss_obs\"][\"S\"][cmp][nsm].append([\n orig_time+sta[cmp][\"times\"][idx][0], f4[idx][0],\n tr.stats.channel, kurt_wgt, idx,\n sta[cmp][\"times\"][idx][0]\n ])\n return(sta)", "def _collect_induced_spikes(spikes, input_targeted_times, trial_length_ms, targeted_gid):\n inter_induction_wins = spt.make_windows(input_targeted_times, (0, trial_length_ms))\n inter_induction_wins = spt.ExclusiveWindows(inter_induction_wins)\n targeted_spikes = spikes[spikes.gid == targeted_gid]\n targeted_spikes = inter_induction_wins.classify_spikes(targeted_spikes)\n\n targeted_spikes = targeted_spikes[targeted_spikes['delay'] < 10.]\n induced_spk_idcs = targeted_spikes.groupby('win_idx')['delay'].idxmin().values\n\n is_induced = pd.Series(np.zeros(len(spikes), dtype=np.bool_), index=spikes.index)\n is_induced.loc[induced_spk_idcs] = True\n\n return is_induced", "def _step(self, a):\n obs, rew, done, info = super()._step(a)\n # if self.robot.body_xyz[0] > self.threshold:\n # rew = 1.0\n # self.threshold += 1\n # else:\n # rew = 0.0\n # self.steps += 1\n # if self.steps > self.max_episode_steps:\n # done = True\n return obs, rew, done, info" ]
[ "0.5619866", "0.5553437", "0.5524997", "0.53908414", "0.5296018", "0.5253825", "0.5213897", "0.52011967", "0.51841336", "0.5176683", "0.51282734", "0.5120962", "0.51179755", "0.5112065", "0.50441957", "0.50383514", "0.503653", "0.50297564", "0.4981896", "0.4948819", "0.49382964", "0.49352995", "0.4923914", "0.4913876", "0.488362", "0.486648", "0.4865212", "0.4859857", "0.4845986", "0.4844091", "0.48335487", "0.4819243", "0.48095194", "0.48041978", "0.47951248", "0.47886539", "0.478849", "0.4785646", "0.4783216", "0.47796804", "0.47752506", "0.47694692", "0.47645342", "0.47645342", "0.47645342", "0.47645342", "0.4762756", "0.4756849", "0.47488472", "0.47456446", "0.47444832", "0.47362223", "0.47362223", "0.47362223", "0.47357813", "0.47310227", "0.47287056", "0.47287056", "0.47287056", "0.47287056", "0.47241643", "0.4723517", "0.47211432", "0.47138724", "0.4713607", "0.47126657", "0.4711587", "0.4711587", "0.47075152", "0.47024995", "0.47024995", "0.46926537", "0.46907163", "0.46892238", "0.4671544", "0.46609223", "0.46589068", "0.4656004", "0.4656004", "0.46545446", "0.46545446", "0.46504384", "0.46458313", "0.46405274", "0.4635777", "0.46324214", "0.462278", "0.46170834", "0.4615073", "0.46128902", "0.46109375", "0.46103176", "0.46103162", "0.46102574", "0.46064585", "0.46045214", "0.46032125", "0.45825028", "0.45821914", "0.45736486" ]
0.7172721
0
This test should not be run as a part of the normal test suite. This is a resource test only! Nose2 will find it. Unittest will not.
def test_will_get_nwis_return_response():
    expected = 200
    response = hf.get_nwis('01585200', 'dv', '2001-01-01', '2001-01-02')
    actual = response.status_code
    assert expected == actual
    print('NWIS is up and running!')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_subsystems(self):\n pass", "def test_create_system_entire(self):\n pass", "def unitary_test():", "def test(self):\n pass", "def test_create_run(self):\n pass", "def test_4_4_1_1(self):\n pass", "def _test(self):\n pass", "def _test(self):\n pass", "def _test(self):\n pass", "def test_package(self):\n pass", "def _test(self):", "def _test(self):", "def _test(self):", "def _test(self):", "def _test(self):", "def test_require():", "def test_get_run(self):\n pass", "def test_basic_execution(self):", "def test_create_namespaced_resource_access_review(self):\n pass", "def test_create_from_pear(self):\n pass", "def test_create_unexpected_problem(self):\n pass", "def runtest(self):", "def test_create_namespaced_local_resource_access_review(self):\n pass", "def test_module(self):\n pass", "def test_households_in_admin_unit(self):", "def test_get(self):\n pass", "def test(self):\n raise NotImplementedError", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def test(self):", "def test(self):", "def testApi(self):", "def test_alien_data(self):", "def test_stub(self):\n pass", "def test_get_system(self):\n pass", "def test_loading_document(self):", "def tests():", "def test_let(self):", "def test_create_scenario1(self):\n pass", "def test_for_client():", "def runTest(self):\r\n self.setUp()\r\n self.test_CreateROI1()", "def test_install(self):\n pass", "def setUp(self):\n self", "def setUp(self):\n self", "def test_01_Init(self):\n pass", "def test_get_insumo(self):", "def test_doc():\n pass", "def test_get2(self):\n pass", "def test_begin(self):", "def test_create(self):\n pass", "def test_get_scenario(self):\n pass", "def setUp(self):\r\n pass # nothing required by all\r", "def test():\n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def test_create_client(self):\n pass", "def test_download2(self):\n pass", "def test_client_retrieve(self):\n pass", "def test_get_software(self):\n pass", "def test_document_retrieval(self):", "def test_nothing(self):", "def test_untar(self):", "def test_T01():", "def test_meme_get(self):\n pass", "def test_get1(self):\n pass", "def test_create10(self):\n pass", "def runTest(self):\r\n self.setUp()\r\n self.test_sceneImport24281()", "def test_new(self):", "def test_new(self):", "def test_get_client(self):\n pass", "def setUp(self):\r\n pass" ]
[ "0.71541893", "0.713073", "0.71006393", "0.7076272", "0.70147574", "0.69360954", "0.6920686", "0.6920686", "0.6920686", "0.6887088", "0.6876075", "0.6876075", "0.6876075", "0.6876075", "0.6876075", "0.6841704", "0.6797133", "0.67678326", "0.67677563", "0.675755", "0.6699573", "0.66841465", "0.6679246", "0.6677635", "0.6600168", "0.65922755", "0.65915775", "0.658401", "0.658401", "0.658401", "0.658401", "0.658401", "0.658401", "0.658401", "0.658401", "0.658401", "0.658401", "0.658401", "0.658401", "0.658401", "0.658401", "0.658401", "0.658401", "0.658401", "0.658401", "0.658401", "0.658401", "0.658401", "0.658401", "0.658401", "0.658401", "0.658401", "0.65839434", "0.65839434", "0.65820664", "0.6575808", "0.6569613", "0.65605456", "0.6553227", "0.65527874", "0.65374625", "0.65199053", "0.65195346", "0.65128857", "0.65076923", "0.650164", "0.650164", "0.6488903", "0.64883745", "0.648301", "0.6477642", "0.64773005", "0.64763623", "0.6467402", "0.6458281", "0.64442104", "0.6444016", "0.6444016", "0.6444016", "0.6444016", "0.6444016", "0.6444016", "0.6444016", "0.6444016", "0.6444016", "0.64312583", "0.6421439", "0.64190257", "0.6418158", "0.6414663", "0.6408392", "0.63910073", "0.6389432", "0.6359039", "0.6355904", "0.63555336", "0.6354709", "0.6353834", "0.6353834", "0.6349538", "0.6345929" ]
0.0
-1